content stringlengths 5 1.05M |
|---|
from werkzeug.contrib.cache import NullCache
from app import cache, settings
from tests.integration.integration_test_case import IntegrationTestCase
class TestApplicationVariables(IntegrationTestCase):
    """Integration tests for the application's cache configuration.

    Verifies that when ``EQ_ENABLE_CACHE`` is disabled the app wires up the
    no-op ``NullCache`` backend.

    NOTE(review): ``werkzeug.contrib.cache`` was removed in Werkzeug 1.0
    (``NullCache`` moved to the separate ``cachelib`` package) — confirm the
    pinned Werkzeug version before upgrading.
    """

    def setUp(self):
        # Remember whatever value was configured so tearDown can restore it
        # exactly, rather than assuming caching was enabled beforehand.
        self._previous_eq_enable_cache = settings.EQ_ENABLE_CACHE
        settings.EQ_ENABLE_CACHE = False
        IntegrationTestCase.setUp(self)

    def tearDown(self):
        # Restore the pre-test setting (the original code forced it to True,
        # which leaked state if the suite ran with caching disabled).
        settings.EQ_ENABLE_CACHE = self._previous_eq_enable_cache
        IntegrationTestCase.tearDown(self)

    def test_schema_is_cached(self):
        with self.application.app_context():
            self.assertTrue(isinstance(cache.cache, NullCache))
|
# Demonstrate positional str.format substitution into a message template.
quantity = 3
itemno = 567
price = 49.95
myorder = "I want {} pieces of item {} for {} dollars."
# Build the message first, then emit it, instead of formatting inline.
message = myorder.format(quantity, itemno, price)
print(message)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...vss_client import VssClient
from . import models
class ReleaseClient(VssClient):
"""Release
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
    def __init__(self, base_url=None, creds=None):
        """Create a ReleaseClient bound to *base_url* with *creds*.

        :param str base_url: Service URL
        :param Authentication creds: Authenticated credentials.
        """
        super(ReleaseClient, self).__init__(base_url, creds)
        # Register every model class from the generated models module so the
        # msrest (de)serializers can resolve type names like 'ReleaseApproval'.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
resource_area_identifier = 'efc2f575-36ef-48e9-b672-0c6fb4a48ac5'
    def get_agent_artifact_definitions(self, project, release_id):
        """GetAgentArtifactDefinitions.
        [Preview API] Returns the artifact details that automation agent requires
        :param str project: Project ID or project name
        :param int release_id: Id of the release whose artifacts are fetched.
        :rtype: [AgentArtifactDefinition]
        """
        # Route values are only added when supplied; None means "omit".
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        response = self._send(http_method='GET',
                              location_id='f2571c27-bf50-4938-b396-32d109ddef26',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('[AgentArtifactDefinition]', self._unwrap_collection(response))
    def get_approvals(self, project, assigned_to_filter=None, status_filter=None, release_ids_filter=None, type_filter=None, top=None, continuation_token=None, query_order=None, include_my_group_approvals=None):
        """GetApprovals.
        [Preview API] Get a list of approvals
        :param str project: Project ID or project name
        :param str assigned_to_filter: Approvals assigned to this user.
        :param str status_filter: Approvals with this status. Default is 'pending'.
        :param [int] release_ids_filter: Approvals for release id(s) mentioned in the filter. Multiple releases can be mentioned by separating them with ',' e.g. releaseIdsFilter=1,2,3,4.
        :param str type_filter: Approval with this type.
        :param int top: Number of approvals to get. Default is 50.
        :param int continuation_token: Gets the approvals after the continuation token provided.
        :param str query_order: Gets the results in the defined order of created approvals. Default is 'descending'.
        :param bool include_my_group_approvals: 'true' to include my group approvals. Default is 'false'.
        :rtype: [ReleaseApproval]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        query_parameters = {}
        if assigned_to_filter is not None:
            query_parameters['assignedToFilter'] = self._serialize.query('assigned_to_filter', assigned_to_filter, 'str')
        if status_filter is not None:
            query_parameters['statusFilter'] = self._serialize.query('status_filter', status_filter, 'str')
        if release_ids_filter is not None:
            # The service expects a single comma-delimited string, not a list.
            release_ids_filter = ",".join(map(str, release_ids_filter))
            query_parameters['releaseIdsFilter'] = self._serialize.query('release_ids_filter', release_ids_filter, 'str')
        if type_filter is not None:
            query_parameters['typeFilter'] = self._serialize.query('type_filter', type_filter, 'str')
        if top is not None:
            query_parameters['top'] = self._serialize.query('top', top, 'int')
        if continuation_token is not None:
            # NOTE: this endpoint's continuation token is numeric, unlike the
            # string token used by get_release_definitions.
            query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'int')
        if query_order is not None:
            query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
        if include_my_group_approvals is not None:
            query_parameters['includeMyGroupApprovals'] = self._serialize.query('include_my_group_approvals', include_my_group_approvals, 'bool')
        response = self._send(http_method='GET',
                              location_id='b47c6458-e73b-47cb-a770-4df1e8813a91',
                              version='4.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[ReleaseApproval]', self._unwrap_collection(response))
    def get_approval_history(self, project, approval_step_id):
        """GetApprovalHistory.
        [Preview API] Get approval history.
        :param str project: Project ID or project name
        :param int approval_step_id: Id of the approval.
        :rtype: :class:`<ReleaseApproval> <release.v4_0.models.ReleaseApproval>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if approval_step_id is not None:
            route_values['approvalStepId'] = self._serialize.url('approval_step_id', approval_step_id, 'int')
        response = self._send(http_method='GET',
                              location_id='250c7158-852e-4130-a00f-a0cce9b72d05',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('ReleaseApproval', response)
    def get_approval(self, project, approval_id, include_history=None):
        """GetApproval.
        [Preview API] Get an approval.
        :param str project: Project ID or project name
        :param int approval_id: Id of the approval.
        :param bool include_history: 'true' to include history of the approval. Default is 'false'.
        :rtype: :class:`<ReleaseApproval> <release.v4_0.models.ReleaseApproval>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if approval_id is not None:
            route_values['approvalId'] = self._serialize.url('approval_id', approval_id, 'int')
        query_parameters = {}
        if include_history is not None:
            query_parameters['includeHistory'] = self._serialize.query('include_history', include_history, 'bool')
        response = self._send(http_method='GET',
                              location_id='9328e074-59fb-465a-89d9-b09c82ee5109',
                              version='4.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('ReleaseApproval', response)
    def update_release_approval(self, approval, project, approval_id):
        """UpdateReleaseApproval.
        [Preview API] Update status of an approval
        :param :class:`<ReleaseApproval> <release.v4_0.models.ReleaseApproval>` approval: ReleaseApproval object having status, approver and comments.
        :param str project: Project ID or project name
        :param int approval_id: Id of the approval.
        :rtype: :class:`<ReleaseApproval> <release.v4_0.models.ReleaseApproval>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if approval_id is not None:
            route_values['approvalId'] = self._serialize.url('approval_id', approval_id, 'int')
        content = self._serialize.body(approval, 'ReleaseApproval')
        response = self._send(http_method='PATCH',
                              location_id='9328e074-59fb-465a-89d9-b09c82ee5109',
                              version='4.0-preview.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('ReleaseApproval', response)
    def update_release_approvals(self, approvals, project):
        """UpdateReleaseApprovals.
        [Preview API] Update several approvals in one call.
        :param [ReleaseApproval] approvals: Approval objects to update.
        :param str project: Project ID or project name
        :rtype: [ReleaseApproval]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        content = self._serialize.body(approvals, '[ReleaseApproval]')
        response = self._send(http_method='PATCH',
                              location_id='c957584a-82aa-4131-8222-6d47f78bfa7a',
                              version='4.0-preview.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('[ReleaseApproval]', self._unwrap_collection(response))
    def get_auto_trigger_issues(self, artifact_type, source_id, artifact_version_id):
        """GetAutoTriggerIssues.
        [Preview API] Get issues that prevented an artifact from auto-triggering releases.
        :param str artifact_type: Type of the artifact (e.g. Build).
        :param str source_id: Artifact source id.
        :param str artifact_version_id: Artifact version id.
        :rtype: [AutoTriggerIssue]
        """
        # This endpoint is organization-scoped: no project route value.
        query_parameters = {}
        if artifact_type is not None:
            query_parameters['artifactType'] = self._serialize.query('artifact_type', artifact_type, 'str')
        if source_id is not None:
            query_parameters['sourceId'] = self._serialize.query('source_id', source_id, 'str')
        if artifact_version_id is not None:
            query_parameters['artifactVersionId'] = self._serialize.query('artifact_version_id', artifact_version_id, 'str')
        response = self._send(http_method='GET',
                              location_id='c1a68497-69da-40fb-9423-cab19cfeeca9',
                              version='4.0-preview.1',
                              query_parameters=query_parameters)
        return self._deserialize('[AutoTriggerIssue]', self._unwrap_collection(response))
    def get_release_changes(self, project, release_id, base_release_id=None, top=None):
        """GetReleaseChanges.
        [Preview API] Get the source changes associated with a release.
        :param str project: Project ID or project name
        :param int release_id: Id of the release.
        :param int base_release_id: Release to compare against (changes since this release).
        :param int top: Maximum number of changes to return.
        :rtype: [Change]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        query_parameters = {}
        if base_release_id is not None:
            query_parameters['baseReleaseId'] = self._serialize.query('base_release_id', base_release_id, 'int')
        if top is not None:
            # '$top' (OData-style) is intentional for this endpoint.
            query_parameters['$top'] = self._serialize.query('top', top, 'int')
        response = self._send(http_method='GET',
                              location_id='8dcf9fe9-ca37-4113-8ee1-37928e98407c',
                              version='4.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[Change]', self._unwrap_collection(response))
    def get_definition_environments(self, project, task_group_id=None, property_filters=None):
        """GetDefinitionEnvironments.
        [Preview API] Get environment references across release definitions.
        :param str project: Project ID or project name
        :param str task_group_id: Filter to environments that use this task group.
        :param [str] property_filters: Extended properties to retrieve.
        :rtype: [DefinitionEnvironmentReference]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        query_parameters = {}
        if task_group_id is not None:
            query_parameters['taskGroupId'] = self._serialize.query('task_group_id', task_group_id, 'str')
        if property_filters is not None:
            # The service expects a single comma-delimited string, not a list.
            property_filters = ",".join(property_filters)
            query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
        response = self._send(http_method='GET',
                              location_id='12b5d21a-f54c-430e-a8c1-7515d196890e',
                              version='4.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[DefinitionEnvironmentReference]', self._unwrap_collection(response))
    def create_release_definition(self, release_definition, project):
        """CreateReleaseDefinition.
        [Preview API] Create a release definition
        :param :class:`<ReleaseDefinition> <release.v4_0.models.ReleaseDefinition>` release_definition: release definition object to create.
        :param str project: Project ID or project name
        :rtype: :class:`<ReleaseDefinition> <release.v4_0.models.ReleaseDefinition>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        content = self._serialize.body(release_definition, 'ReleaseDefinition')
        response = self._send(http_method='POST',
                              location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665',
                              version='4.0-preview.3',
                              route_values=route_values,
                              content=content)
        return self._deserialize('ReleaseDefinition', response)
    def delete_release_definition(self, project, definition_id):
        """DeleteReleaseDefinition.
        [Preview API] Delete a release definition.
        :param str project: Project ID or project name
        :param int definition_id: Id of the release definition.
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if definition_id is not None:
            route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
        # DELETE returns no body; nothing to deserialize.
        self._send(http_method='DELETE',
                   location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665',
                   version='4.0-preview.3',
                   route_values=route_values)
    def get_release_definition(self, project, definition_id, property_filters=None):
        """GetReleaseDefinition.
        [Preview API] Get a release definition.
        :param str project: Project ID or project name
        :param int definition_id: Id of the release definition.
        :param [str] property_filters: A comma-delimited list of extended properties to retrieve.
        :rtype: :class:`<ReleaseDefinition> <release.v4_0.models.ReleaseDefinition>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if definition_id is not None:
            route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
        query_parameters = {}
        if property_filters is not None:
            # The service expects a single comma-delimited string, not a list.
            property_filters = ",".join(property_filters)
            query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
        response = self._send(http_method='GET',
                              location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665',
                              version='4.0-preview.3',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('ReleaseDefinition', response)
    def get_release_definition_revision(self, project, definition_id, revision, **kwargs):
        """GetReleaseDefinitionRevision.
        [Preview API] Get release definition of a given revision.
        :param str project: Project ID or project name
        :param int definition_id: Id of the release definition.
        :param int revision: Revision number of the release definition.
        :param callback: Optional keyword-only progress callback forwarded to the download stream.
        :rtype: object
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if definition_id is not None:
            route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
        query_parameters = {}
        if revision is not None:
            query_parameters['revision'] = self._serialize.query('revision', revision, 'int')
        # Revision content is fetched as raw text and streamed back to the
        # caller instead of being deserialized into a model.
        response = self._send(http_method='GET',
                              location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665',
                              version='4.0-preview.3',
                              route_values=route_values,
                              query_parameters=query_parameters,
                              accept_media_type='text/plain')
        if "callback" in kwargs:
            callback = kwargs["callback"]
        else:
            callback = None
        return self._client.stream_download(response, callback=callback)
    def get_release_definitions(self, project, search_text=None, expand=None, artifact_type=None, artifact_source_id=None, top=None, continuation_token=None, query_order=None, path=None, is_exact_name_match=None, tag_filter=None, property_filters=None, definition_id_filter=None):
        """GetReleaseDefinitions.
        [Preview API] Get a list of release definitions.
        :param str project: Project ID or project name
        :param str search_text: Get release definitions with names starting with searchText.
        :param str expand: The properties that should be expanded in the list of Release definitions.
        :param str artifact_type: Release definitions with given artifactType will be returned. Values can be Build, Jenkins, GitHub, Nuget, Team Build (external), ExternalTFSBuild, Git, TFVC, ExternalTfsXamlBuild.
        :param str artifact_source_id: Release definitions with given artifactSourceId will be returned. e.g. For build it would be {projectGuid}:{BuildDefinitionId}, for Jenkins it would be {JenkinsConnectionId}:{JenkinsDefinitionId}, for TfsOnPrem it would be {TfsOnPremConnectionId}:{ProjectName}:{TfsOnPremDefinitionId}. For third-party artifacts e.g. TeamCity, BitBucket you may refer 'uniqueSourceIdentifier' inside vss-extension.json at https://github.com/Microsoft/vsts-rm-extensions/blob/master/Extensions.
        :param int top: Number of release definitions to get.
        :param str continuation_token: Gets the release definitions after the continuation token provided.
        :param str query_order: Gets the results in the defined order. Default is 'IdAscending'.
        :param str path: Gets the release definitions under the specified path.
        :param bool is_exact_name_match: 'true'to gets the release definitions with exact match as specified in searchText. Default is 'false'.
        :param [str] tag_filter: A comma-delimited list of tags. Only release definitions with these tags will be returned.
        :param [str] property_filters: A comma-delimited list of extended properties to retrieve.
        :param [str] definition_id_filter: A comma-delimited list of release definitions to retrieve.
        :rtype: [ReleaseDefinition]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        query_parameters = {}
        if search_text is not None:
            query_parameters['searchText'] = self._serialize.query('search_text', search_text, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
        if artifact_type is not None:
            query_parameters['artifactType'] = self._serialize.query('artifact_type', artifact_type, 'str')
        if artifact_source_id is not None:
            query_parameters['artifactSourceId'] = self._serialize.query('artifact_source_id', artifact_source_id, 'str')
        if top is not None:
            query_parameters['$top'] = self._serialize.query('top', top, 'int')
        if continuation_token is not None:
            query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
        if query_order is not None:
            query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
        if path is not None:
            query_parameters['path'] = self._serialize.query('path', path, 'str')
        if is_exact_name_match is not None:
            query_parameters['isExactNameMatch'] = self._serialize.query('is_exact_name_match', is_exact_name_match, 'bool')
        # List-valued filters are flattened to comma-delimited strings, as the
        # REST API expects.
        if tag_filter is not None:
            tag_filter = ",".join(tag_filter)
            query_parameters['tagFilter'] = self._serialize.query('tag_filter', tag_filter, 'str')
        if property_filters is not None:
            property_filters = ",".join(property_filters)
            query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
        if definition_id_filter is not None:
            definition_id_filter = ",".join(definition_id_filter)
            query_parameters['definitionIdFilter'] = self._serialize.query('definition_id_filter', definition_id_filter, 'str')
        response = self._send(http_method='GET',
                              location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665',
                              version='4.0-preview.3',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[ReleaseDefinition]', self._unwrap_collection(response))
    def update_release_definition(self, release_definition, project):
        """UpdateReleaseDefinition.
        [Preview API] Update a release definition.
        :param :class:`<ReleaseDefinition> <release.v4_0.models.ReleaseDefinition>` release_definition: Release definition object to update.
        :param str project: Project ID or project name
        :rtype: :class:`<ReleaseDefinition> <release.v4_0.models.ReleaseDefinition>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        content = self._serialize.body(release_definition, 'ReleaseDefinition')
        # PUT: full replacement of the definition (the definition id is taken
        # from the body, not the route).
        response = self._send(http_method='PUT',
                              location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665',
                              version='4.0-preview.3',
                              route_values=route_values,
                              content=content)
        return self._deserialize('ReleaseDefinition', response)
    def get_deployments(self, project, definition_id=None, definition_environment_id=None, created_by=None, min_modified_time=None, max_modified_time=None, deployment_status=None, operation_status=None, latest_attempts_only=None, query_order=None, top=None, continuation_token=None, created_for=None):
        """GetDeployments.
        [Preview API] Get a filtered list of deployments.
        :param str project: Project ID or project name
        :param int definition_id: Filter to deployments of this release definition.
        :param int definition_environment_id: Filter to deployments of this definition environment.
        :param str created_by: Filter to deployments created by this user.
        :param datetime min_modified_time: Lower bound on modification time.
        :param datetime max_modified_time: Upper bound on modification time.
        :param str deployment_status: Filter by deployment status.
        :param str operation_status: Filter by operation status.
        :param bool latest_attempts_only: Return only the latest attempt per deployment.
        :param str query_order: Result ordering.
        :param int top: Maximum number of deployments to return.
        :param int continuation_token: Gets deployments after this continuation token.
        :param str created_for: Filter to deployments created for this user.
        :rtype: [Deployment]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        query_parameters = {}
        if definition_id is not None:
            query_parameters['definitionId'] = self._serialize.query('definition_id', definition_id, 'int')
        if definition_environment_id is not None:
            query_parameters['definitionEnvironmentId'] = self._serialize.query('definition_environment_id', definition_environment_id, 'int')
        if created_by is not None:
            query_parameters['createdBy'] = self._serialize.query('created_by', created_by, 'str')
        if min_modified_time is not None:
            # datetimes are serialized to ISO-8601 strings for the query.
            query_parameters['minModifiedTime'] = self._serialize.query('min_modified_time', min_modified_time, 'iso-8601')
        if max_modified_time is not None:
            query_parameters['maxModifiedTime'] = self._serialize.query('max_modified_time', max_modified_time, 'iso-8601')
        if deployment_status is not None:
            query_parameters['deploymentStatus'] = self._serialize.query('deployment_status', deployment_status, 'str')
        if operation_status is not None:
            query_parameters['operationStatus'] = self._serialize.query('operation_status', operation_status, 'str')
        if latest_attempts_only is not None:
            query_parameters['latestAttemptsOnly'] = self._serialize.query('latest_attempts_only', latest_attempts_only, 'bool')
        if query_order is not None:
            query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
        if top is not None:
            query_parameters['$top'] = self._serialize.query('top', top, 'int')
        if continuation_token is not None:
            query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'int')
        if created_for is not None:
            query_parameters['createdFor'] = self._serialize.query('created_for', created_for, 'str')
        response = self._send(http_method='GET',
                              location_id='b005ef73-cddc-448e-9ba2-5193bf36b19f',
                              version='4.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[Deployment]', self._unwrap_collection(response))
    def get_deployments_for_multiple_environments(self, query_parameters, project):
        """GetDeploymentsForMultipleEnvironments.
        [Preview API] Query deployments across multiple environments in one call.
        :param :class:`<DeploymentQueryParameters> <release.v4_0.models.DeploymentQueryParameters>` query_parameters: Query describing the environments/filters.
        :param str project: Project ID or project name
        :rtype: [Deployment]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        # Unlike get_deployments, the filter is POSTed as a body object.
        content = self._serialize.body(query_parameters, 'DeploymentQueryParameters')
        response = self._send(http_method='POST',
                              location_id='b005ef73-cddc-448e-9ba2-5193bf36b19f',
                              version='4.0-preview.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('[Deployment]', self._unwrap_collection(response))
    def get_release_environment(self, project, release_id, environment_id):
        """GetReleaseEnvironment.
        [Preview API] Get a release environment by release and environment id.
        :param str project: Project ID or project name
        :param int release_id: Id of the release.
        :param int environment_id: Id of the release environment.
        :rtype: :class:`<ReleaseEnvironment> <release.v4_0.models.ReleaseEnvironment>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        if environment_id is not None:
            route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
        response = self._send(http_method='GET',
                              location_id='a7e426b1-03dc-48af-9dfe-c98bac612dcb',
                              version='4.0-preview.4',
                              route_values=route_values)
        return self._deserialize('ReleaseEnvironment', response)
    def update_release_environment(self, environment_update_data, project, release_id, environment_id):
        """UpdateReleaseEnvironment.
        [Preview API] Update the status of a release environment
        :param :class:`<ReleaseEnvironmentUpdateMetadata> <release.v4_0.models.ReleaseEnvironmentUpdateMetadata>` environment_update_data: Environment update meta data.
        :param str project: Project ID or project name
        :param int release_id: Id of the release.
        :param int environment_id: Id of release environment.
        :rtype: :class:`<ReleaseEnvironment> <release.v4_0.models.ReleaseEnvironment>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        if environment_id is not None:
            route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
        content = self._serialize.body(environment_update_data, 'ReleaseEnvironmentUpdateMetadata')
        response = self._send(http_method='PATCH',
                              location_id='a7e426b1-03dc-48af-9dfe-c98bac612dcb',
                              version='4.0-preview.4',
                              route_values=route_values,
                              content=content)
        return self._deserialize('ReleaseEnvironment', response)
    def create_definition_environment_template(self, template, project):
        """CreateDefinitionEnvironmentTemplate.
        [Preview API] Create an environment template for release definitions.
        :param :class:`<ReleaseDefinitionEnvironmentTemplate> <release.v4_0.models.ReleaseDefinitionEnvironmentTemplate>` template: Template to create.
        :param str project: Project ID or project name
        :rtype: :class:`<ReleaseDefinitionEnvironmentTemplate> <release.v4_0.models.ReleaseDefinitionEnvironmentTemplate>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        content = self._serialize.body(template, 'ReleaseDefinitionEnvironmentTemplate')
        response = self._send(http_method='POST',
                              location_id='6b03b696-824e-4479-8eb2-6644a51aba89',
                              version='4.0-preview.2',
                              route_values=route_values,
                              content=content)
        return self._deserialize('ReleaseDefinitionEnvironmentTemplate', response)
    def delete_definition_environment_template(self, project, template_id):
        """DeleteDefinitionEnvironmentTemplate.
        [Preview API] Delete an environment template.
        :param str project: Project ID or project name
        :param str template_id: Id of the template to delete.
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        query_parameters = {}
        if template_id is not None:
            # templateId travels as a query parameter, not a route value.
            query_parameters['templateId'] = self._serialize.query('template_id', template_id, 'str')
        self._send(http_method='DELETE',
                   location_id='6b03b696-824e-4479-8eb2-6644a51aba89',
                   version='4.0-preview.2',
                   route_values=route_values,
                   query_parameters=query_parameters)
    def get_definition_environment_template(self, project, template_id):
        """GetDefinitionEnvironmentTemplate.
        [Preview API] Get an environment template by id.
        :param str project: Project ID or project name
        :param str template_id: Id of the template.
        :rtype: :class:`<ReleaseDefinitionEnvironmentTemplate> <release.v4_0.models.ReleaseDefinitionEnvironmentTemplate>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        query_parameters = {}
        if template_id is not None:
            query_parameters['templateId'] = self._serialize.query('template_id', template_id, 'str')
        response = self._send(http_method='GET',
                              location_id='6b03b696-824e-4479-8eb2-6644a51aba89',
                              version='4.0-preview.2',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('ReleaseDefinitionEnvironmentTemplate', response)
    def list_definition_environment_templates(self, project):
        """ListDefinitionEnvironmentTemplates.
        [Preview API] List all environment templates for a project.
        :param str project: Project ID or project name
        :rtype: [ReleaseDefinitionEnvironmentTemplate]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        response = self._send(http_method='GET',
                              location_id='6b03b696-824e-4479-8eb2-6644a51aba89',
                              version='4.0-preview.2',
                              route_values=route_values)
        return self._deserialize('[ReleaseDefinitionEnvironmentTemplate]', self._unwrap_collection(response))
    def create_favorites(self, favorite_items, project, scope, identity_id=None):
        """CreateFavorites.
        [Preview API] Add favorite items for an identity within a scope.
        :param [FavoriteItem] favorite_items: Items to mark as favorites.
        :param str project: Project ID or project name
        :param str scope: Favorite scope (route value).
        :param str identity_id: Identity the favorites belong to; defaults to the caller when omitted.
        :rtype: [FavoriteItem]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if scope is not None:
            route_values['scope'] = self._serialize.url('scope', scope, 'str')
        query_parameters = {}
        if identity_id is not None:
            query_parameters['identityId'] = self._serialize.query('identity_id', identity_id, 'str')
        content = self._serialize.body(favorite_items, '[FavoriteItem]')
        response = self._send(http_method='POST',
                              location_id='938f7222-9acb-48fe-b8a3-4eda04597171',
                              version='4.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters,
                              content=content)
        return self._deserialize('[FavoriteItem]', self._unwrap_collection(response))
    def delete_favorites(self, project, scope, identity_id=None, favorite_item_ids=None):
        """DeleteFavorites.
        [Preview API] Remove favorite items for an identity within a scope.
        :param str project: Project ID or project name
        :param str scope: Favorite scope (route value).
        :param str identity_id: Identity whose favorites are removed; defaults to the caller when omitted.
        :param str favorite_item_ids: Ids of the favorite items to remove (already comma-delimited string).
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if scope is not None:
            route_values['scope'] = self._serialize.url('scope', scope, 'str')
        query_parameters = {}
        if identity_id is not None:
            query_parameters['identityId'] = self._serialize.query('identity_id', identity_id, 'str')
        if favorite_item_ids is not None:
            query_parameters['favoriteItemIds'] = self._serialize.query('favorite_item_ids', favorite_item_ids, 'str')
        self._send(http_method='DELETE',
                   location_id='938f7222-9acb-48fe-b8a3-4eda04597171',
                   version='4.0-preview.1',
                   route_values=route_values,
                   query_parameters=query_parameters)
    def get_favorites(self, project, scope, identity_id=None):
        """GetFavorites.
        [Preview API] Get favorite items for an identity within a scope.
        :param str project: Project ID or project name
        :param str scope: Favorite scope (route value).
        :param str identity_id: Identity whose favorites are fetched; defaults to the caller when omitted.
        :rtype: [FavoriteItem]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if scope is not None:
            route_values['scope'] = self._serialize.url('scope', scope, 'str')
        query_parameters = {}
        if identity_id is not None:
            query_parameters['identityId'] = self._serialize.query('identity_id', identity_id, 'str')
        response = self._send(http_method='GET',
                              location_id='938f7222-9acb-48fe-b8a3-4eda04597171',
                              version='4.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[FavoriteItem]', self._unwrap_collection(response))
    def create_folder(self, folder, project, path):
        """CreateFolder.
        [Preview API] Creates a new folder
        :param :class:`<Folder> <release.v4_0.models.Folder>` folder: Folder to create.
        :param str project: Project ID or project name
        :param str path: Path under which the folder is created.
        :rtype: :class:`<Folder> <release.v4_0.models.Folder>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if path is not None:
            route_values['path'] = self._serialize.url('path', path, 'str')
        content = self._serialize.body(folder, 'Folder')
        response = self._send(http_method='POST',
                              location_id='f7ddf76d-ce0c-4d68-94ff-becaec5d9dea',
                              version='4.0-preview.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('Folder', response)
    def delete_folder(self, project, path):
        """DeleteFolder.
        [Preview API] Deletes a definition folder for given folder name and path and all it's existing definitions
        :param str project: Project ID or project name
        :param str path: Path of the folder to delete.
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if path is not None:
            route_values['path'] = self._serialize.url('path', path, 'str')
        self._send(http_method='DELETE',
                   location_id='f7ddf76d-ce0c-4d68-94ff-becaec5d9dea',
                   version='4.0-preview.1',
                   route_values=route_values)
    def get_folders(self, project, path=None, query_order=None):
        """GetFolders.
        [Preview API] Gets folders
        :param str project: Project ID or project name
        :param str path: Optional path to scope the returned folders (URL route value).
        :param str query_order: Optional ordering of the results (sent as the 'queryOrder' query parameter).
        :rtype: [Folder]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if path is not None:
            route_values['path'] = self._serialize.url('path', path, 'str')
        query_parameters = {}
        if query_order is not None:
            query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
        response = self._send(http_method='GET',
                              location_id='f7ddf76d-ce0c-4d68-94ff-becaec5d9dea',
                              version='4.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        # The service wraps collections; unwrap before deserializing the list.
        return self._deserialize('[Folder]', self._unwrap_collection(response))
    def update_folder(self, folder, project, path):
        """UpdateFolder.
        [Preview API] Updates an existing folder at given existing path
        :param :class:`<Folder> <release.v4_0.models.Folder>` folder: Folder object carrying the updated values.
        :param str project: Project ID or project name
        :param str path: Existing path of the folder to update (URL route value).
        :rtype: :class:`<Folder> <release.v4_0.models.Folder>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if path is not None:
            route_values['path'] = self._serialize.url('path', path, 'str')
        content = self._serialize.body(folder, 'Folder')
        # PATCH applies a partial update to the folder resource.
        response = self._send(http_method='PATCH',
                              location_id='f7ddf76d-ce0c-4d68-94ff-becaec5d9dea',
                              version='4.0-preview.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('Folder', response)
    def get_release_history(self, project, release_id):
        """GetReleaseHistory.
        [Preview API] Returns the list of revisions for the given release.
        :param str project: Project ID or project name
        :param int release_id: Id of the release whose revisions are requested.
        :rtype: [ReleaseRevision]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        response = self._send(http_method='GET',
                              location_id='23f461c8-629a-4144-a076-3054fa5f268a',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('[ReleaseRevision]', self._unwrap_collection(response))
    def get_input_values(self, query, project):
        """GetInputValues.
        [Preview API] Resolves input values for the given query (the query object is echoed back with values filled in).
        :param :class:`<InputValuesQuery> <release.v4_0.models.InputValuesQuery>` query: Query describing the inputs to resolve.
        :param str project: Project ID or project name
        :rtype: :class:`<InputValuesQuery> <release.v4_0.models.InputValuesQuery>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        content = self._serialize.body(query, 'InputValuesQuery')
        # POST is used even though this is a read: the query is too complex for URL parameters.
        response = self._send(http_method='POST',
                              location_id='71dd499b-317d-45ea-9134-140ea1932b5e',
                              version='4.0-preview.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('InputValuesQuery', response)
    def get_issues(self, project, build_id, source_id=None):
        """GetIssues.
        [Preview API] Gets auto-trigger issues for a build.
        :param str project: Project ID or project name
        :param int build_id: Id of the build (URL route value).
        :param str source_id: Optional artifact source id filter (sent as the 'sourceId' query parameter).
        :rtype: [AutoTriggerIssue]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if build_id is not None:
            route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
        query_parameters = {}
        if source_id is not None:
            query_parameters['sourceId'] = self._serialize.query('source_id', source_id, 'str')
        response = self._send(http_method='GET',
                              location_id='cd42261a-f5c6-41c8-9259-f078989b9f25',
                              version='4.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[AutoTriggerIssue]', self._unwrap_collection(response))
    def get_log(self, project, release_id, environment_id, task_id, attempt_id=None, **kwargs):
        """GetLog.
        [Preview API] Gets logs
        :param str project: Project ID or project name
        :param int release_id: Id of the release.
        :param int environment_id: Id of release environment.
        :param int task_id: ReleaseTask Id for the log.
        :param int attempt_id: Id of the attempt.
        :param kwargs: Optional 'callback' — a callable forwarded to the underlying stream download.
        :rtype: object (a streamed download of the plain-text log content)
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        if environment_id is not None:
            route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
        if task_id is not None:
            route_values['taskId'] = self._serialize.url('task_id', task_id, 'int')
        query_parameters = {}
        if attempt_id is not None:
            query_parameters['attemptId'] = self._serialize.query('attempt_id', attempt_id, 'int')
        # Request the raw text log rather than a JSON payload.
        response = self._send(http_method='GET',
                              location_id='e71ba1ed-c0a4-4a28-a61f-2dd5f68cf3fd',
                              version='4.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters,
                              accept_media_type='text/plain')
        if "callback" in kwargs:
            callback = kwargs["callback"]
        else:
            callback = None
        # Stream the response body back to the caller instead of deserializing it.
        return self._client.stream_download(response, callback=callback)
    def get_logs(self, project, release_id, **kwargs):
        """GetLogs.
        [Preview API] Get logs for a release Id.
        :param str project: Project ID or project name
        :param int release_id: Id of the release.
        :param kwargs: Optional 'callback' — a callable forwarded to the underlying stream download.
        :rtype: object (a streamed download of the zipped logs)
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        # The full log bundle comes back as a zip archive.
        response = self._send(http_method='GET',
                              location_id='c37fbab5-214b-48e4-a55b-cb6b4f6e4038',
                              version='4.0-preview.2',
                              route_values=route_values,
                              accept_media_type='application/zip')
        if "callback" in kwargs:
            callback = kwargs["callback"]
        else:
            callback = None
        return self._client.stream_download(response, callback=callback)
    def get_task_log(self, project, release_id, environment_id, release_deploy_phase_id, task_id, **kwargs):
        """GetTaskLog.
        [Preview API] Gets the task log of a release as a plain text file.
        :param str project: Project ID or project name
        :param int release_id: Id of the release.
        :param int environment_id: Id of release environment.
        :param int release_deploy_phase_id: Release deploy phase Id.
        :param int task_id: ReleaseTask Id for the log.
        :param kwargs: Optional 'callback' — a callable forwarded to the underlying stream download.
        :rtype: object (a streamed download of the plain-text log content)
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        if environment_id is not None:
            route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
        if release_deploy_phase_id is not None:
            route_values['releaseDeployPhaseId'] = self._serialize.url('release_deploy_phase_id', release_deploy_phase_id, 'int')
        if task_id is not None:
            route_values['taskId'] = self._serialize.url('task_id', task_id, 'int')
        response = self._send(http_method='GET',
                              location_id='17c91af7-09fd-4256-bff1-c24ee4f73bc0',
                              version='4.0-preview.2',
                              route_values=route_values,
                              accept_media_type='text/plain')
        if "callback" in kwargs:
            callback = kwargs["callback"]
        else:
            callback = None
        return self._client.stream_download(response, callback=callback)
    def get_manual_intervention(self, project, release_id, manual_intervention_id):
        """GetManualIntervention.
        [Preview API] Gets a single manual intervention of a release.
        :param str project: Project ID or project name
        :param int release_id: Id of the release (URL route value).
        :param int manual_intervention_id: Id of the manual intervention (URL route value).
        :rtype: :class:`<ManualIntervention> <release.v4_0.models.ManualIntervention>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        if manual_intervention_id is not None:
            route_values['manualInterventionId'] = self._serialize.url('manual_intervention_id', manual_intervention_id, 'int')
        response = self._send(http_method='GET',
                              location_id='616c46e4-f370-4456-adaa-fbaf79c7b79e',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('ManualIntervention', response)
    def get_manual_interventions(self, project, release_id):
        """GetManualInterventions.
        [Preview API] Gets all manual interventions of a release.
        :param str project: Project ID or project name
        :param int release_id: Id of the release (URL route value).
        :rtype: [ManualIntervention]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        response = self._send(http_method='GET',
                              location_id='616c46e4-f370-4456-adaa-fbaf79c7b79e',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('[ManualIntervention]', self._unwrap_collection(response))
    def update_manual_intervention(self, manual_intervention_update_metadata, project, release_id, manual_intervention_id):
        """UpdateManualIntervention.
        [Preview API] Updates a manual intervention (e.g. to resolve it) and returns the updated object.
        :param :class:`<ManualInterventionUpdateMetadata> <release.v4_0.models.ManualInterventionUpdateMetadata>` manual_intervention_update_metadata: Properties to apply to the manual intervention.
        :param str project: Project ID or project name
        :param int release_id: Id of the release (URL route value).
        :param int manual_intervention_id: Id of the manual intervention to update (URL route value).
        :rtype: :class:`<ManualIntervention> <release.v4_0.models.ManualIntervention>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        if manual_intervention_id is not None:
            route_values['manualInterventionId'] = self._serialize.url('manual_intervention_id', manual_intervention_id, 'int')
        content = self._serialize.body(manual_intervention_update_metadata, 'ManualInterventionUpdateMetadata')
        response = self._send(http_method='PATCH',
                              location_id='616c46e4-f370-4456-adaa-fbaf79c7b79e',
                              version='4.0-preview.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('ManualIntervention', response)
    def get_metrics(self, project, min_metrics_time=None):
        """GetMetrics.
        [Preview API] Gets release metrics for a project.
        :param str project: Project ID or project name
        :param datetime min_metrics_time: Optional lower bound; serialized as an ISO-8601 'minMetricsTime' query parameter.
        :rtype: [Metric]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        query_parameters = {}
        if min_metrics_time is not None:
            query_parameters['minMetricsTime'] = self._serialize.query('min_metrics_time', min_metrics_time, 'iso-8601')
        response = self._send(http_method='GET',
                              location_id='cd1502bb-3c73-4e11-80a6-d11308dceae5',
                              version='4.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[Metric]', self._unwrap_collection(response))
    def get_release_projects(self, artifact_type, artifact_source_id):
        """GetReleaseProjects.
        [Preview API] Gets project references for a given artifact type and source.
        :param str artifact_type: Artifact type filter ('artifactType' query parameter).
        :param str artifact_source_id: Artifact source id filter ('artifactSourceId' query parameter).
        :rtype: [ProjectReference]
        """
        # NOTE: this endpoint is organization-scoped — no project route value is sent.
        query_parameters = {}
        if artifact_type is not None:
            query_parameters['artifactType'] = self._serialize.query('artifact_type', artifact_type, 'str')
        if artifact_source_id is not None:
            query_parameters['artifactSourceId'] = self._serialize.query('artifact_source_id', artifact_source_id, 'str')
        response = self._send(http_method='GET',
                              location_id='917ace4a-79d1-45a7-987c-7be4db4268fa',
                              version='4.0-preview.1',
                              query_parameters=query_parameters)
        return self._deserialize('[ProjectReference]', self._unwrap_collection(response))
    def get_releases(self, project=None, definition_id=None, definition_environment_id=None, search_text=None, created_by=None, status_filter=None, environment_status_filter=None, min_created_time=None, max_created_time=None, query_order=None, top=None, continuation_token=None, expand=None, artifact_type_id=None, source_id=None, artifact_version_id=None, source_branch_filter=None, is_deleted=None, tag_filter=None, property_filters=None):
        """GetReleases.
        [Preview API] Get a list of releases
        :param str project: Project ID or project name
        :param int definition_id: Releases from this release definition Id.
        :param int definition_environment_id: Releases with this definition environment Id.
        :param str search_text: Releases with names starting with searchText.
        :param str created_by: Releases created by this user.
        :param str status_filter: Releases that have this status.
        :param int environment_status_filter: Environment status filter ('environmentStatusFilter' query parameter).
        :param datetime min_created_time: Releases that were created after this time.
        :param datetime max_created_time: Releases that were created before this time.
        :param str query_order: Gets the results in the defined order of created date for releases. Default is descending.
        :param int top: Number of releases to get. Default is 50.
        :param int continuation_token: Gets the releases after the continuation token provided.
        :param str expand: The property that should be expanded in the list of releases.
        :param str artifact_type_id: Releases with given artifactTypeId will be returned. Values can be Build, Jenkins, GitHub, Nuget, Team Build (external), ExternalTFSBuild, Git, TFVC, ExternalTfsXamlBuild.
        :param str source_id: Unique identifier of the artifact used. e.g. For build it would be {projectGuid}:{BuildDefinitionId}, for Jenkins it would be {JenkinsConnectionId}:{JenkinsDefinitionId}, for TfsOnPrem it would be {TfsOnPremConnectionId}:{ProjectName}:{TfsOnPremDefinitionId}. For third-party artifacts e.g. TeamCity, BitBucket you may refer 'uniqueSourceIdentifier' inside vss-extension.json https://github.com/Microsoft/vsts-rm-extensions/blob/master/Extensions.
        :param str artifact_version_id: Releases with given artifactVersionId will be returned. E.g. in case of Build artifactType, it is buildId.
        :param str source_branch_filter: Releases with given sourceBranchFilter will be returned.
        :param bool is_deleted: Gets the soft deleted releases, if true.
        :param [str] tag_filter: A comma-delimited list of tags. Only releases with these tags will be returned.
        :param [str] property_filters: A comma-delimited list of extended properties to retrieve.
        :rtype: [Release]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        query_parameters = {}
        if definition_id is not None:
            query_parameters['definitionId'] = self._serialize.query('definition_id', definition_id, 'int')
        if definition_environment_id is not None:
            query_parameters['definitionEnvironmentId'] = self._serialize.query('definition_environment_id', definition_environment_id, 'int')
        if search_text is not None:
            query_parameters['searchText'] = self._serialize.query('search_text', search_text, 'str')
        if created_by is not None:
            query_parameters['createdBy'] = self._serialize.query('created_by', created_by, 'str')
        if status_filter is not None:
            query_parameters['statusFilter'] = self._serialize.query('status_filter', status_filter, 'str')
        if environment_status_filter is not None:
            query_parameters['environmentStatusFilter'] = self._serialize.query('environment_status_filter', environment_status_filter, 'int')
        if min_created_time is not None:
            query_parameters['minCreatedTime'] = self._serialize.query('min_created_time', min_created_time, 'iso-8601')
        if max_created_time is not None:
            query_parameters['maxCreatedTime'] = self._serialize.query('max_created_time', max_created_time, 'iso-8601')
        if query_order is not None:
            query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
        if top is not None:
            query_parameters['$top'] = self._serialize.query('top', top, 'int')
        if continuation_token is not None:
            query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'int')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
        if artifact_type_id is not None:
            query_parameters['artifactTypeId'] = self._serialize.query('artifact_type_id', artifact_type_id, 'str')
        if source_id is not None:
            query_parameters['sourceId'] = self._serialize.query('source_id', source_id, 'str')
        if artifact_version_id is not None:
            query_parameters['artifactVersionId'] = self._serialize.query('artifact_version_id', artifact_version_id, 'str')
        if source_branch_filter is not None:
            query_parameters['sourceBranchFilter'] = self._serialize.query('source_branch_filter', source_branch_filter, 'str')
        if is_deleted is not None:
            query_parameters['isDeleted'] = self._serialize.query('is_deleted', is_deleted, 'bool')
        if tag_filter is not None:
            # The REST API expects one comma-delimited string, not a repeated parameter.
            tag_filter = ",".join(tag_filter)
            query_parameters['tagFilter'] = self._serialize.query('tag_filter', tag_filter, 'str')
        if property_filters is not None:
            # Same comma-delimited convention as tagFilter.
            property_filters = ",".join(property_filters)
            query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
        response = self._send(http_method='GET',
                              location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
                              version='4.0-preview.4',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[Release]', self._unwrap_collection(response))
    def create_release(self, release_start_metadata, project):
        """CreateRelease.
        [Preview API] Create a release.
        :param :class:`<ReleaseStartMetadata> <release.v4_0.models.ReleaseStartMetadata>` release_start_metadata: Metadata to create a release.
        :param str project: Project ID or project name
        :rtype: :class:`<Release> <release.v4_0.models.Release>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        content = self._serialize.body(release_start_metadata, 'ReleaseStartMetadata')
        response = self._send(http_method='POST',
                              location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
                              version='4.0-preview.4',
                              route_values=route_values,
                              content=content)
        # The service responds with the newly created release.
        return self._deserialize('Release', response)
    def delete_release(self, project, release_id, comment=None):
        """DeleteRelease.
        [Preview API] Soft delete a release
        :param str project: Project ID or project name
        :param int release_id: Id of the release.
        :param str comment: Comment for deleting a release.
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        query_parameters = {}
        if comment is not None:
            query_parameters['comment'] = self._serialize.query('comment', comment, 'str')
        # Soft delete: no response body to deserialize.
        self._send(http_method='DELETE',
                   location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
                   version='4.0-preview.4',
                   route_values=route_values,
                   query_parameters=query_parameters)
    def get_release(self, project, release_id, include_all_approvals=None, property_filters=None):
        """GetRelease.
        [Preview API] Get a Release
        :param str project: Project ID or project name
        :param int release_id: Id of the release.
        :param bool include_all_approvals: Include all approvals in the result. Default is 'true'.
        :param [str] property_filters: A comma-delimited list of properties to include in the results.
        :rtype: :class:`<Release> <release.v4_0.models.Release>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        query_parameters = {}
        if include_all_approvals is not None:
            query_parameters['includeAllApprovals'] = self._serialize.query('include_all_approvals', include_all_approvals, 'bool')
        if property_filters is not None:
            # Collapse the list into the single comma-delimited value the API expects.
            property_filters = ",".join(property_filters)
            query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
        response = self._send(http_method='GET',
                              location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
                              version='4.0-preview.4',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('Release', response)
    def get_release_definition_summary(self, project, definition_id, release_count, include_artifact=None, definition_environment_ids_filter=None):
        """GetReleaseDefinitionSummary.
        [Preview API] Get release summary of a given definition Id.
        :param str project: Project ID or project name
        :param int definition_id: Id of the definition to get release summary.
        :param int release_count: Count of releases to be included in summary.
        :param bool include_artifact: Include artifact details.Default is 'false'.
        :param [int] definition_environment_ids_filter: Optional environment Ids; joined into a comma-delimited query value.
        :rtype: :class:`<ReleaseDefinitionSummary> <release.v4_0.models.ReleaseDefinitionSummary>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        query_parameters = {}
        if definition_id is not None:
            query_parameters['definitionId'] = self._serialize.query('definition_id', definition_id, 'int')
        if release_count is not None:
            query_parameters['releaseCount'] = self._serialize.query('release_count', release_count, 'int')
        if include_artifact is not None:
            query_parameters['includeArtifact'] = self._serialize.query('include_artifact', include_artifact, 'bool')
        if definition_environment_ids_filter is not None:
            # Ints must be stringified before joining into the delimited filter value.
            definition_environment_ids_filter = ",".join(map(str, definition_environment_ids_filter))
            query_parameters['definitionEnvironmentIdsFilter'] = self._serialize.query('definition_environment_ids_filter', definition_environment_ids_filter, 'str')
        response = self._send(http_method='GET',
                              location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
                              version='4.0-preview.4',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('ReleaseDefinitionSummary', response)
    def get_release_revision(self, project, release_id, definition_snapshot_revision, **kwargs):
        """GetReleaseRevision.
        [Preview API] Get release for a given revision number.
        :param str project: Project ID or project name
        :param int release_id: Id of the release.
        :param int definition_snapshot_revision: Definition snapshot revision number.
        :param kwargs: Optional 'callback' — a callable forwarded to the underlying stream download.
        :rtype: object (a streamed download of the plain-text revision content)
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        query_parameters = {}
        if definition_snapshot_revision is not None:
            query_parameters['definitionSnapshotRevision'] = self._serialize.query('definition_snapshot_revision', definition_snapshot_revision, 'int')
        response = self._send(http_method='GET',
                              location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
                              version='4.0-preview.4',
                              route_values=route_values,
                              query_parameters=query_parameters,
                              accept_media_type='text/plain')
        if "callback" in kwargs:
            callback = kwargs["callback"]
        else:
            callback = None
        return self._client.stream_download(response, callback=callback)
    def undelete_release(self, project, release_id, comment):
        """UndeleteRelease.
        [Preview API] Undelete a soft deleted release.
        :param str project: Project ID or project name
        :param int release_id: Id of release to be undeleted.
        :param str comment: Any comment for undeleting.
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        query_parameters = {}
        if comment is not None:
            query_parameters['comment'] = self._serialize.query('comment', comment, 'str')
        # PUT restores the soft-deleted release; no response body is expected.
        self._send(http_method='PUT',
                   location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
                   version='4.0-preview.4',
                   route_values=route_values,
                   query_parameters=query_parameters)
    def update_release(self, release, project, release_id):
        """UpdateRelease.
        [Preview API] Update a complete release object.
        :param :class:`<Release> <release.v4_0.models.Release>` release: Release object for update.
        :param str project: Project ID or project name
        :param int release_id: Id of the release to update.
        :rtype: :class:`<Release> <release.v4_0.models.Release>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        content = self._serialize.body(release, 'Release')
        # PUT replaces the whole release object (contrast with update_release_resource's PATCH).
        response = self._send(http_method='PUT',
                              location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
                              version='4.0-preview.4',
                              route_values=route_values,
                              content=content)
        return self._deserialize('Release', response)
    def update_release_resource(self, release_update_metadata, project, release_id):
        """UpdateReleaseResource.
        [Preview API] Update few properties of a release.
        :param :class:`<ReleaseUpdateMetadata> <release.v4_0.models.ReleaseUpdateMetadata>` release_update_metadata: Properties of release to update.
        :param str project: Project ID or project name
        :param int release_id: Id of the release to update.
        :rtype: :class:`<Release> <release.v4_0.models.Release>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        content = self._serialize.body(release_update_metadata, 'ReleaseUpdateMetadata')
        # PATCH applies only the supplied properties to the release.
        response = self._send(http_method='PATCH',
                              location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
                              version='4.0-preview.4',
                              route_values=route_values,
                              content=content)
        return self._deserialize('Release', response)
    def get_release_settings(self, project):
        """GetReleaseSettings.
        [Preview API] Gets the release settings
        :param str project: Project ID or project name
        :rtype: :class:`<ReleaseSettings> <release.v4_0.models.ReleaseSettings>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        response = self._send(http_method='GET',
                              location_id='c63c3718-7cfd-41e0-b89b-81c1ca143437',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('ReleaseSettings', response)
    def update_release_settings(self, release_settings, project):
        """UpdateReleaseSettings.
        [Preview API] Updates the release settings
        :param :class:`<ReleaseSettings> <release.v4_0.models.ReleaseSettings>` release_settings: Settings object to store.
        :param str project: Project ID or project name
        :rtype: :class:`<ReleaseSettings> <release.v4_0.models.ReleaseSettings>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        content = self._serialize.body(release_settings, 'ReleaseSettings')
        # PUT replaces the project's release settings and echoes the stored value back.
        response = self._send(http_method='PUT',
                              location_id='c63c3718-7cfd-41e0-b89b-81c1ca143437',
                              version='4.0-preview.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('ReleaseSettings', response)
    def get_definition_revision(self, project, definition_id, revision, **kwargs):
        """GetDefinitionRevision.
        [Preview API] Gets a specific revision of a release definition as plain text.
        :param str project: Project ID or project name
        :param int definition_id: Id of the release definition (URL route value).
        :param int revision: Revision number to fetch (URL route value).
        :param kwargs: Optional 'callback' — a callable forwarded to the underlying stream download.
        :rtype: object (a streamed download of the plain-text revision content)
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if definition_id is not None:
            route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
        if revision is not None:
            route_values['revision'] = self._serialize.url('revision', revision, 'int')
        response = self._send(http_method='GET',
                              location_id='258b82e0-9d41-43f3-86d6-fef14ddd44bc',
                              version='4.0-preview.1',
                              route_values=route_values,
                              accept_media_type='text/plain')
        if "callback" in kwargs:
            callback = kwargs["callback"]
        else:
            callback = None
        return self._client.stream_download(response, callback=callback)
    def get_release_definition_history(self, project, definition_id):
        """GetReleaseDefinitionHistory.
        [Preview API] Get revision history for a release definition
        :param str project: Project ID or project name
        :param int definition_id: Id of the definition.
        :rtype: [ReleaseDefinitionRevision]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if definition_id is not None:
            route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
        response = self._send(http_method='GET',
                              location_id='258b82e0-9d41-43f3-86d6-fef14ddd44bc',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('[ReleaseDefinitionRevision]', self._unwrap_collection(response))
    def get_summary_mail_sections(self, project, release_id):
        """GetSummaryMailSections.
        [Preview API] Gets the summary mail sections for a release.
        :param str project: Project ID or project name
        :param int release_id: Id of the release (URL route value).
        :rtype: [SummaryMailSection]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        response = self._send(http_method='GET',
                              location_id='224e92b2-8d13-4c14-b120-13d877c516f8',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('[SummaryMailSection]', self._unwrap_collection(response))
    def send_summary_mail(self, mail_message, project, release_id):
        """SendSummaryMail.
        [Preview API] Sends a summary mail for a release.
        :param :class:`<MailMessage> <release.v4_0.models.MailMessage>` mail_message: Mail message to send.
        :param str project: Project ID or project name
        :param int release_id: Id of the release (URL route value).
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        content = self._serialize.body(mail_message, 'MailMessage')
        # Fire-and-forget POST; the service returns no body to deserialize.
        self._send(http_method='POST',
                   location_id='224e92b2-8d13-4c14-b120-13d877c516f8',
                   version='4.0-preview.1',
                   route_values=route_values,
                   content=content)
    def get_source_branches(self, project, definition_id):
        """GetSourceBranches.
        [Preview API] Gets the source branch names for a release definition.
        :param str project: Project ID or project name
        :param int definition_id: Id of the release definition (URL route value).
        :rtype: [str]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if definition_id is not None:
            route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
        response = self._send(http_method='GET',
                              location_id='0e5def23-78b3-461f-8198-1558f25041c8',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('[str]', self._unwrap_collection(response))
    def add_definition_tag(self, project, release_definition_id, tag):
        """AddDefinitionTag.
        [Preview API] Adds a tag to a definition
        :param str project: Project ID or project name
        :param int release_definition_id: Id of the release definition to tag (URL route value).
        :param str tag: Tag to add (passed in the URL route, not the body).
        :rtype: [str] (the definition's resulting tag list)
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_definition_id is not None:
            route_values['releaseDefinitionId'] = self._serialize.url('release_definition_id', release_definition_id, 'int')
        if tag is not None:
            route_values['tag'] = self._serialize.url('tag', tag, 'str')
        response = self._send(http_method='PATCH',
                              location_id='3d21b4c8-c32e-45b2-a7cb-770a369012f4',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('[str]', self._unwrap_collection(response))
    def add_definition_tags(self, tags, project, release_definition_id):
        """AddDefinitionTags.
        [Preview API] Adds multiple tags to a definition
        :param [str] tags: Tags to add (sent as the request body, unlike the single-tag route variant).
        :param str project: Project ID or project name
        :param int release_definition_id: Id of the release definition to tag (URL route value).
        :rtype: [str] (the definition's resulting tag list)
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_definition_id is not None:
            route_values['releaseDefinitionId'] = self._serialize.url('release_definition_id', release_definition_id, 'int')
        content = self._serialize.body(tags, '[str]')
        response = self._send(http_method='POST',
                              location_id='3d21b4c8-c32e-45b2-a7cb-770a369012f4',
                              version='4.0-preview.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('[str]', self._unwrap_collection(response))
    def delete_definition_tag(self, project, release_definition_id, tag):
        """DeleteDefinitionTag.
        [Preview API] Deletes a tag from a definition
        :param str project: Project ID or project name
        :param int release_definition_id: Id of the release definition (URL route value).
        :param str tag: Tag to remove (URL route value).
        :rtype: [str] (the definition's remaining tag list)
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_definition_id is not None:
            route_values['releaseDefinitionId'] = self._serialize.url('release_definition_id', release_definition_id, 'int')
        if tag is not None:
            route_values['tag'] = self._serialize.url('tag', tag, 'str')
        response = self._send(http_method='DELETE',
                              location_id='3d21b4c8-c32e-45b2-a7cb-770a369012f4',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('[str]', self._unwrap_collection(response))
    def get_definition_tags(self, project, release_definition_id):
        """GetDefinitionTags.
        [Preview API] Gets the tags for a definition
        :param str project: Project ID or project name
        :param int release_definition_id: Id of the release definition.
        :rtype: [str]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_definition_id is not None:
            route_values['releaseDefinitionId'] = self._serialize.url('release_definition_id', release_definition_id, 'int')
        response = self._send(http_method='GET',
                              location_id='3d21b4c8-c32e-45b2-a7cb-770a369012f4',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('[str]', self._unwrap_collection(response))
    def add_release_tag(self, project, release_id, tag):
        """AddReleaseTag.
        [Preview API] Adds a tag to a releaseId
        :param str project: Project ID or project name
        :param int release_id: Id of the release to tag.
        :param str tag: Tag to add to the release.
        :rtype: [str]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        if tag is not None:
            route_values['tag'] = self._serialize.url('tag', tag, 'str')
        response = self._send(http_method='PATCH',
                              location_id='c5b602b6-d1b3-4363-8a51-94384f78068f',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('[str]', self._unwrap_collection(response))
    def add_release_tags(self, tags, project, release_id):
        """AddReleaseTags.
        [Preview API] Adds tag to a release
        :param [str] tags: Tags to add to the release.
        :param str project: Project ID or project name
        :param int release_id: Id of the release to tag.
        :rtype: [str]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        # Tag list travels in the request body; route values identify the release.
        content = self._serialize.body(tags, '[str]')
        response = self._send(http_method='POST',
                              location_id='c5b602b6-d1b3-4363-8a51-94384f78068f',
                              version='4.0-preview.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('[str]', self._unwrap_collection(response))
    def delete_release_tag(self, project, release_id, tag):
        """DeleteReleaseTag.
        [Preview API] Deletes a tag from a release
        :param str project: Project ID or project name
        :param int release_id: Id of the release.
        :param str tag: Tag to remove from the release.
        :rtype: [str]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        if tag is not None:
            route_values['tag'] = self._serialize.url('tag', tag, 'str')
        response = self._send(http_method='DELETE',
                              location_id='c5b602b6-d1b3-4363-8a51-94384f78068f',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('[str]', self._unwrap_collection(response))
    def get_release_tags(self, project, release_id):
        """GetReleaseTags.
        [Preview API] Gets the tags for a release
        :param str project: Project ID or project name
        :param int release_id: Id of the release.
        :rtype: [str]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        response = self._send(http_method='GET',
                              location_id='c5b602b6-d1b3-4363-8a51-94384f78068f',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('[str]', self._unwrap_collection(response))
    def get_tags(self, project):
        """GetTags.
        [Preview API] Gets all tags in the given project.
        :param str project: Project ID or project name
        :rtype: [str]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        response = self._send(http_method='GET',
                              location_id='86cee25a-68ba-4ba3-9171-8ad6ffc6df93',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('[str]', self._unwrap_collection(response))
    def get_tasks(self, project, release_id, environment_id, attempt_id=None):
        """GetTasks.
        [Preview API] Gets the tasks of a release environment.
        :param str project: Project ID or project name
        :param int release_id: Id of the release.
        :param int environment_id: Id of the release environment.
        :param int attempt_id: Optional deployment attempt; sent as a query parameter when given.
        :rtype: [ReleaseTask]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        if environment_id is not None:
            route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
        query_parameters = {}
        if attempt_id is not None:
            query_parameters['attemptId'] = self._serialize.query('attempt_id', attempt_id, 'int')
        response = self._send(http_method='GET',
                              location_id='36b276e0-3c70-4320-a63c-1a2e1466a0d1',
                              version='4.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[ReleaseTask]', self._unwrap_collection(response))
    def get_tasks_for_task_group(self, project, release_id, environment_id, release_deploy_phase_id):
        """GetTasksForTaskGroup.
        [Preview API] Gets the tasks of a deploy phase within a release environment.
        :param str project: Project ID or project name
        :param int release_id: Id of the release.
        :param int environment_id: Id of the release environment.
        :param int release_deploy_phase_id: Id of the deploy phase whose tasks are returned.
        :rtype: [ReleaseTask]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        if environment_id is not None:
            route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
        if release_deploy_phase_id is not None:
            route_values['releaseDeployPhaseId'] = self._serialize.url('release_deploy_phase_id', release_deploy_phase_id, 'int')
        # NOTE: this endpoint is on preview.2, unlike the surrounding preview.1 methods.
        response = self._send(http_method='GET',
                              location_id='4259191d-4b0a-4409-9fb3-09f22ab9bc47',
                              version='4.0-preview.2',
                              route_values=route_values)
        return self._deserialize('[ReleaseTask]', self._unwrap_collection(response))
    def get_artifact_type_definitions(self, project):
        """GetArtifactTypeDefinitions.
        [Preview API] Gets the artifact type definitions available in the project.
        :param str project: Project ID or project name
        :rtype: [ArtifactTypeDefinition]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        response = self._send(http_method='GET',
                              location_id='8efc2a3c-1fc8-4f6d-9822-75e98cecb48f',
                              version='4.0-preview.1',
                              route_values=route_values)
        return self._deserialize('[ArtifactTypeDefinition]', self._unwrap_collection(response))
    def get_artifact_versions(self, project, release_definition_id):
        """GetArtifactVersions.
        [Preview API] Gets the artifact versions for a release definition.
        :param str project: Project ID or project name
        :param int release_definition_id: Id of the release definition; sent as a query parameter.
        :rtype: :class:`<ArtifactVersionQueryResult> <release.v4_0.models.ArtifactVersionQueryResult>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        query_parameters = {}
        if release_definition_id is not None:
            query_parameters['releaseDefinitionId'] = self._serialize.query('release_definition_id', release_definition_id, 'int')
        response = self._send(http_method='GET',
                              location_id='30fc787e-a9e0-4a07-9fbc-3e903aa051d2',
                              version='4.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        # Single wrapped object, not a collection: no _unwrap_collection here.
        return self._deserialize('ArtifactVersionQueryResult', response)
    def get_artifact_versions_for_sources(self, artifacts, project):
        """GetArtifactVersionsForSources.
        [Preview API] Gets the artifact versions for the given artifact sources.
        :param [Artifact] artifacts: Artifact sources to query; sent in the request body.
        :param str project: Project ID or project name
        :rtype: :class:`<ArtifactVersionQueryResult> <release.v4_0.models.ArtifactVersionQueryResult>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        content = self._serialize.body(artifacts, '[Artifact]')
        response = self._send(http_method='POST',
                              location_id='30fc787e-a9e0-4a07-9fbc-3e903aa051d2',
                              version='4.0-preview.1',
                              route_values=route_values,
                              content=content)
        # Single wrapped object, not a collection: no _unwrap_collection here.
        return self._deserialize('ArtifactVersionQueryResult', response)
    def get_release_work_items_refs(self, project, release_id, base_release_id=None, top=None):
        """GetReleaseWorkItemsRefs.
        [Preview API] Gets the work item references associated with a release.
        :param str project: Project ID or project name
        :param int release_id: Id of the release.
        :param int base_release_id: Optional baseline release to diff against; sent as a query parameter.
        :param int top: Optional maximum number of results ($top query parameter).
        :rtype: [ReleaseWorkItemRef]
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if release_id is not None:
            route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
        query_parameters = {}
        if base_release_id is not None:
            query_parameters['baseReleaseId'] = self._serialize.query('base_release_id', base_release_id, 'int')
        if top is not None:
            query_parameters['$top'] = self._serialize.query('top', top, 'int')
        response = self._send(http_method='GET',
                              location_id='4f165cc0-875c-4768-b148-f12f78769fab',
                              version='4.0-preview.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('[ReleaseWorkItemRef]', self._unwrap_collection(response))
|
"""Unit tests for the JMeter CSV tests collector."""
from .base import JMeterCSVTestCase
class JMeterCSVTestsTest(JMeterCSVTestCase):
    """Unit tests for the JMeter CSV tests collector."""

    METRIC_TYPE = "tests"

    async def _collect_count(self, **source_parameters):
        """Apply the given source parameters, then collect the metric from the full CSV fixture."""
        for parameter_name, parameter_value in source_parameters.items():
            self.set_source_parameter(parameter_name, parameter_value)
        return await self.collect(get_request_text=self.JMETER_CSV)

    async def test_no_transactions(self):
        """Test that the number of tests is 0 if there are no transactions in the CSV."""
        header_only = self.JMETER_CSV.splitlines()[0]
        response = await self.collect(get_request_text=header_only)
        self.assert_measurement(response, value="0")

    async def test_all_samples(self):
        """Test retrieving all samples."""
        response = await self._collect_count()
        self.assert_measurement(response, value="4", entities=[])

    async def test_failed_samples(self):
        """Test retrieving the failed samples."""
        response = await self._collect_count(test_result=["failed"])
        self.assert_measurement(response, value="1", entities=[])

    async def test_successful_samples(self):
        """Test retrieving the successful samples."""
        response = await self._collect_count(test_result=["success"])
        self.assert_measurement(response, value="3", entities=[])

    async def test_ignore_transaction(self):
        """Test that a transaction can be ignored."""
        response = await self._collect_count(transactions_to_ignore=["/home"])
        self.assert_measurement(response, value="2", entities=[])

    async def test_include_transaction(self):
        """Test that a transaction can be included."""
        response = await self._collect_count(transactions_to_include=["/home"])
        self.assert_measurement(response, value="2", entities=[])
|
# -*- coding: utf-8 -*-
#################################################################################
# Author : Webkul Software Pvt. Ltd. (<https://webkul.com/>)
# Copyright(c): 2015-Present Webkul Software Pvt. Ltd.
# License URL : https://store.webkul.com/license.html/
# All Rights Reserved.
#
#
#
# This program is copyright property of the author mentioned above.
# You can`t redistribute it and/or modify it.
#
#
# You should have received a copy of the License along with this program.
# If not, see <https://store.webkul.com/license.html/>
#################################################################################
from openerp import models, fields, api, _
from openerp.exceptions import except_orm, Warning, RedirectWarning
from datetime import datetime, timedelta
from lxml import etree
from openerp.osv.orm import setup_modifiers
import openerp.addons.decimal_precision as dp
import decimal
import logging
_logger = logging.getLogger(__name__)
class AccountInvoice(models.Model):
    _inherit = 'account.invoice'

    @api.multi
    def action_invoice_paid(self):
        """When an invoice reaches the 'paid' state, activate the seller
        membership(s) bought through it and stamp their validity dates."""
        res = super(AccountInvoice, self).action_invoice_paid()
        membership_model = self.env["seller.membership"].sudo()
        for invoice in self:
            if invoice.state != "paid":
                continue
            memberships = membership_model.search([('account_invoice_id', '=', invoice.id)])
            for membership in memberships:
                membership.disable_all_make_active_membership()
                plan_dates = membership.mp_membership_plan_id.product_tmpl_id.get_mp_membership_plan_date_range()
                if plan_dates:
                    membership.date_from = plan_dates.get("date_from", False)
                    membership.date_to = plan_dates.get("date_to", False)
        return res
class AccountInvoiceLine(models.Model):
    _inherit = 'account.invoice.line'

    @api.model
    def create(self, vals):
        """Create the invoice line and, when it sells a marketplace membership
        product on a customer invoice, also create the matching seller.membership
        record (unless one already exists for this line)."""
        invoice_line_obj = super(AccountInvoiceLine, self).create(vals)
        mp_membership_obj = self.env['seller.membership'].sudo()
        if invoice_line_obj.invoice_id.type == 'out_invoice' and invoice_line_obj.product_id.wk_mp_membership and not mp_membership_obj.search([('account_invoice_line_id', '=', invoice_line_obj.id)]):
            # Product in line is a marketplace membership product
            mp_membership_plan_dates = invoice_line_obj.product_id.product_tmpl_id.get_mp_membership_plan_date_range()
            date_from = mp_membership_plan_dates.get("date_from", False)
            date_to = mp_membership_plan_dates.get("date_to", False)
            # If the invoice date falls inside the default plan window, recompute
            # the window anchored on the invoice date instead.
            # NOTE(review): these comparisons rely on ISO-formatted date strings
            # (and on date_from/date_to not being False) -- confirm upstream.
            if invoice_line_obj.invoice_id.date_invoice and invoice_line_obj.invoice_id.date_invoice > date_from and invoice_line_obj.invoice_id.date_invoice < date_to:
                mp_membership_plan_dates = invoice_line_obj.product_id.product_tmpl_id.get_mp_membership_plan_date_range(date=invoice_line_obj.invoice_id.date_invoice)
                date_from = mp_membership_plan_dates.get("date_from", False)
                date_to = mp_membership_plan_dates.get("date_to", False)
            # date_from = invoice_line_obj.invoice_id.date_invoice
            mp_membership_obj.sudo().create({
                'partner_id': invoice_line_obj.invoice_id.partner_id.id,
                'mp_membership_plan_id': invoice_line_obj.product_id.id,
                'mp_membership_fee': invoice_line_obj.price_unit,
                'date': fields.Date.today(),
                'mp_membership_date_from': date_from,
                'mp_membership_date_to': date_to,
                'account_invoice_line_id': invoice_line_obj.id,
                'no_of_product': invoice_line_obj.product_id.no_of_product,
                'order_line_id': invoice_line_obj.sale_line_ids.ids[0] if invoice_line_obj.sale_line_ids else False
            })
            # Untick the seller's free-membership flag and subscribe the partner
            # to the invoice's message thread.
            invoice_line_obj.invoice_id.partner_id.free_membership = False
            invoice_line_obj.invoice_id.message_partner_ids = [(4, invoice_line_obj.invoice_id.partner_id.id)]
        return invoice_line_obj
|
'''
1) Brute force would be O(n*n)
2) Can probably be solved with two pointers?
'''
from typing import List
class SolutionTwoPointer:
    def maxArea(self, height: List[int]) -> int:
        """Two-pointer scan: track the best container while moving inward
        from whichever end has the shorter wall."""
        lo, hi = 0, len(height) - 1
        best = 0
        while lo < hi:
            depth = height[lo] if height[lo] < height[hi] else height[hi]
            best = max(best, depth * (hi - lo))
            # The shorter wall limits the area, so advance that side.
            if height[lo] < height[hi]:
                lo += 1
            else:
                hi -= 1
        return best
class SolutionBruteForce:
    def maxArea(self, height: List[int]) -> int:
        """Exhaustive O(n^2) reference solution: try every pair of walls."""
        best = 0
        count = len(height)
        for i in range(count - 1):
            for j in range(i + 1, count):
                area = (j - i) * min(height[i], height[j])
                if area > best:
                    best = area
        return best
|
import matplotlib
matplotlib.use('Agg')
import datetime
import os
import random
import time
import gc
import numpy as np
from PIL import Image
from skimage import io as skio
import torch
import torchvision.transforms as standard_transforms
import torchvision.utils as vutils
from torch import optim
from torch.autograd import Variable
from torch.backends import cudnn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from datasets import list_dataset
from models import *
from utils import check_mkdir, evaluate, AverageMeter, CrossEntropyLoss2d, confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
import sys
cudnn.benchmark = True  # let cuDNN auto-tune convolution algorithms for fixed-size inputs
outp_path = './outputs'
# Command line: <conv_name> <fold_name> <data_name> <task_name>
conv_name = sys.argv[1]
fold_name = sys.argv[2]
data_name = sys.argv[3]
task_name = sys.argv[4]
# Experiment identifier; also names the checkpoint and output directories.
exp_name = conv_name + '_' + data_name + '_' + task_name + '_' + fold_name
def normalize_rows(array):
    """Return a float copy of a 2-D *array* with each row scaled to sum to 1.

    Used to turn a confusion matrix of counts into per-class rates.

    :param array: 2-D numpy array.
    :return: float array of the same shape; rows with sum 0 yield NaN/inf,
        matching the original per-row division.
    """
    # Vectorized replacement for the original per-row Python loop, which also
    # shadowed the builtin `sum`.
    row_totals = array.sum(axis=1, keepdims=True)
    return array / row_totals
def main():
    """Build the network named on the command line, load its best checkpoint
    and run the statistical test pass over the test split."""
    if conv_name == 'segnet':
        net = segnet(num_classes=list_dataset.num_classes, in_channels=3).cuda()
    else:
        # The original fell through silently and crashed later with a NameError
        # on `net`; fail fast with a clear message instead.
        raise ValueError('Unsupported conv_name: %s' % conv_name)
    model_path = './ckpt/' + exp_name + '/best.pth'
    net.load_state_dict(torch.load(model_path))
    net.eval()

    batch_size = 1
    num_workers = 1

    test_set = list_dataset.ListDataset('test', data_name, task_name, fold_name, 'statistical')
    test_loader = DataLoader(test_set, batch_size=batch_size, num_workers=num_workers, shuffle=False)

    check_mkdir(outp_path)
    check_mkdir(os.path.join(outp_path, exp_name))

    test(test_loader, net, task_name)
def test(test_loader, net, task_name):
    """Run inference over *test_loader*, save colorized prediction maps and
    print per-image and aggregate metrics plus confusion-matrix heat maps.

    Images are bucketed by filename into HR ('neither marker'), bicubic
    ('bicubic' in the name) and super-resolved ('result' in the name) groups,
    and each group is evaluated separately.

    Relies on module globals: outp_path, exp_name, data_name.
    """
    net.eval()
    check_mkdir(os.path.join(outp_path, exp_name, 'best'))
    criterion = CrossEntropyLoss2d(size_average=False).cuda()
    # Per-group accumulators of predictions and ground truths.
    hr_preds = []
    hr_gts = []
    bicubic_preds = []
    bicubic_gts = []
    sr_preds = []
    sr_gts = []
    for vi, data in enumerate(test_loader):
        inputs, gts, img_name = data
        inputs = inputs.float()
        inputs, gts = inputs.cuda(), gts.cuda()
        #inputs = inputs.sub(inputs.mean()).div(inputs.std())
        N = inputs.size(0)
        inputs = Variable(inputs).cuda()
        gts = Variable(gts).cuda()
        outputs = net(inputs)
        # Class map: argmax over the channel dimension (batch size is 1).
        predictions = outputs.data.max(1)[1].squeeze_(1).squeeze_(0).cpu().numpy()
        loss = criterion(outputs, gts)
        # Bucket the sample by its filename marker.
        if 'bicubic' in img_name[0]:
            bicubic_preds.append(predictions)
            bicubic_gts.append(gts.data.squeeze_(0).cpu().numpy())
        elif 'result' in img_name[0]:
            sr_preds.append(predictions)
            sr_gts.append(gts.data.squeeze_(0).cpu().numpy())
        else:
            hr_preds.append(predictions)
            hr_gts.append(gts.data.squeeze_(0).cpu().numpy())
        acc, acc_cls, mean_iou, iou, fwavacc, kappa = evaluate([predictions], [gts.data.squeeze_(0).cpu().numpy()], list_dataset.num_classes, task_name)
        print(img_name[0], loss.item())
        print('[acc %.4f], [acc_cls %.4f], [iou %.4f], [fwavacc %.4f], [kappa %.4f]' % (acc, acc_cls, mean_iou, fwavacc, kappa))
        tmp_path = os.path.join(outp_path, exp_name, 'best', img_name[0])
        prds = predictions
        h, w = prds.shape
        # Colorize the prediction map with the dataset-specific palette and save it.
        if data_name == 'grss_semantic':
            new = np.zeros((h, w, 3), dtype=np.uint8)
            label = gts.data.squeeze_(0).cpu().numpy()
            for i in range(h):
                for j in range(w):
                    # -1 in the ground truth marks ignored pixels (rendered black).
                    if label[i][j] == -1:
                        new[i][j] = [0,0,0]
                    elif prds[i][j] == 0:
                        new[i][j] = [255,0,255]
                    elif prds[i][j] == 1:
                        new[i][j] = [0,255,0]
                    elif prds[i][j] == 2:
                        new[i][j] = [255,0,0]
                    elif prds[i][j] == 3:
                        new[i][j] = [0,255,255]
                    elif prds[i][j] == 4:
                        new[i][j] = [160,32,240]
                    elif prds[i][j] == 5:
                        new[i][j] = [46,139,87]
                    else:
                        sys.exit('Invalid prediction')
            skio.imsave(tmp_path+'.png', new)
        elif data_name == 'coffee_1_semantic' or data_name == 'coffee_2_semantic' or data_name == 'coffee_3_semantic':
            # Binary task: save a grayscale mask (0 = non-coffee, 255 = coffee).
            new = np.zeros((h, w), dtype=np.uint8)
            for i in range(h):
                for j in range(w):
                    if prds[i][j] == 0:
                        new[i][j] = 0
                    elif prds[i][j] == 1:
                        new[i][j] = 255
                    else:
                        sys.exit('Invalid prediction')
            skio.imsave(tmp_path+'.png', new)
        elif data_name == 'vaihingen_semantic':
            new = np.zeros((h, w, 3), dtype=np.uint8)
            for x in range(h):
                for y in range(w):
                    if prds[x][y] == 0:
                        new[x][y] = [255,255,255]
                    elif prds[x][y] == 1:
                        new[x][y] = [0,0,255]
                    elif prds[x][y] == 2:
                        new[x][y] = [0,255,255]
                    elif prds[x][y] == 3:
                        new[x][y] = [0,255,0]
                    elif prds[x][y] == 4:
                        new[x][y] = [255,255,0]
                    elif prds[x][y] == 5:
                        new[x][y] = [255,0,0]
                    else:
                        sys.exit('Invalid prediction')
            skio.imsave(tmp_path+'.png', new)
    # Aggregate reporting: row-normalized confusion-matrix heat maps and
    # overall metrics per group, written to heat_maps/<exp_name>/.
    if data_name == 'grss_semantic':
        y_labels = ['Road', 'Tree', 'Red roof', 'Grey roof', 'Concrete\nroof', 'Vegetation']
        sr_heatmap = normalize_rows(confusion_matrix(sr_preds, sr_gts, list_dataset.num_classes))
        bicubic_heatmap = normalize_rows(confusion_matrix(bicubic_preds, bicubic_gts, list_dataset.num_classes))
        hr_heatmap = normalize_rows(confusion_matrix(hr_preds, hr_gts, list_dataset.num_classes))
        print('\nFinal:')
        print('HR')
        print('acc: %.4f\nacc_cls: %.4f\nmean_iou: %.4f\niou: %s\nfwavacc: %.4f\nkappa: %.4f' % evaluate(hr_preds, hr_gts, list_dataset.num_classes, task_name))
        print('SR')
        print('acc: %.4f\nacc_cls: %.4f\nmean_iou: %.4f\niou: %s\nfwavacc: %.4f\nkappa: %.4f' % evaluate(sr_preds, sr_gts, list_dataset.num_classes, task_name))
        print('Bicubic')
        print('acc: %.4f\nacc_cls: %.4f\nmean_iou: %.4f\niou: %s\nfwavacc: %.4f\nkappa: %.4f' % evaluate(bicubic_preds, bicubic_gts, list_dataset.num_classes, task_name))
        fig = plt.figure(figsize=(6,6))
        ax = sns.heatmap(sr_heatmap, linewidth=0.5, cmap='Blues', annot=True, yticklabels=y_labels, xticklabels=False)
        fig.savefig('heat_maps/'+exp_name+'/'+'sr.png')
        fig = plt.figure(figsize=(6,6))
        ax = sns.heatmap(bicubic_heatmap, linewidth=0.5, cmap='Blues', annot=True, yticklabels=y_labels, xticklabels=False)
        fig.savefig('heat_maps/'+exp_name+'/'+'bicubic.png')
        fig = plt.figure(figsize=(6,6))
        ax = sns.heatmap(hr_heatmap, linewidth=0.5, cmap='Blues', annot=True, yticklabels=y_labels, xticklabels=False)
        fig.savefig('heat_maps/'+exp_name+'/'+'hr.png')
    elif data_name == 'coffee_1_semantic' or data_name == 'coffee_2_semantic' or data_name == 'coffee_3_semantic':
        y_labels = ['non-coffee', 'coffee']
        sr_heatmap = normalize_rows(confusion_matrix(sr_preds, sr_gts, list_dataset.num_classes))
        bicubic_heatmap = normalize_rows(confusion_matrix(bicubic_preds, bicubic_gts, list_dataset.num_classes))
        hr_heatmap = normalize_rows(confusion_matrix(hr_preds, hr_gts, list_dataset.num_classes))
        print('\nFinal:')
        print('HR')
        print('acc: %.4f\nacc_cls: %.4f\nmean_iou: %.4f\niou: %s\nfwavacc: %.4f\nkappa: %.4f' % evaluate(hr_preds, hr_gts, list_dataset.num_classes, task_name))
        print('SR')
        print('acc: %.4f\nacc_cls: %.4f\nmean_iou: %.4f\niou: %s\nfwavacc: %.4f\nkappa: %.4f' % evaluate(sr_preds, sr_gts, list_dataset.num_classes, task_name))
        print('Bicubic')
        print('acc: %.4f\nacc_cls: %.4f\nmean_iou: %.4f\niou: %s\nfwavacc: %.4f\nkappa: %.4f' % evaluate(bicubic_preds, bicubic_gts, list_dataset.num_classes, task_name))
        sns.set(font_scale=1.3)
        fig = plt.figure(figsize=(3.5,3.5))
        ax = sns.heatmap(sr_heatmap, linewidth=0.5, cmap='Blues', annot=True, yticklabels=y_labels, xticklabels=False)
        fig.savefig('heat_maps/'+exp_name+'/'+'sr.png')
        fig = plt.figure(figsize=(3.5,3.5))
        ax = sns.heatmap(bicubic_heatmap, linewidth=0.5, cmap='Blues', annot=True, yticklabels=y_labels, xticklabels=False)
        fig.savefig('heat_maps/'+exp_name+'/'+'bicubic.png')
        fig = plt.figure(figsize=(3.5,3.5))
        ax = sns.heatmap(hr_heatmap, linewidth=0.5, cmap='Blues', annot=True, yticklabels=y_labels, xticklabels=False)
        fig.savefig('heat_maps/'+exp_name+'/'+'hr.png')
    elif data_name == 'vaihingen_semantic':
        y_labels = ['Impervious\nsurfaces', 'Building', 'Low\nvegetation', 'Tree', 'Car']
        sr_heatmap = normalize_rows(confusion_matrix(sr_preds, sr_gts, list_dataset.num_classes))
        bicubic_heatmap = normalize_rows(confusion_matrix(bicubic_preds, bicubic_gts, list_dataset.num_classes))
        hr_heatmap = normalize_rows(confusion_matrix(hr_preds, hr_gts, list_dataset.num_classes))
        # Drop the last row/column of each matrix before plotting
        # (presumably the clutter/ignore class -- TODO confirm).
        sr_heatmap = np.delete(sr_heatmap, -1, axis=0)
        sr_heatmap = np.delete(sr_heatmap, -1, axis=1)
        bicubic_heatmap = np.delete(bicubic_heatmap, -1, axis=0)
        bicubic_heatmap = np.delete(bicubic_heatmap, -1, axis=1)
        hr_heatmap = np.delete(hr_heatmap, -1, axis=0)
        hr_heatmap = np.delete(hr_heatmap, -1, axis=1)
        print('\nFinal:')
        print('HR')
        print('acc: %.4f\nacc_cls: %.4f\nmean_iou: %.4f\niou: %s\nfwavacc: %.4f\nkappa: %.4f' % evaluate(hr_preds, hr_gts, list_dataset.num_classes, task_name))
        print('SR')
        print('acc: %.4f\nacc_cls: %.4f\nmean_iou: %.4f\niou: %s\nfwavacc: %.4f\nkappa: %.4f' % evaluate(sr_preds, sr_gts, list_dataset.num_classes, task_name))
        print('Bicubic')
        print('acc: %.4f\nacc_cls: %.4f\nmean_iou: %.4f\niou: %s\nfwavacc: %.4f\nkappa: %.4f' % evaluate(bicubic_preds, bicubic_gts, list_dataset.num_classes, task_name))
        fig = plt.figure(figsize=(5,5))
        ax = sns.heatmap(sr_heatmap, linewidth=0.5, cmap='Blues', annot=True, yticklabels=y_labels, xticklabels=False)
        fig.savefig('heat_maps/'+exp_name+'/'+'sr.png')
        fig = plt.figure(figsize=(5,5))
        ax = sns.heatmap(bicubic_heatmap, linewidth=0.5, cmap='Blues', annot=True, yticklabels=y_labels, xticklabels=False)
        fig.savefig('heat_maps/'+exp_name+'/'+'bicubic.png')
        fig = plt.figure(figsize=(5,5))
        ax = sns.heatmap(hr_heatmap, linewidth=0.5, cmap='Blues', annot=True, yticklabels=y_labels, xticklabels=False)
        fig.savefig('heat_maps/'+exp_name+'/'+'hr.png')
# Script entry point; expects conv/fold/data/task names on the command line.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
from __future__ import print_function
"""
Process a FireEye alerts API export csv and convert each row to an incident report and submitting to TruSTAR:
"""
import json
import sys
import time
import pandas as pd
from trustar import TruStar, Report
# When True, reports are submitted to the configured enclaves; set to False
# to submit to the community instead.
do_enclave_submissions = True
def _is_fe_test_event(alert):
    """Return True when the alert's 'virus' distinguisher marks it as a FireEye self-test event."""
    try:
        distinguishers = alert['distinguishers']
        return 'virus' in distinguishers and distinguishers['virus'] == 'fetestevent'
    except TypeError:
        # Non-indexable distinguishers (e.g. None): treat as a real alert,
        # matching the original except-TypeError behavior.
        return False


def _write_tracking_file(file_name, skipped_alerts):
    """Write the skipped alerts to *file_name*, one numbered entry per alert."""
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(file_name, 'w') as trackfile:
        for number, item in enumerate(skipped_alerts, start=1):
            trackfile.write("\n\n**** {:d}: Display ID {} ****\n\n{}".format(number, item['displayId'], item))


def filter_false_positive(df, process_time):
    """Filter FireEye alerts, dropping analyst-confirmed false positives and FE test events.

    :param df: mapping/DataFrame exposing an 'alerts' column of alert dicts.
    :param process_time: timestamp string used to name the tracking file.
    :return: list of alerts that passed the filter; skipped alerts are logged
        to tracking_fetest_<process_time>.txt.
    """
    result = []
    track = []
    for alert in df['alerts']:
        if alert.get('closedState') == 'False Positive':
            track.append(alert)  # analyst-confirmed false positive
        elif 'distinguishers' in alert:
            if _is_fe_test_event(alert):
                track.append(alert)  # FireEye self-test event
            else:
                result.append(alert)
        elif 'closedState' in alert:
            result.append(alert)
        # NOTE: alerts with neither 'closedState' nor 'distinguishers' are
        # dropped without tracking -- preserved from the original behavior.
    _write_tracking_file('tracking_fetest_' + process_time + '.txt', track)
    return result
def filter_win_methodology(df, process_time):
    """Drop 'WINDOWS METHODOLOGY' alerts from a list of FireEye alert dicts.

    :param df: iterable of alert dicts (each with 'message' and 'displayId').
    :param process_time: timestamp string used to name the tracking file.
    :return: list of alerts whose message does not contain 'WINDOWS METHODOLOGY'.
    """
    result = []
    track = []
    for alert in df:
        if 'WINDOWS METHODOLOGY' in alert['message']:
            track.append(alert)
        else:
            result.append(alert)
    # Record the skipped alerts; 'with' closes the handle (the original leaked it).
    with open('tracking_winMethodology_' + process_time + '.txt', 'w') as trackfile:
        for number, item in enumerate(track, start=1):
            trackfile.write("\n\n**** {:d}: Display ID {} ****\n\n{}".format(number, item['displayId'], item))
    return result
def filter_bash_shellshock(df, process_time):
    """Drop 'BASH [Shellshock HTTP]' alerts from a list of FireEye alert dicts.

    :param df: iterable of alert dicts (each with 'message' and 'displayId').
    :param process_time: timestamp string used to name the tracking file.
    :return: list of alerts whose message does not contain 'BASH [Shellshock HTTP]'.
    """
    result = []
    track = []
    for alert in df:
        if 'BASH [Shellshock HTTP]' in alert['message']:
            track.append(alert)
        else:
            result.append(alert)
    # Record the skipped alerts; 'with' closes the handle (the original leaked it).
    with open('tracking_bashShellShock_' + process_time + '.txt', 'w') as trackfile:
        for number, item in enumerate(track, start=1):
            trackfile.write("\n\n**** {:d}: Display ID {} ****\n\n{}".format(number, item['displayId'], item))
    return result
def filter_webapp_attack(df, process_time):
    """Drop 'METHODOLOGY - WEB APP ATTACK' alerts from a list of FireEye alert dicts.

    (The original docstring was copy-pasted from the shellshock filter.)

    :param df: iterable of alert dicts (each with 'message' and 'displayId').
    :param process_time: timestamp string used to name the tracking file.
    :return: list of alerts whose message does not contain 'METHODOLOGY - WEB APP ATTACK'.
    """
    result = []
    track = []
    for alert in df:
        if 'METHODOLOGY - WEB APP ATTACK' in alert['message']:
            track.append(alert)
        else:
            result.append(alert)
    # Record the skipped alerts; 'with' closes the handle (the original leaked it).
    with open('tracking_webAppAttack_' + process_time + '.txt', 'w') as trackfile:
        for number, item in enumerate(track, start=1):
            trackfile.write("\n\n**** {:d}: Display ID {} ****\n\n{}".format(number, item['displayId'], item))
    return result
def process_alert(file_name):
    """Strip the leading metadata object from a FireEye alerts export.

    Everything up to and including the first '}' plus the character after it
    is dropped and replaced with a single opening '{', turning the remainder
    into a parseable JSON document.

    :param file_name: path of the export file.
    :return: the transformed JSON string.
    """
    # 'with' closes the file promptly (the original leaked the handle).
    with open(file_name, 'r') as export_file:
        raw = export_file.read()
    closing_brace = raw.find("}")
    return "{" + raw[closing_brace + 2:]
def main(inputfile):
    """Convert a FireEye alerts export into TruSTAR incident reports and submit them.

    Pipeline: strip export metadata, filter false positives / FE test events /
    noisy methodology alerts, format each remaining alert as a report body and
    submit it (with a pacing sleep between submissions).

    NOTE(review): `long` and the str.encode usage below are Python 2 idioms;
    this script will NameError on Python 3 -- confirm the target interpreter.
    """
    ts = TruStar()
    df = pd.read_json(process_alert(inputfile))
    process_time = time.strftime('%Y-%m-%d %H:%M', time.localtime(time.time()))
    # Filtering chain; each stage also writes its own tracking file.
    filtered_falsepositive = filter_false_positive(df, process_time)
    filtered_winmethodology = filter_win_methodology(filtered_falsepositive, process_time)
    filtered_bashshellshock = filter_bash_shellshock(filtered_winmethodology, process_time)
    filtered_data = filter_webapp_attack(filtered_bashshellshock, process_time)
    all_reports = []
    for alert in filtered_data:
        # construct report body: one "key: value" line per alert field
        content = ""
        for key in alert:
            type_value = type(alert[key])
            if type_value == list or type_value == int or type_value == long or type_value == bool \
                    or type_value == dict or alert[key] is None:
                content += key + ': ' + str(alert[key]).replace('u\'', '\'') + '\n'
            else:
                content += key + ': ' + str(alert[key].encode('ascii', 'ignore')) + '\n'
        # construct and append report
        current_report = Report(title=str(alert['displayId']) + ' ' + str(alert['message'].encode('utf-8')),
                                body=content,
                                time_began=str(alert['createDate']),
                                is_enclave=True,
                                enclave_ids=ts.enclave_ids)
        all_reports.append(current_report)
    if do_enclave_submissions:
        for staged_report in all_reports:
            start_time = time.time()
            try:
                report = ts.submit_report(report=staged_report)
                print(report)
            except Exception as e:
                print("Submission failed with error: {}".format(str(e)))
                raise
            end_time = time.time()
            delta_time = end_time - start_time
            print("Submitted report title {} as TruSTAR IR {}".format(report.title, report.id) +
                  " Time: " + str(delta_time))
            if report.indicators is not None:
                print("Extracted the following indicators: {}"
                      .format(json.dumps([indicator.to_dict() for indicator in report.indicators], indent=2)))
            print()
            # Pace submissions to stay under the API rate limit.
            time.sleep(3)
if __name__ == '__main__':
main(sys.argv[1])
|
from tkinter import *
# import the themed-widget module (ttk)
from tkinter import ttk
from tkinter import colorchooser
import math
class App:
    def __init__(self, master):
        """Set up the drawing application's state on top of *master* (the Tk root)."""
        self.master = master
        # initial outline width (bound to the width OptionMenu)
        self.width = IntVar()
        self.width.set(1)
        # initial outline color
        self.outline = 'black'
        # initial fill color (None means no fill)
        self.fill = None
        # previous point while dragging with the left button
        self.prevx = self.prevy = -10
        # first point of the current drag
        self.firstx = self.firsty = -10
        # previous point while moving an item by dragging with the right button
        self.mv_prevx = self.mv_prevy = -10
        # item_type selects which shape to draw (index into the toolbar buttons)
        self.item_type = 0
        self.points = []
        self.init_widgets()
        self.temp_item = None
        self.temp_items = []
        # currently selected canvas item
        self.choose_item = None
    # build the UI widgets
    def init_widgets(self):
        """Create the canvas, the shape-selection toolbar and the color/width controls."""
        self.cv = Canvas(root, background='white')
        self.cv.pack(fill=BOTH, expand=True)
        # bind handlers for left-button drag and left-button release
        self.cv.bind('<B1-Motion>', self.drag_handler)
        self.cv.bind('<ButtonRelease-1>', self.release_handler)
        # bind a handler for left-button double-click
        self.cv.bind('<Double-1>', self.double_handler)
        f = ttk.Frame(self.master)
        f.pack(fill=X)
        self.bns = []
        # create one toolbar button per drawable shape
        # (labels: line, rectangle, ellipse, polygon, pencil)
        for i, lb in enumerate(('直线', '矩形', '椭圆', '多边形', '铅笔')):
            bn = Button(f, text=lb, command=lambda i=i: self.choose_type(i))
            bn.pack(side=LEFT, ipadx=8, ipady=5, padx=5)
            self.bns.append(bn)
        # select the line tool by default
        self.bns[self.item_type]['relief'] = SUNKEN
        ttk.Button(f, text='边框颜色',
                   command=self.choose_outline).pack(side=LEFT, ipadx=8, ipady=5, padx=5)
        ttk.Button(f, text='填充颜色',
                   command=self.choose_fill).pack(side=LEFT, ipadx=8, ipady=5, padx=5)
        om = ttk.OptionMenu(f,
                            self.width,  # bound variable
                            '1',  # initial selection
                            '0',  # the remaining values become menu entries
                            '1',
                            '2',
                            '3',
                            '4',
                            '5',
                            '6',
                            '7',
                            '8',
                            command=None)
        om.pack(side=LEFT, ipadx=8, ipady=5, padx=5)
def choose_type(self, i):
# 将所有按钮恢复默认状态
for b in self.bns: b['relief'] = RAISED
# 将当前按钮设置选中样式
self.bns[i]['relief'] = SUNKEN
# 设置要绘制的图形
self.item_type = i
    # handler for picking the outline color
def choose_outline(self):
# 弹出颜色选择对话框
select_color = colorchooser.askcolor(parent=self.master,
title="请选择边框颜色", color=self.outline)
if select_color is not None:
self.outline = select_color[1]
    # handler for picking the fill color
def choose_fill(self):
# 弹出颜色选择对话框
select_color = colorchooser.askcolor(parent=self.master,
title="请选择填充颜色", color=self.fill)
if select_color is not None:
self.fill = select_color[1]
else:
self.fill = None
    def drag_handler(self, event):
        """Handle left-button drag: draw a dashed preview of the current shape."""
        # Line tool
        if self.item_type == 0:
            # No start point yet (both coordinates still at the -10 sentinel)
            if self.firstx < -1 and self.firsty < -1:
                self.firstx, self.firsty = event.x, event.y
            # Delete the previous dashed preview
            if self.temp_item is not None:
                self.cv.delete(self.temp_item)
            # Redraw the dashed preview line
            self.temp_item = self.cv.create_line(self.firstx, self.firsty,
                event.x, event.y, dash=2)
        # Rectangle tool
        if self.item_type == 1:
            # No start point yet (both coordinates still at the -10 sentinel)
            if self.firstx < -1 and self.firsty < -1:
                self.firstx, self.firsty = event.x, event.y
            # Delete the previous dashed preview
            if self.temp_item is not None:
                self.cv.delete(self.temp_item)
            leftx, lefty = min(self.firstx, event.x), min(self.firsty, event.y)
            rightx, righty = max(self.firstx, event.x), max(self.firsty, event.y)
            # Redraw the dashed selection rectangle
            self.temp_item = self.cv.create_rectangle(leftx, lefty, rightx, righty,
                dash=2)
        # Ellipse tool
        if self.item_type == 2:
            # No start point yet (both coordinates still at the -10 sentinel)
            if self.firstx < -1 and self.firsty < -1:
                self.firstx, self.firsty = event.x, event.y
            # Delete the previous dashed preview
            if self.temp_item is not None:
                self.cv.delete(self.temp_item)
            # NOTE(review): this preview expands the box by the drag distance
            # around both endpoints, while release_handler draws the final oval
            # from a plain min/max bounding box — preview and final shape do
            # not match; confirm which behavior is intended.
            radius = math.sqrt((event.x-self.firstx)**2+(event.y-self.firsty)**2)
            leftx, lefty = self.firstx-radius,self.firsty-radius
            rightx, righty = event.x + radius,event.y+radius
            self.temp_item = self.cv.create_oval(leftx, lefty, rightx, righty,
                outline=self.outline, fill=self.fill, width=self.width.get(),dash=2)
        # Polygon tool
        if self.item_type == 3:
            self.draw_polygon = True
            # No start point yet (both coordinates still at the -10 sentinel)
            if self.firstx < -1 and self.firsty < -1:
                self.firstx, self.firsty = event.x, event.y
            # Delete the previous dashed preview
            if self.temp_item is not None:
                self.cv.delete(self.temp_item)
            # Redraw the dashed preview edge
            self.temp_item = self.cv.create_line(self.firstx, self.firsty,
                event.x, event.y, dash=2)
        # Pencil tool: draw permanent segments as the mouse moves
        if self.item_type == 4:
            # Only once a previous point exists (both coordinates > 0)
            if self.prevx > 0 and self.prevy > 0:
                self.cv.create_line(self.prevx, self.prevy, event.x, event.y,
                    fill=self.outline, width=self.width.get())
            self.prevx, self.prevy = event.x, event.y
    def item_bind(self, t):
        """Attach right-button move handlers to canvas item ``t``."""
        # Right-button drag moves the item.
        self.cv.tag_bind(t, '<B3-Motion>', self.move)
        # Releasing the right button ends the move.
        self.cv.tag_bind(t, '<ButtonRelease-3>', self.move_end)
    def release_handler(self, event):
        """Handle left-button release: replace the dashed preview with the
        final shape and bind selection/move handlers on it."""
        # Dispose of the temporary dashed preview item
        if self.temp_item is not None:
            # Not a polygon: the preview can be deleted right away
            if self.item_type != 3:
                self.cv.delete(self.temp_item)
            # Polygon: keep the preview edges so they can all be deleted
            # later, when the polygon is finished by a double-click
            else:
                self.temp_items.append(self.temp_item)
            self.temp_item = None
        # Line tool
        if self.item_type == 0:
            # Only if a start point exists (both coordinates > 0)
            if self.firstx > 0 and self.firsty > 0:
                # Draw the final line
                t = self.cv.create_line(self.firstx, self.firsty,
                    event.x, event.y, fill=self.outline, width=self.width.get())
                # Left-clicking the item selects it
                self.cv.tag_bind(t, '<Button-1>',
                    lambda event=event, t=t: self.choose_item_handler(event, t))
                self.item_bind(t)
        # Rectangle or ellipse tool
        if self.item_type == 1 or self.item_type == 2:
            # Only if a start point exists (both coordinates > 0)
            if self.firstx > 0 and self.firsty > 0:
                leftx, lefty = min(self.firstx, event.x), min(self.firsty, event.y)
                rightx, righty = max(self.firstx, event.x), max(self.firsty, event.y)
                if self.item_type == 1:
                    # Draw the final rectangle
                    t = self.cv.create_rectangle(leftx, lefty, rightx, righty,
                        outline=self.outline, fill=self.fill, width=self.width.get())
                if self.item_type == 2:
                    # Draw the final ellipse
                    t = self.cv.create_oval(leftx, lefty, rightx, righty,
                        outline=self.outline, fill=self.fill, width=self.width.get())
                # Left-clicking the item selects it
                self.cv.tag_bind(t, '<Button-1>',
                    lambda event=event, t=t: self.choose_item_handler(event, t))
                self.item_bind(t)
        if self.item_type != 3:
            self.prevx = self.prevy = -10
            self.firstx = self.firsty = -10
        # Polygon in progress: remember the segment's start point.
        # NOTE(review): self.draw_polygon is only ever created in drag_handler,
        # so a click-release without any drag while the polygon tool is active
        # raises AttributeError here — consider initializing it in __init__.
        elif (self.draw_polygon):
            # Append the start point of the segment just finished
            self.points.append((self.firstx, self.firsty))
            self.firstx, self.firsty = event.x, event.y
    def double_handler(self, event):
        """Finish the in-progress polygon on a left-button double-click."""
        # Only the polygon tool reacts to double-clicks
        if self.item_type == 3:
            t = self.cv.create_polygon(*self.points,
                outline=self.outline, fill="" if self.fill is None else self.fill,
                width=self.width.get())
            # Left-clicking the item selects it
            self.cv.tag_bind(t, '<Button-1>',
                lambda event=event, t=t: self.choose_item_handler(event, t))
            self.item_bind(t)
            # Discard the collected vertices
            self.points.clear()
            # Reset the start point to the -10 sentinel to stop drawing
            self.firstx = self.firsty = -10
            # Delete all temporary dashed preview edges
            for it in self.temp_items: self.cv.delete(it)
            self.temp_items.clear()
            self.draw_polygon = False
    # Select the shape item identified by parameter t.
    def choose_item_handler(self, event, t):
        """Remember canvas item ``t`` as the current selection."""
        self.choose_item = t
# 定义移动图形项的方法
def move(self, event):
# 如果被选中图形项不为空,才可以执行移动
if self.choose_item is not None:
# 如果前一个点存在(self.mv_prevx 和 self.mv_prevy都大于0)
if self.mv_prevx > 0 and self.mv_prevy > 0:
# 移动选中的图形项
self.cv.move(self.choose_item, event.x - self.mv_prevx,
event.y - self.mv_prevy)
self.mv_prevx, self.mv_prevy = event.x, event.y
# 结束移动的方法
def move_end(self, event):
self.mv_prevx = self.mv_prevy = -10
def delete_item(self, event):
# 如果被选中的item不为空,删除被选中的图形项
if self.choose_item is not None:
self.cv.delete(self.choose_item)
# Create the application window and start the Tk event loop.
root = Tk()
root.title("绘图工具")
root.geometry('800x680')
app = App(root)
# The Delete key removes the currently selected shape.
root.bind('<Delete>', app.delete_item)
root.mainloop() |
"""
Start Project Cars, run python standing.py while doing a race.
"""
import carseour
import time
# Poll the running Project CARS instance and print the standings forever,
# refreshing every 10 seconds.
game = carseour.live()
while True:
    for player in game.standing():
        # Format: "position. name (lap/total_laps) (distance_into_lap)"
        print(f"{player['position']}. {player['name']} "
              f"({player['lap']}/{game.mLapsInEvent}) "
              f"({round(player['lap_distance'])})")
    time.sleep(10)
    print("\n\n\n\n")
|
import numpy as np
nan = np.nan
# Raw dataset index -> training label id; -1 marks indices to ignore.
index_to_label = np.array(
    [12, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -1, -1, 11],
    dtype='int32',
)

def index_to_label_func(x):
    """Map a raw index (or an array of indices) to its training label id."""
    return index_to_label[x]

# Vectorized variant for element-wise mapping over arbitrary arrays.
index_to_label_vec_func = np.vectorize(index_to_label_func)
# Training label id -> raw dataset index (inverse of index_to_label).
label_to_index = np.array(
    [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 0],
    dtype='int32',
)

def label_to_index_func(x):
    """Map a training label id (or array of ids) back to its raw index."""
    return label_to_index[x]
# Human-readable class name for each raw dataset index.
index_to_class = [
    'Void', 'Sky', 'Building', 'Road', 'Sidewalk', 'Fence',
    'Vegetation', 'Pole', 'Car', 'Traffic Sign', 'Pedestrian',
    'Bicycle', 'Lanemarking', 'Reserved', 'Reserved', 'Traffic Light',
]

# RGB visualization color per raw index; NaN rows mark the reserved slots.
index_to_color = np.array([
    [0, 0, 0],
    [128, 128, 128],
    [128, 0, 0],
    [128, 64, 128],
    [0, 0, 192],
    [64, 64, 128],
    [128, 128, 0],
    [192, 192, 128],
    [64, 0, 128],
    [192, 128, 128],
    [64, 64, 0],
    [0, 128, 192],
    [0, 172, 0],
    [np.nan, np.nan, np.nan],
    [np.nan, np.nan, np.nan],
    [0, 128, 128],
])
|
# Read the target Fibonacci index n from stdin.
n = int(input().rstrip())
def fib(count):
    """Naive exponential-time recursive Fibonacci: fib(0) == fib(1) == 1."""
    if count in (0, 1):
        return 1
    return fib(count - 1) + fib(count - 2)
# Memo table: result[i] caches fib(i); the first two values are known.
result = [1, 1] + [None]*(n-1)
def fibByMemo(count):
    """Memoized Fibonacci; reads and fills the module-level `result` table."""
    global result
    if count == 0 or count == 1:
        return result[count]
    # Idiom fix: test the None sentinel with `is not None` (identity),
    # not `!= None` (equality).
    if result[count] is not None:
        return result[count]
    result[count] = fibByMemo(count - 1) + fibByMemo(count - 2)
    return result[count]
# DP table for the bottom-up variant; filled in place by findByDynamic.
result2 = [1,1] + [None]*(n-1)
def findByDynamic(count):
    """Fill `result2` bottom-up with Fibonacci numbers up to index `count`."""
    global result2
    for i in range(2, count+1):
        result2[i] = result2[i-1] + result2[i-2]
# NOTE(review): this unconditional fib(2) print looks like a debug
# leftover — confirm it is intended output.
print(fib(2))
fibByMemo(n)
print(*result)
findByDynamic(n)
print(*result2)
|
# coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
import filters as f
from iota.commands import FilterCommand, RequestFilter
from iota.filters import AddressNoChecksum
__all__ = [
'WereAddressesSpentFromCommand',
]
class WereAddressesSpentFromCommand(FilterCommand):
    """
    Executes `wereAddressesSpentFrom` command.
    See :py:meth:`iota.api.StrictIota.were_addresses_spent_from`.
    """
    command = 'wereAddressesSpentFrom'
    def get_request_filter(self):
        return WereAddressesSpentFromRequestFilter()
    def get_response_filter(self):
        # No response filtering: the node's reply is passed through as-is.
        pass
class WereAddressesSpentFromRequestFilter(RequestFilter):
    """Validates the request payload for `wereAddressesSpentFrom`."""
    def __init__(self):
        super(WereAddressesSpentFromRequestFilter, self).__init__(
            {
                # `addresses` is a required array; each element is required,
                # has its checksum stripped, and is normalized to an ASCII
                # unicode string.
                'addresses': (
                    f.Required
                    | f.Array
                    | f.FilterRepeater(
                        f.Required
                        | AddressNoChecksum()
                        | f.Unicode(encoding='ascii', normalize=False)
                    )
                ),
            }
        )
|
# Classify a temperature reading relative to 20 degrees.
# Fixed the misspelled variable name and the typo in the user prompt
# ("Temprature" -> "Temperature").
temperature = int(input('Temperature: '))
if temperature < 20:
    print("Khonake!")
elif temperature == 20:
    print("Mizuneh!")
else:
    print("Garme!")
|
import torch
from torch import nn
import torch.nn.functional as F
from models.core_layers import SpectralNorm, ConditionalBatchNorm2d
from models.basic_module import init_xavier_uniform
## Generator ResBlock, post-activation version (original SNGAN block is pre-act)
class GeneratorResidualBlockPostAct(nn.Module):
    """Generator residual block: (upsample ->) conv-bn-relu x2 plus a shortcut.

    When ``n_classes`` > 0, conditional batch norm is used and ``forward``
    must be given ``label_onehots``.
    """
    def __init__(self, in_ch, out_ch, upsampling, n_classes=0):
        super().__init__()
        self.conv1 = nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1)
        self.upsampling = upsampling
        if n_classes == 0:
            self.bn1 = nn.BatchNorm2d(out_ch)
            self.bn2 = nn.BatchNorm2d(out_ch)
        else:
            self.bn1 = ConditionalBatchNorm2d(out_ch, n_classes)
            self.bn2 = ConditionalBatchNorm2d(out_ch, n_classes)
        # A 1x1 conv is needed on the shortcut when channels or resolution change.
        if in_ch != out_ch or upsampling > 1:
            self.shortcut_conv = nn.Conv2d(in_ch, out_ch, kernel_size=1, padding=0)
        else:
            self.shortcut_conv = None
        self.conv1.apply(init_xavier_uniform)
        self.conv2.apply(init_xavier_uniform)
    def forward(self, inputs, label_onehots=None):
        # Upsample once and reuse the result for both branches; the original
        # called F.interpolate twice with identical arguments.
        if self.upsampling > 1:
            upsampled = F.interpolate(inputs, scale_factor=self.upsampling)
        else:
            upsampled = inputs
        # main branch: conv -> bn -> relu, twice
        x = self.conv1(upsampled)
        if label_onehots is not None:
            x = self.bn1(x, label_onehots)
        else:
            x = self.bn1(x)
        x = F.relu(x)
        x = self.conv2(x)
        if label_onehots is not None:
            x = self.bn2(x, label_onehots)
        else:
            x = self.bn2(x)
        x = F.relu(x)
        # shortcut branch
        shortcut = upsampled
        if self.shortcut_conv is not None:
            shortcut = self.shortcut_conv(shortcut)
        # residual add
        return x + shortcut
class DiscriminatorResidualBlockPostAct(nn.Module):
    """Discriminator residual block (post-activation) with spectral-norm convs.

    Optionally average-pools the input by ``downsampling``; the pooled tensor
    is shared by the main and shortcut branches.
    """
    def __init__(self, in_ch, out_ch, downsampling):
        super().__init__()
        self.conv1 = SpectralNorm(nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1))
        self.conv2 = SpectralNorm(nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1))
        self.downsampling = downsampling
        # A 1x1 conv is needed on the shortcut when channels or resolution change.
        if in_ch != out_ch or downsampling > 1:
            self.shortcut_conv = SpectralNorm(nn.Conv2d(in_ch, out_ch, kernel_size=1, padding=0))
        else:
            self.shortcut_conv = None
        self.conv1.apply(init_xavier_uniform)
        self.conv2.apply(init_xavier_uniform)
    def forward(self, inputs):
        # Pool once and share the result; the original called avg_pool2d
        # twice with identical arguments.
        if self.downsampling > 1:
            pooled = F.avg_pool2d(inputs, kernel_size=self.downsampling)
        else:
            pooled = inputs
        shortcut = pooled
        if self.shortcut_conv is not None:
            shortcut = self.shortcut_conv(shortcut)
        # conv -> relu -> conv -> relu (post-activation ordering)
        x = F.relu(self.conv2(F.relu(self.conv1(pooled))))
        # residual add
        return x + shortcut
|
import itertools
import math
from typing import Callable, Optional, Sequence
import numpy as np
import dp_variance
def naive_k_min_variance_subset(k: int, sample: Sequence[float]) -> np.ndarray:
    """Brute force: return the size-k subset of `sample` with minimal variance."""
    best, best_var = None, math.inf
    for combo in itertools.combinations(sample, k):
        arr = np.array(combo)
        v = np.var(arr)
        # Strict '<' keeps the first combination among ties.
        if v < best_var:
            best_var = v
            best = arr
    return best  # type: ignore
def naive_k_max_variance_subset(k: int, sample: Sequence[float]) -> np.ndarray:
    """Brute force: return the size-k subset of `sample` with maximal variance."""
    best, best_var = None, -math.inf
    for combo in itertools.combinations(sample, k):
        v = np.var(combo)
        # Strict '>' keeps the first combination among ties.
        if v > best_var:
            best_var = v
            best = np.array(list(combo))
    return best  # type: ignore
def test_k_min_variance_subset_indices(
    n: int = 200, seed: Optional[int] = None
) -> None:
    """Randomized check that the DP min-variance subset matches brute force."""
    rng = np.random.default_rng(seed=seed)
    # BUGFIX: removed a stray debug `print(rng)` left in the test body.
    for _ in range(n):
        k = rng.integers(low=1, high=15)
        sample = rng.random(size=15) * 100 - 50
        sample.sort()
        subset2 = naive_k_min_variance_subset(k, sample)
        subset3 = sample[dp_variance.k_min_variance_subset_indices(k, sample)]
        assert np.array_equal(subset2, subset3)
def test_k_max_variance_subset_indices(
    n: int = 200, seed: Optional[int] = None
) -> None:
    """Randomized check that the DP max-variance subset matches brute force."""
    rng = np.random.default_rng(seed)
    for _ in range(n):
        k = rng.integers(low=1, high=15)
        # k == 1 is skipped — presumably the size-1 max-variance subset is
        # not unique/meaningful for the DP implementation; TODO confirm.
        if k == 1:
            continue
        sample = rng.random(size=15) * 100 - 50
        sample.sort()
        subset2 = naive_k_max_variance_subset(k, sample)
        subset3 = sample[dp_variance.k_max_variance_subset_indices(k, sample)]
        assert np.array_equal(subset2, subset3)
def test__mean_var_with(n: int = 100, seed: Optional[int] = None) -> None:
    """Exercise _mean_var_with on tiny sizes and extreme float scales."""
    for size, scale in ((2, 1.0), (3, 10.0), (n, 1e-100), (n, 1.0), (n, 1e100)):
        _test__mean_var_with(size, scale, seed)
def _test__mean_var_with(n: int, scale: float, seed: Optional[int]) -> None:
    """Check _mean_var_with against numpy on random samples of size n."""
    rng = np.random.default_rng(seed)
    for _ in range(n):
        sample = (rng.random(size=n) - 0.5) * scale
        # Stats of the sample without its last element...
        var_without_e = np.var(sample[:-1])
        mean_without_e = np.mean(sample[:-1])
        e = sample[-1]
        # ...updated with e must equal the stats of the full sample.
        (mean, var) = dp_variance._mean_var_with(e, mean_without_e, var_without_e, n)
        assert np.isclose(mean, np.mean(sample), rtol=1e-9, atol=1e-18)
        assert np.isclose(var, np.var(sample), rtol=1e-9, atol=1e-18)
def test_iteration__mean_var_with(n: int = 100, seed: Optional[int] = None) -> None:
    """Exercise the online-update helper across several sample sizes."""
    for size in (1, 2, 3, n):
        _test_iteration__mean_var_with(size, seed)
def _test_iteration__mean_var_with(n: int, seed: Optional[int]) -> None:
    """Feed elements one at a time; the online stats must match numpy."""
    rng = np.random.default_rng(seed)
    sample = rng.random(size=n) - 0.5
    # A single-element sample has itself as mean and zero variance.
    online_mean = sample[0]
    online_var = 0.0
    for i in range(1, len(sample)):
        (online_mean, online_var) = dp_variance._mean_var_with(
            sample[i], online_mean, online_var, i + 1
        )
    assert np.isclose(online_mean, np.mean(sample), rtol=1e-9, atol=1e-18)
    assert np.isclose(online_var, np.var(sample), rtol=1e-9, atol=1e-18)
def test__mean_var_without(n: int = 100, seed: Optional[int] = None) -> None:
    """Exercise _mean_var_without on tiny sizes and extreme float scales."""
    for size, scale in ((2, 1.0), (3, 10.0), (n, 1e-100), (n, 1.0), (n, 1e100)):
        _test__mean_var_without(size, scale, seed)
def _test__mean_var_without(n: int, scale: float, seed: Optional[int]) -> None:
    """Check _mean_var_without against numpy by deleting a random element."""
    rng = np.random.default_rng(seed)
    for _ in range(n):
        k = rng.integers(low=0, high=n)
        sample = (rng.random(size=n) - 0.5) * scale
        sample_var = np.var(sample)
        sample_mean = np.mean(sample)
        e = sample[k]
        # Downdate the full-sample stats by removing element e...
        (mean, var) = dp_variance._mean_var_without(
            e=e, var_with_e=sample_var, mean_with_e=sample_mean, n_with_e=n
        )
        # ...and compare with numpy on the sample with e actually deleted.
        sample_without_e = np.delete(sample, k)
        assert np.isclose(mean, np.mean(sample_without_e), rtol=1e-9, atol=1e-18)
        assert np.isclose(var, np.var(sample_without_e), rtol=1e-9, atol=1e-18)
def test_iteration__mean_var_without(n: int = 100, seed: Optional[int] = None) -> None:
    """Exercise the online-removal helper across several sample sizes."""
    for size in (1, 2, 3, n):
        _test_iteration__mean_var_without(size, seed)
def _test_iteration__mean_var_without(n: int, seed: Optional[int]) -> None:
    """Remove elements one at a time until two remain; compare with numpy."""
    rng = np.random.default_rng(seed)
    sample = rng.random(size=n) - 0.5
    online_mean = np.mean(sample)
    online_var = np.var(sample)
    # Strip the first n-2 elements, downdating the stats each time.
    for i in range(len(sample) - 2):
        (online_mean, online_var) = dp_variance._mean_var_without(
            sample[i], online_mean, online_var, n - i
        )
    assert np.isclose(online_mean, np.mean(sample[-2:]), rtol=1e-9, atol=1e-18)
    assert np.isclose(online_var, np.var(sample[-2:]), rtol=1e-9, atol=1e-18)
def test_composition__mean_var_with_without(
    n: int = 100, scale: float = 1.0, seed: Optional[int] = None
) -> None:
    """Removing then re-adding elements must round-trip the running stats."""
    for size in (2, 3, 4, n):
        _test_composition__mean_var__mean_var_without(size, scale, seed)
def _test_composition__mean_var__mean_var_without(
    n: int, scale: float, seed: Optional[int]
) -> None:
    """Strip all but two elements, then add them back; stats must round-trip."""
    rng = np.random.default_rng(seed)
    sample = (rng.random(size=n) - 0.5) * scale
    mean = np.mean(sample)
    var = np.var(sample)
    # Remove the first n-2 elements one at a time...
    for i in range(n - 2):
        (mean, var) = dp_variance._mean_var_without(sample[i], mean, var, n - i)
    # ...then add them back in the same order.
    for i in range(n - 2):
        (mean, var) = dp_variance._mean_var_with(sample[i], mean, var, i + 3)
    assert np.isclose(mean, np.mean(sample), rtol=1e-9, atol=1e-18)
    assert np.isclose(var, np.var(sample), rtol=1e-9, atol=1e-18)
def _naive_local_sensitivity(
sample: np.ndarray, L: float, U: float, mean: float
) -> float:
std = np.std(sample)
dist_from_std = 0.0
for i in range(sample.size):
old_value = sample[i]
sample[i] = L
v1 = abs(std - np.std(sample))
sample[i] = U
v2 = abs(std - np.std(sample))
sample[i] = mean
v3 = abs(std - np.std(sample))
dist_from_std = max([dist_from_std, v1, v2, v3])
sample[i] = old_value
local_sens = dist_from_std
return local_sens
def test__local_sensitivity(n: int = 200, seed: Optional[int] = None) -> None:
    """Fast _local_sensitivity must match the naive version on worst-case
    neighbors for both the max-variance and min-variance modes."""
    rng = np.random.default_rng(seed)
    L = -0.5
    U = 0.5
    mean = 0.0
    for _ in range(n):
        k = rng.integers(low=0, high=n)
        sample = rng.random(size=n) - 0.5
        sample.sort()
        # Max-variance worst-case neighbor.
        wcn = dp_variance._worst_case_k_neighbor(
            k, sample, "max_var", L, U, mean, np.std
        )
        ls1 = _naive_local_sensitivity(wcn, L, U, mean)
        ls2 = dp_variance._local_sensitivity(wcn, k, L, U, mean, np.std)
        assert np.isclose(ls1, ls2)
        # Min-variance worst-case neighbor.
        wcn = dp_variance._worst_case_k_neighbor(
            k, sample, "min_var", L, U, mean, np.std
        )
        ls1 = _naive_local_sensitivity(wcn, L, U, mean)
        ls2 = dp_variance._local_sensitivity(wcn, k, L, U, mean, np.std)
        assert np.isclose(ls1, ls2)
def _naive_worst_case_k_neighbor(
    k: int,
    sample: np.ndarray,
    mode: str,
    L: float,
    U: float,
    mean: float,
    dispersion: Callable[[np.ndarray], float],
) -> np.ndarray:
    """Compute k-neighbors which are good candidate for having the
    maximal local sensitivity among all k-neighbors of `sample`.

    Args:
        k: number of elements that may be replaced.
        sample: the original sample.
        mode: "max_var" or "min_var" — which extreme neighbor to construct.
        L, U: lower and upper bounds for replacement values.
        mean: neutral replacement value used in "max_var" mode.
        dispersion: dispersion measure (e.g. np.var) used to rank candidates.

    Raises:
        ValueError: if `mode` is not one of the supported strings.
    """
    if k == 0:
        return sample
    if mode == "max_var":
        # Remove the k elements contributing most variance and pad with the
        # mean, which leaves the remaining subset's variance unchanged.
        max_var_indices = dp_variance.k_max_variance_subset_indices(k, sample)
        max_var_complement = sample[
            dp_variance._complement(len(sample), max_var_indices)
        ]
        # It doesn't matter where I add the new values, as the variance
        # will stay the same.
        worst_case = np.concatenate([np.tile(mean, k), max_var_complement])
    elif mode == "min_var":
        # Remove the k least-varying elements and try every combination of
        # the extreme values L/U as replacements, keeping the most dispersed.
        min_var_indices = dp_variance.k_min_variance_subset_indices(k, sample)
        min_var_complement = sample[
            dp_variance._complement(len(sample), min_var_indices)
        ]
        worst_case_candidates = (
            np.concatenate([extr, min_var_complement])
            for extr in dp_variance._extreme_value_combinations(k, L, U)
        )
        worst_case = max(
            worst_case_candidates, key=lambda seq: dispersion(seq)
        )  # type: ignore
    else:
        raise ValueError("Unsupported mode: {}".format(mode))
    return worst_case  # type: ignore
def test__worst_case_k_neighbor(n: int = 100, seed: Optional[int] = None) -> None:
    """Randomized check that the fast worst-case-neighbor construction
    matches the naive one in both modes.
    """
    # BUGFIX: removed `seed = 42`, a debug leftover that shadowed the `seed`
    # parameter and made every run deterministic regardless of the argument.
    rng = np.random.default_rng(seed)
    L = -0.5
    U = 0.5
    mean = 0.0
    for _ in range(n):
        k = rng.integers(low=0, high=n)
        sample = rng.random(size=n) - 0.5
        sample.sort()
        wcn1 = _naive_worst_case_k_neighbor(k, sample, "max_var", L, U, mean, np.var)
        wcn2 = dp_variance._worst_case_k_neighbor(
            k, sample, "max_var", L, U, mean, np.var
        )
        assert np.array_equal(wcn1, wcn2)
        wcn3 = _naive_worst_case_k_neighbor(k, sample, "min_var", L, U, mean, np.var)
        wcn4 = dp_variance._worst_case_k_neighbor(
            k, sample, "min_var", L, U, mean, np.var
        )
        assert np.array_equal(wcn3, wcn4)
|
# list(map(int, input().split()))
# int(input())
def main():
    """Read N and K, then the N prices; print the sum of the K cheapest."""
    _, k = map(int, input().split())
    prices = sorted(map(int, input().split()))
    print(sum(prices[:k]))

if __name__ == '__main__':
    main()
|
# Generated by Django 3.1.6 on 2021-02-19 13:36
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the `comments` field from `news`."""
    dependencies = [
        ('CyberUser', '0004_auto_20210219_1930'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='news',
            name='comments',
        ),
    ]
|
from django import forms
from django.contrib.auth import login as auth_login
from django.contrib.auth.hashers import make_password
from django.core.exceptions import ValidationError
from django.forms.utils import ErrorList
from django.http import HttpResponseRedirect
from django.views.generic.base import RedirectView
from django.urls import reverse
import import_string
from auth_token.config import settings
from auth_token.contrib.common.views import LoginView as _LoginView
from auth_token.contrib.common.views import LogoutView as _LogoutView
from auth_token.contrib.common.views import LoginCodeVerificationView as _LoginCodeVerificationView
from auth_token.contrib.is_core_auth.forms import LoginCodeVerificationForm
from auth_token.models import Token
from auth_token.utils import login, takeover
from is_core.generic_views import DefaultCoreViewMixin
from is_core.generic_views.mixins import GetCoreObjViewMixin
class LoginView(_LoginView):
    """Standard is_core login view; only overrides the template."""
    template_name = 'is_core/login.html'
class TwoFactorLoginView(LoginView):
    """First step of two-factor login: verify credentials, then send a code."""
    def _generate_and_send_two_factor_code(self):
        # Generate a one-time code, deliver it via the configured sender
        # function, and persist only its salted hash on the token.
        code = import_string(settings.TWO_FACTOR_CODE_GENERATING_FUNCTION)(Token.TWO_FACTOR_CODE_LENGTH)
        import_string(settings.TWO_FACTOR_SENDING_FUNCTION)(self.request.token, code)
        self.request.token.two_factor_code = make_password(code, salt=Token.TWO_FACTOR_CODE_SALT)
        self.request.token.save()
    def _login(self, user, expiration, form):
        # Mark the token as part of a two-factor flow.
        login(
            self.request, user, expiration, allowed_cookie=self.allowed_cookie, allowed_header=self.allowed_header,
            two_factor_login=True
        )
    def get_success_url(self):
        # Redirect to the code-verification page, preserving the original
        # post-login destination in the redirect query parameter.
        return '{url}?{redirect_field_name}={value}'.format(
            url=settings.TWO_FACTOR_REDIRECT_URL, redirect_field_name=self.redirect_field_name,
            value=self.get_redirect_url()
        )
    def form_valid(self, form):
        """
        The user has provided valid credentials (this was checked in AuthenticationForm.is_valid()). So now we
        can check the test cookie stuff and log him in.
        """
        # NOTE(review): `expiration` receives `not form.is_permanent()` — a
        # permanent login apparently maps to a non-expiring token; confirm.
        self._login(form.get_user(), not form.is_permanent(), form)
        try:
            # send user the code for second part of authentication process
            self._generate_and_send_two_factor_code()
        except ValidationError as err:
            # Surface delivery/generation problems as form-level errors.
            form._errors[forms.forms.NON_FIELD_ERRORS] = ErrorList([err])
            return self.form_invalid(form)
        return HttpResponseRedirect(self.get_success_url())
class LogoutView(_LogoutView):
    """Standard is_core logout view; only overrides the template."""
    template_name = 'is_core/logged_out.html'
class UserTakeover(GetCoreObjViewMixin, DefaultCoreViewMixin, RedirectView):
    """Switch the current session to another user ("takeover") and redirect."""
    def get_redirect_url(self, *args, **kwargs):
        return settings.TAKEOVER_REDIRECT_URL
    def get(self, request, *args, **kwargs):
        user = self.get_obj()
        # Re-authenticate the request as the target user before redirecting.
        takeover(request, user)
        return super().get(request, *args, **kwargs)
class LoginCodeVerificationView(_LoginCodeVerificationView):
    """Second step of two-factor login: verify the delivered one-time code."""
    template_name = 'is_core/login.html'
    form_class = LoginCodeVerificationForm
    def form_valid(self, form):
        self.log_successful_request()
        # Complete the Django session login using the token's auth backend.
        auth_login(self.request, form.get_user(), self.request.token.backend)
        return HttpResponseRedirect(self.get_success_url())
    def form_invalid(self, form):
        self.log_unsuccessful_request()
        return super().form_invalid(form)
    def dispatch(self, request, *args, **kwargs):
        # Only proceed with an active token; otherwise bounce back to the
        # first login step, preserving the destination URL.
        if self.request.token.is_active:
            return super().dispatch(request, *args, **kwargs)
        else:
            return HttpResponseRedirect('{url}?{redirect_field_name}={value}'.format(
                url=reverse('IS:login', ), redirect_field_name=self.redirect_field_name,
                value=self.get_redirect_url()
            ))
|
#!/usr/bin/python
#coding: utf-8
__AUTHOR__ = "Fnkoc"
__LICENSE__= "MIT"
"""
MIT License
Copyright (c) 2017 Franco Colombino
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from random import choice
# Pool of realistic browser user-agent strings. Hoisted to module level as a
# tuple so the collection is built once instead of on every call.
_AGENTS = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246",
    "Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1",
    "Mozilla/5.0 (Linux; Android 6.0.1; SM-G920V Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Mobile Safari/537.36",
    "Mozilla/5.0 (Linux; Android 5.1.1; SM-G928X Build/LMY47X) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.83 Mobile Safari/537.36",
    "Mozilla/5.0 (Windows Phone 10.0; Android 4.2.1; Microsoft; Lumia 950) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Mobile Safari/537.36 Edge/13.10586",
    "Mozilla/5.0 (Linux; Android 6.0.1; Nexus 6P Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.83 Mobile Safari/537.36",
    "Mozilla/5.0 (Linux; Android 6.0.1; E6653 Build/32.2.A.0.253) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Mobile Safari/537.36",
    "Mozilla/5.0 (Linux; Android 6.0; HTC One M9 Build/MRA58K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Mobile Safari/537.36",
    "Mozilla/5.0 (Linux; Android 7.0; Pixel C Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.98 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 6.0.1; SGP771 Build/32.2.A.0.253; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.98 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 5.1.1; SHIELD Tablet Build/LMY48C) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 5.0.2; SAMSUNG SM-T550 Build/LRX22G) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/3.3 Chrome/38.0.2125.102 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 4.4.3; KFTHWI Build/KTU84M) AppleWebKit/537.36 (KHTML, like Gecko) Silk/47.1.79 like Chrome/47.0.2526.80 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 5.0.2; LG-V410/V41020c Build/LRX22G) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/34.0.1847.118 Safari/537.36")

def generate():
    """Return a randomly chosen browser user-agent string."""
    return choice(_AGENTS)
|
from reader import read_data
# b is a highly composite multiplier (2^2 * 3^2 * 5^2 * 7^2 * 13 = 573300),
# presumably a common multiple of the expected denominators — TODO confirm.
b = 2 ** 2 * 3 ** 2 * 5 ** 2 * 7 ** 2 * 13
ulamki = read_data()
# ulamki = [("1", "2"), ("2", "3"), ("5", "3"), ("2", "4"), ("15", "5")]
# Sum the fractions after scaling each by b.
suma_ulamkow = 0
for ulamek in ulamki:
    licznik = int(ulamek[0])    # numerator
    mianownik = int(ulamek[1])  # denominator
    # NOTE(review): '/' is float division, so the scaled sum can lose
    # precision for large inputs; '//' (or fractions.Fraction) would be exact
    # if licznik * b is always divisible by mianownik — confirm intent.
    nowy_ulamek = (licznik * b) / mianownik
    suma_ulamkow += nowy_ulamek
print(f"{suma_ulamkow=}")
|
# import torch
import numpy as np
import os
import pandas as pd
from sklearn.model_selection import train_test_split
os.chdir("/home/vglasov/Reseach/LU-Net-pytorch/")
import config
from tools import preprocess #custom class
from tools import dataloader_tools as data_loader
import open3d as o3d
import torch
from torch.utils.data import DataLoader, Dataset
class batch_loader(Dataset):
    """Dataset yielding pointnetized LiDAR frames listed in a CSV index file.

    Each CSV row is expected to hold (point-cloud path, labels path) in its
    first two columns — TODO confirm against the data pipeline.
    """
    def __init__(self, root_dir, augmentation=None):
        # `root_dir` is actually the path of the CSV index file, not a directory.
        self.landmarks_frame = pd.read_csv(root_dir)
        self.augmentation = augmentation
    def _read_labels(self, labels_path):
        # Columns 4..9 of every label line are parsed as floats
        # (presumably a 3D bounding box — confirm the label file format).
        return [list(map(float, f.split()[4:10])) for f in open(labels_path, "r").readlines()]
    def __getitem__(self, idx):
        pcd_path = self.landmarks_frame.iloc[idx, 0]
        labels_path = self.landmarks_frame.iloc[idx, 1]
        pcd = o3d.io.read_point_cloud(pcd_path)
        labels_list = self._read_labels(labels_path)
        # Project the point cloud (with labels) onto a 2D image grid.
        pcd2img = preprocess.Pcd2ImageTransform(augmentation=self.augmentation).fit(pcd, labels_list)
        data = pcd2img.transform()
        # Pixels whose channel 0 is zero are treated as empty.
        mask = data[:,:,0] != 0
        p, n = data_loader.pointnetize(data[:,:,0:4], n_size=[3,3])
        # The last channel holds the per-pixel ground-truth label.
        groundtruth = data_loader.apply_mask(data[:,:,-1], mask)
        return torch.tensor(p, dtype=torch.float).permute(-1, -2, 0, 1),\
        torch.tensor(n, dtype=torch.float).permute(-1, -2, 0, 1),\
        torch.tensor(mask),\
        torch.tensor(groundtruth)
    def __len__(self):
        return (len(self.landmarks_frame))
# if __name__ == '__main__':
# train_loader = batch_loader(root_dir="data/train.csv")
# val_loader = batch_loader(root_dir="data/test.csv") |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 24 10:08:00 2022
@author: D.Albert-Weiss
inspired by https://lbolla.info/pipelines-in-python.html
"""
from contextlib import contextmanager
class StopPipeline(Exception):
    """Raised by a producer function to signal that the pipeline should stop."""
    pass
@contextmanager
def close_on_exit(n):
    """Close the downstream coroutine `n` when the wrapped generator exits."""
    try:
        yield
    except GeneratorExit:
        n.close()
class Pipeline(object):
    '''Chain stages together. Assumes the last is the consumer.'''
    def __init__(self, *args):
        # Wire the coroutine chain back-to-front: consumer, then the middle
        # stages, then the producer — each primed with next() before use.
        c = Pipeline.C(args[-1])
        next(c)
        t = c
        for stg in reversed(args[1:-1]):
            s = Pipeline.S(stg, t)
            next(s)
            t = s
        p = Pipeline.P(args[0], t)
        next(p)
        self._pipeline = p
    def start(self, initial_state):
        # Kick off the producer; StopIteration means it ran to completion.
        try:
            self._pipeline.send(initial_state)
        except StopIteration:
            self._pipeline.close()
        #return self._pipeline
    @staticmethod
    def P(f, n):
        '''Producer: only .send (and yield as entry point).

        `f(state)` must return a (result_to_emit, next_state) tuple and may
        raise StopPipeline to end the run.
        '''
        state = (yield)  # get initial state
        with close_on_exit(n):
            while True:
                try:
                    res,state = f(state)
                except StopPipeline:
                    return
                n.send(res)
    @staticmethod
    def S(f, n):
        '''Stage: both (yield) and .send — transforms each received value.'''
        while True:
            r = (yield)
            n.send(f(r))
    @staticmethod
    def C(f):
        '''Consumer: only (yield) — terminal side-effecting stage.'''
        while True:
            r = (yield)
            f(r)
def flip_array(data):
    """Demo stage: the actual reversal is disabled, so it only logs."""
    print('Flipped')
def randomize_array(data):
    """Demo stage: shuffling is disabled, so it only logs."""
    print('Random')
if __name__ == '__main__':
    data = [1,3,5,6]
    # NOTE(review): flip_array returns None, but Pipeline.P expects the
    # producer to return a (result, state) tuple — this demo raises a
    # TypeError unless the commented-out return statements are restored.
    p = Pipeline(
        flip_array,
        randomize_array
    )
datas = p.start((data)) |
import argparse
from ssd_project.functions.detection import *
from ssd_project.functions.multiboxloss import *
from ssd_project.model.ssd import *
from ssd_project.utils.global_variables import *
from ssd_project.utils.helpers import *
from ssd_project.utils.transformations import *
from ssd_project.utils.utils import *
def main(args):
    """Train an SSD model end-to-end, optionally resuming from a checkpoint.

    Returns:
        (model, optimizer, best_loss, epochs_since_improvement, args.epochs)
    """
    global device
    device = DEVICE
    best_loss = BEST_LOSS
    assert args.epochs > 0
    # NOTE(review): only a batch size of 8 is accepted even though it is
    # configurable on the CLI — confirm whether this restriction is intended.
    assert args.batch_size == 8
    # Fix seeds for reproducible runs.
    torch.manual_seed(66)
    np.random.seed(66)
    start_epoch = START_EPOCH
    if args.pretrained_model is not None:
        # Resume: restore weights, bookkeeping counters and loss history.
        print("LOADED MODEL")
        best_model = torch.load(args.pretrained_model)
        model_state_dict = best_model["model_state_dict"]
        start_epoch = best_model["epoch"]
        best_loss = best_model["loss"]
        epochs_since_improvement = best_model["epochs_since_improvement"]
        model = build_ssd(num_classes = NUM_CLASSES)
        model.load_state_dict(model_state_dict)
        t_loss_normal, t_loss_avg = best_model["training_losses_batch_values"], best_model["training_losses_batch_avgs"]
        v_loss_normal, v_loss_avg = best_model["validation_losses_batch_values"], best_model["validation_losses_batch_avgs"]
        print("Model LOADED SUCCESSFULLY")
    else:
        v_loss_avg, v_loss_normal = [], []
        t_loss_avg, t_loss_normal = [], []
        # BUGFIX: initialize the no-improvement counter for fresh runs.
        # Previously it was only set when resuming from a checkpoint, so a
        # fresh run raised UnboundLocalError on the first non-improving epoch.
        epochs_since_improvement = 0
        # Build the SSD model and initialize newly added layers' weights
        # with the xavier method; the VGG backbone uses pretrained weights.
        model = build_ssd(num_classes = NUM_CLASSES)
        model.vgg.load_state_dict(VGG16_WEIGHTS_PRETRAINED)
        model.extras.apply(weights_init)
        model.loc.apply(weights_init)
        model.conf.apply(weights_init)
    biases = []
    not_biases = []
    # SGD with a doubled learning rate for biases, matching the original
    # Caffe implementation: https://github.com/weiliu89/caffe/tree/ssd
    for param_name, param in model.named_parameters():
        if param.requires_grad:
            if param_name.endswith('.bias'):
                biases.append(param)
            else:
                not_biases.append(param)
    optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * args.lr}, {'params': not_biases}],
                                lr=args.lr, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
    # Datasets (with transformations) and loaders for both splits.
    train_dataset = TrainDataset(args.path_imgs, args.path_bboxes, args.path_labels, "TRAIN", args.split_ratio)
    val_dataset = TrainDataset(args.path_imgs, args.path_bboxes, args.path_labels, "TEST", args.split_ratio)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size = args.batch_size,
                                               shuffle = True, collate_fn = train_dataset.collate_fn,
                                               num_workers = WORKERS, pin_memory = True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size = args.batch_size,
                                             shuffle = True, collate_fn = val_dataset.collate_fn,
                                             num_workers = WORKERS, pin_memory = True)
    model = model.to(device)
    loss_function = MultiBoxLoss(model.priors_cxcy).to(device)
    for epoch in range(start_epoch, args.epochs):
        train_losses = train(train_loader=train_loader,
                             model=model,
                             loss_function=loss_function,
                             optimizer=optimizer,
                             epoch=epoch)
        val_losses = validate(val_loader = val_loader,
                              model = model,
                              loss_function = loss_function)
        # Track per-epoch loss history for checkpointing/plotting.
        v_loss_avg.append(val_losses.avg)
        t_loss_avg.append(train_losses.avg)
        v_loss_normal.append(val_losses.val)
        t_loss_normal.append(train_losses.val)
        is_best = val_losses.avg < best_loss
        best_loss = min(val_losses.avg, best_loss)
        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" % (epochs_since_improvement,))
        else:
            epochs_since_improvement = 0
        # Checkpoint every epoch (keeps the same semantics as the original).
        save_best_trained(epoch, epochs_since_improvement, model, optimizer, best_loss,
                          (t_loss_normal, t_loss_avg), (v_loss_normal, v_loss_avg))
    return model, optimizer, best_loss, epochs_since_improvement, args.epochs
if __name__ == '__main__':
    # CLI entry point: the hyphenated flags map onto the dest names main() reads.
    parser = argparse.ArgumentParser(description='Train an SSD model')
    parser.add_argument('--epochs', dest='epochs', type=int, default=EPOCHS)
    parser.add_argument('--split-seed', dest='split_ratio', type=int, default=SPLIT_RATIO)
    parser.add_argument('--batch-train', dest='batch_size', type=int, default=BATCH_SIZE)
    parser.add_argument('--lr', dest='lr', type=float, default=1e-3)
    parser.add_argument('--model', dest='pretrained_model', default=None)
    parser.add_argument('--path_imgs', dest='path_imgs', type=str, default='/data/ssd_ilija_data/original_images/')
    parser.add_argument('--path_bboxes', dest='path_bboxes', type=str, default='/data/ssd_ilija_data/ground_truth/bboxes_labels/')
    parser.add_argument('--path_labels', dest='path_labels', type=str, default='/data/ssd_ilija_data/ground_truth/bboxes_labels/')
    main(parser.parse_args())
|
# Package entry point: re-export the public API from .modules and expose
# the helpers submodule.
from .modules import *
from . import utils
# NOTE(review): packaging convention (PEP 396/440) expects a version STRING
# such as "1.2" — this float cannot represent e.g. "1.10"; confirm no caller
# relies on the numeric type before changing it.
__version__ = 1.2
import sqlite3
import traceback
#
# Consumer class: queues shell commands so a background process can run them.
#
class SqliteProcess:
    """SQLite-backed work queue for background process commands.

    Each row of the `process` table is one shell command with a lifecycle:
    created (status 'W', launchdate 0), reserved (launchdate set), then
    done ('D') or failed ('E'). Errors are recorded on the instance rather
    than raised, so callers poll IsErrorHappened().
    """

    def __init__(self):
        self._error = False
        self.errorException = None
        # NOTE(review): the DB file is created in the current working
        # directory — confirm that is intentional for deployments.
        self.conn = sqlite3.connect('process.db')
        self._createtable()

    ##########################################################
    def _createtable(self):
        """Create the `process` table if it does not already exist."""
        try:
            c = self.conn.cursor()
            c.execute("""CREATE TABLE IF NOT EXISTS process (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    status TEXT NOT NULL DEFAULT 'W',
                    processcmd TEXT,
                    creationdate TIMESTAMP DEFAULT CURRENT_TIMESTAMP ,
                    launchdate TIMESTAMP DEFAULT 0,
                    finishdate TIMESTAMP DEFAULT 0)
                    """
                      )
        except Exception as e:
            self.errorHappened(e)

    ##########################################################
    def createEntry(self, cmd):
        """Insert a new pending command and return its row id.

        Uses a parameterized query: the previous string interpolation broke
        on quotes inside `cmd` and allowed SQL injection.
        """
        cur = self.conn.cursor()
        cur.execute("INSERT INTO process (processcmd) VALUES (?)", (cmd,))
        self.conn.commit()
        return cur.lastrowid

    ##########################################################
    def reserveProcess(self, id):
        """Mark the row as launched so no other worker picks it up."""
        try:
            self.conn.execute(
                "UPDATE process SET launchdate = CURRENT_TIMESTAMP WHERE id = ?",
                (id,))
            self.conn.commit()
        except Exception as e:
            self.errorHappened(e)

    ##########################################################
    def getNextProcess(self):
        """Return (id, processcmd) of the next unlaunched row, or None."""
        try:
            cursor = self.conn.execute(
                "SELECT id,processcmd FROM process WHERE launchdate = 0")
            # fetchone() returns the first row or None — no need to load all.
            return cursor.fetchone()
        except Exception as e:
            self.errorHappened(e)

    ##########################################################
    def processIsDone(self, id):
        """Mark the row as successfully finished."""
        self._updateprocess(id, "D")

    ##########################################################
    def processFailed(self, id):
        """Mark the row as failed."""
        self._updateprocess(id, "E")

    ##########################################################
    def _updateprocess(self, id, status):
        # Shared terminal-state update; parameterized for safety.
        try:
            self.conn.execute(
                "UPDATE process SET status = ?, finishdate = CURRENT_TIMESTAMP WHERE id = ?",
                (status, id))
            self.conn.commit()
        except Exception as e:
            self.errorHappened(e)

    ##########################################################
    def errorHappened(self, e):
        """Record an error on the instance (sticky until inspected)."""
        traceback.print_stack()
        print(e)
        self._error = True
        self.errorException = e

    ##########################################################
    def closeDB(self):
        """Close the underlying SQLite connection."""
        self.conn.close()

    ##########################################################
    def IsErrorHappened(self):
        """True if any operation since construction recorded an error."""
        return self._error
|
from touchio import TouchIn
from digitalio import DigitalInOut
from audioio import WaveFile, AudioOut
import board
def enable_speakers():
    """Drive the board's SPEAKER_ENABLE pin high so audio is audible."""
    enable_pin = DigitalInOut(board.SPEAKER_ENABLE)
    enable_pin.switch_to_output(value=True)
def play_file(speaker, path):
    """Start playing the WAV file at *path* on *speaker*.

    The stream is kept open deliberately: the WaveFile reads from it
    while playback continues in the background.
    """
    wav_stream = open(path, "rb")
    wav = WaveFile(wav_stream)
    speaker.play(wav)
class Handler:
    """Touch-event callback target that plays a sound on each press."""

    def __init__(self, speaker):
        self.speaker = speaker

    def handle(self, name, state):
        # Only trigger on press (state truthy), not on release.
        if not state:
            return
        play_file(self.speaker, 'piano.wav')
class TouchEvent:
    """Edge detector for one capacitive touch pad.

    Invokes *onchange(name, state)* whenever the pad's touch state flips.
    """

    # Raise the auto-calibrated threshold to reduce spurious triggers.
    THRESHOLD_ADJUSTMENT = 400

    def __init__(self, name, onchange):
        self.name = name
        self.last = False
        self.onchange = onchange
        self.touch = TouchIn(getattr(board, name))
        self.touch.threshold += self.THRESHOLD_ADJUSTMENT

    def process(self):
        """Poll the pad once; fire the callback on a state change."""
        now = self.touch.value
        if now == self.last:
            return
        self.onchange(self.name, now)
        self.last = now
# Wire everything together: enable the amplifier, create the audio output
# on the built-in speaker pin, and bind touch pad A1 to the sound handler.
enable_speakers()
speaker = AudioOut(board.SPEAKER)
handler = Handler(speaker)
event = TouchEvent('A1', handler.handle)
# CircuitPython main loop: poll the pad forever.
while True:
    event.process()
|
#!/usr/bin/env python
# coding:utf-8
from django.db import models
class Resume(models.Model):
    """One employment record: company, position, tenure and description."""
    # Field names and order are part of the DB schema/migrations — keep stable.
    company = models.CharField(u'公司', max_length=200, help_text='就职公司')
    position = models.CharField(u'职位', max_length=200)
    entry_time = models.DateField(u'入职时间')
    # Blank while still employed at the company.
    time_of_separation = models.DateField(u'离职时间', null=True, blank=True)
    verbose_for_work = models.TextField(u'工作描述', null=True, blank=True)
    class Meta:
        # Newest records first.
        ordering = ['-id']
    def __str__(self):
        return self.company
class ContactInfo(models.Model):
    """A contact channel (e.g. email, phone) and its value."""
    method = models.CharField(u'联系方式', max_length=200)
    detail = models.CharField(u'详情', max_length=512)
    def __str__(self):
        return self.method
class Aboutme(models.Model):
    """A social/profile link shown on the about page."""
    item = models.CharField(u'社交', max_length=200)
    link = models.URLField(u'链接', max_length=512)
    def __str__(self):
        return self.item
class OpenSourceProject(models.Model):
    """An open-source project authored by the resume owner."""
    name = models.CharField(u'项目', max_length=200)
    link = models.URLField(u'链接', max_length=512)
    def __str__(self):
        return self.name
class ContributedProject(models.Model):
    """An external project the resume owner has contributed to."""
    name = models.CharField(u'项目', max_length=200)
    link = models.URLField(u'链接', max_length=512)
    def __str__(self):
        return self.name
|
class Numbers:
    """A pair of numbers with arithmetic helpers and a tuple-valued property."""

    MULTIPLIER = 3.5

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def add(self):
        """Sum of the two stored numbers."""
        return self.y + self.x

    @classmethod
    def multiply(cls, a):
        """Scale *a* by the class-level MULTIPLIER."""
        return a * cls.MULTIPLIER

    @staticmethod
    def substract(b, c):
        """Difference b - c (name kept for API compatibility)."""
        return b - c

    @property
    def value(self):
        """Both numbers as an (x, y) tuple."""
        return (self.x, self.y)

    @value.setter
    def value(self, xy_tuple):
        (self.x, self.y) = xy_tuple

    @value.deleter
    def value(self):
        # Remove both coordinates; later access raises AttributeError.
        for attr_name in ('x', 'y'):
            delattr(self, attr_name)
# Quick self-checks of the Numbers API.
assert Numbers.MULTIPLIER == 3.5
n = Numbers(4, 4)
assert n.add() == 8
assert Numbers.multiply(2) == 7.0
assert Numbers.substract(5, 3) == 2
assert n.value == (4, 4)
n.value = (8, 6)
assert n.value == (8, 6)
del n.value
# After `del n.value` both attributes are gone; each access raises.
for attr_name in ('x', 'y'):
    try:
        print(getattr(n, attr_name))
    except Exception as e:
        print(e)
|
import sys
import logging
from functools import partial
from threading import Thread
from flask import Flask
from .settings import Configuration
from .worker import async_worker
from .automata import automata
from . import providers
# Module-level setup (runs on import): verbose logging and the Flask app
# that main_func() later attaches webhook routes to.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("webhooks")
app = Flask(__name__)
def main_func():
    """Entry point: load settings, start the worker thread, serve webhooks.

    Expects the settings YAML path as the single CLI argument.
    """
    if len(sys.argv) != 2:
        raise RuntimeError("You should provide the path to the settings YAML file as first argument")
    Configuration.load(sys.argv[1])
    # Future work: check the settings and allow multiple worker implementations (e.g. Celery)
    worker = Thread(target=async_worker, name="webhook worker")
    worker.start()
    # Register one Flask route per configured automaton.
    for automaton_name, automaton in automata.items():
        rule = f'{ Configuration.webhook_url_path }/{ automaton_name }'
        app.add_url_rule(rule, automaton_name, automaton.to_flask_view())
    app.run(host=Configuration.listen_ip,
            port=Configuration.listen_port)
def manual_trigger():
    """Run a single automaton's actions from the command line.

    Expects two CLI arguments: the settings YAML path and the automaton's
    YAML key.
    """
    if len(sys.argv) != 3:
        raise RuntimeError("You should provide the path to the settings YAML file as first argument "
                           "and the name of the automaton to be triggered (YAML key)")
    Configuration.load(sys.argv[1])
    requested = sys.argv[2]
    try:
        automaton = automata[requested]
    except KeyError:
        raise RuntimeError("Unrecognized automaton name '%s'" % requested)
    automaton.perform_actions()
|
"""
Copyright (c) 2018, Salesforce.com, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from wave_common.utils import import_class, validate_config, create_dirs
from dbconfig import DBConfig, create_db_config_from_config
import sys
class ProducerConfig:
    """
    A data Producer configuration

    Built from an INI-style config section named after the producer:
    dynamically imports the producer class, wires up its database config,
    and prepares the producer's data folders.
    """
    def __init__(self, name, config, rootpath, data_folder):
        self.name = name
        package = config.get(name, 'package')
        module = config.get(name, 'module')
        classname = config.get(name, 'producer')
        # Optional dataset identifier; missing keys fall back to None.
        self.data_id = config.get(name, 'ID', fallback=None)
        # HACK: producer packages are made importable by mutating sys.path.
        sys.path.append(package)
        # print(sys.path)
        producer_class = import_class(module + '.' + classname)
        print('load producer type ' + module + '.' + classname)
        assert producer_class is not None
        self.dataset = config.get(name, 'dataset')
        self.database_section = config.get(name, 'database')
        self.dbConfig = create_db_config_from_config(config, self.database_section)
        # Creates the data/done/errors folders as a side effect.
        self.dataConfig = DataConfig(rootpath, data_folder, self.name)
        # Producer instances are constructed with their DB config only.
        self.producer = producer_class(self.dbConfig)
        print('load producer ' + str(producer_class))
        assert self.producer is not None
        print(type(self.producer).__name__)
        validate_config(self.__dict__)
class DataConfig:
    """
    User defined data set configuration info.

    Derives (and creates) the data/done/errors folders for one producer.
    """
    def __init__(self, rootpath, datafolder, producername):
        base = rootpath + '/' + datafolder + '/' + producername
        self.dataFolder = base + '/data'
        self.doneFolder = base + '/done'
        self.errorFolder = base + '/errors'
        validate_config(self.__dict__)
        for folder in (self.dataFolder, self.doneFolder, self.errorFolder):
            create_dirs(folder)
class Setup:
    """
    User defined environment configuration info, read from the [Setup]
    section of the config.
    """
    def __init__(self, config):
        section = "Setup"
        def read(key):
            return config.get(section, key)
        # Comma-separated list of producer section names.
        self.producers = read('dataProducers').split(',')
        self.endpoint = read('endpoint')
        self.rootPath = read('rootPath')
        self.dataFolder = read('dataFolder')
        self.auth_url = read('authUrl')
        self.resource_url = read('resourceUrl')
        self.is_verify = read('is_verify_upload')
        validate_config(self.__dict__)
|
import numpy as np
from unittest import TestCase
from cmaes.cma import _decompress_symmetric, _compress_symmetric
class TestCompressSymmetric(TestCase):
    """Checks for the symmetric-matrix pack/unpack helpers."""

    def test_compress_symmetric_odd(self):
        full = np.array([[1, 2], [2, 3]])
        packed = _compress_symmetric(full)
        self.assertTrue(np.array_equal(packed, np.array([1, 2, 3])))

    def test_compress_symmetric_even(self):
        full = np.array([[1, 2, 3], [2, 4, 5], [3, 5, 6]])
        packed = _compress_symmetric(full)
        self.assertTrue(np.array_equal(packed, np.array([1, 2, 3, 4, 5, 6])))

    def test_decompress_symmetric_odd(self):
        packed = np.array([1, 2, 3])
        full = _decompress_symmetric(packed)
        self.assertTrue(np.array_equal(full, np.array([[1, 2], [2, 3]])))

    def test_decompress_symmetric_even(self):
        packed = np.array([1, 2, 3, 4, 5, 6])
        full = _decompress_symmetric(packed)
        self.assertTrue(np.array_equal(full, np.array([[1, 2, 3], [2, 4, 5], [3, 5, 6]])))
|
from password_manager.encryption.key_generator import KeyGenerator
def test_generate():
    """Key derivation yields a 32-byte key with the expected KDF metadata."""
    key, metadata = KeyGenerator("password").generate()
    assert len(key) == 32
    assert metadata.iterations == 1_000_000
    assert metadata.hmac == 'SHA512'
|
"""
Functions for reading and writing files
"""
from pathlib import Path
import time
import numpy as np
import pandas as pd
def load_array(files):
    """Create a 3D numpy array from a list
    of ascii files containing 2D arrays.

    Parameters
    ----------
    files : sequence, str, or Path
        List of text files. Array layers will be
        ordered the same as the files. A single path
        is loaded directly as a 2D array.

    Returns
    -------
    array3d : 3D numpy array (2D when a single path is given)
    """
    # A single path short-circuits to a plain 2D load.
    if isinstance(files, (str, Path)):
        return np.loadtxt(files)
    return np.stack([np.loadtxt(f) for f in files])
def read_csv(csvfile, col_limit=1e4, **kwargs):
    """Read tabular data with pandas.read_csv,
    unless the data are super wide (col_limit or greater columns),
    in which case read the data using pure python. The pure
    python approach below is apparently much faster than
    pandas.read_csv for very wide files.

    Parameters
    ----------
    csvfile : str or pathlike
    col_limit : int
        Column threshold at which to use pure python
        instead of pandas.read_csv, by default 1e4
    **kwargs : keyword arguments to pandas.read_csv or
        pandas.DataFrame (in the case of a wide file)

    Returns
    -------
    df : pandas.DataFrame
        Note: values from the pure-python (wide) path are kept as strings.
    """
    t0 = time.time()
    # Work out the delimiter so both the header count and (fix) the
    # wide-file parsing below honor the requested separator; previously the
    # wide path always split data rows on ',' regardless of the delimiter.
    delim = kwargs.get('delimiter', ',')
    if kwargs.get('delim_whitespace', False):
        delim = ' '
    with open(csvfile) as src:
        header = next(iter(src)).split(delim)
    # Fix: use >= so "col_limit or greater" matches the documented behavior.
    if len(header) >= col_limit:
        lines = []
        with open(csvfile) as src:
            header = next(iter(src)).strip().split(delim)
            for line in src:
                lines.append(line.strip().split(delim))
        # handle duplicate columns in the same way that pandas does
        col_counts = {}
        new_header = []
        for col in header:
            if col not in col_counts:
                col_counts[col] = 1
                append_column_name = col
            else:
                append_column_name = f"{col}.{col_counts[col]}"
                col_counts[col] += 1
            new_header.append(append_column_name)
        df = pd.DataFrame(lines, columns=new_header, **kwargs)
    else:
        df = pd.read_csv(csvfile, **kwargs)
    print("took {:.2f}s\n".format(time.time() - t0))
    return df
def write_insfile(results_dataframe, outfile, obsnme_column='obsnme',
                  simulated_obsval_column='modelled', index=True):
    """Write instruction file for PEST. Assumes that
    observations names are in an obsnme_column and
    that the observation values an obsval_column. The values in obsval_column
    will be replaced in the instruction file with the names in obsnme_column.

    Parameters
    ----------
    results_dataframe : pandas dataframe
        Processed model output, in same structure/format
        as the processed output file.
    outfile : filepath
        Name of instruction file.
    obsnme_column : str
        Column in results_dataframe with observation names
    simulated_obsval_column : str
        Column in results_dataframe with the simulated observation equivalents
    index : bool
        Whether or not the index should be included; needs to be the same as the
        actual results file.
    """
    ins = results_dataframe.copy()
    if index:
        # Move the index into the columns so it is marked up as well.
        ins.reset_index(inplace=True)
    # Every output row starts with PEST's 'l1' line-advance flag.
    ins.index = ['l1'] * len(ins)
    # Observation values become !obsnme! markers for the PEST reader.
    ins[simulated_obsval_column] = [
        '!{}!'.format(obs_name) for obs_name in results_dataframe[obsnme_column]
    ]
    # Every other column is replaced by a whitespace flag.
    for column in ins.columns:
        if column != simulated_obsval_column:
            ins[column] = 'w'
    with open(outfile, 'w', newline="") as dest:
        dest.write('pif @\n@{}@\n'.format(obsnme_column))
        ins.to_csv(dest, sep=' ', index=True, header=False)
    print(f'wrote {len(ins):,} observation instructions to {outfile}')
#! /usr/bin/env python
import os, subprocess, shutil, random, optparse
# Compression kind -> tar flag set (create + compress + archive filename).
comp = {
    'bz2': 'cjf',
    'xz' : 'cJf',
    'gz' : 'czf',
}
def read_wafdir():
    """Return the shuffled list of waflib/*.py paths (incl. Tools, extras).

    Raises ImportError when ./waflib is missing — preserving the original
    contract — but no longer swallows unrelated exceptions (the old code
    used a bare ``except:``).
    """
    d = 'waflib'
    try:
        entries = os.listdir(d)
    except OSError as e:
        raise ImportError('please provide a waflib directory in the current folder') from e
    lst = [d + os.sep + x for x in entries if x.endswith('.py')]
    for sub in ('Tools', 'extras'):
        subdir = d + os.sep + sub
        lst.extend(subdir + os.sep + x for x in os.listdir(subdir) if x.endswith('.py'))
    # Random order is intentional: gen() evolves orderings from this seed.
    random.shuffle(lst)
    #lst.sort()
    return lst
def gen(lst, options):
    """Genetic search for a file ordering that minimizes (or, with --max,
    maximizes) the size of a tar archive of the given files.

    Ported to Python 3: ``xrange`` -> ``range``, and the population rows are
    materialized as real lists — on Python 3 a bare ``range`` object is
    immutable and the mutation operators below (pop/append/item assignment)
    would raise AttributeError/TypeError.
    """
    if options.maxi:
        opti_ref = 0
        filename = 'max.tar.%s' % options.kind
        def compare(a, b):
            return a > b
    else:
        opti_ref = 1000000000
        filename = 'min.tar.%s' % options.kind
        def compare(a, b):
            return a < b
    cmd = 'tar %s %s ' % (comp[options.kind], filename)
    opti = [opti_ref]
    LEN = len(lst)
    POP = 3*LEN + 1
    popul = [list(range(LEN)) for x in range(POP)]
    fitn = [0 for x in range(POP)]
    def rnd():
        return random.randint(0, LEN - 1)
    def mutate():
        for x in range(LEN):
            # rotate the previous element by one
            # (NOTE(review): this also aliases popul[x+LEN] to
            # popul[x+LEN-1]; preserved from the original implementation)
            v = popul[x+LEN] = popul[x+LEN - 1]
            a = v.pop(0)
            v.append(a)
        for x in range(LEN):
            # swap elements
            a = rnd()
            b = rnd()
            v = popul[x]
            c = v[a]
            v[a] = v[b]
            v[b] = c
        for x in range(LEN):
            # get one element out, add at the end
            v = popul[x+2*LEN]
            a = rnd()
            c = v[a]
            del v[a]
            v.append(c)
    def evil():
        # Evaluate fitness (archive size) of every ordering, keep the best.
        best = opti_ref
        pos = -1
        for x in range(len(popul)):
            v = popul[x]
            arr = [lst[a] for a in v]
            tmp = '%s %s' % (cmd, ' '.join(arr))
            subprocess.Popen(tmp, shell=True).wait()
            siz = os.stat(filename).st_size
            fitn[x] = siz
            if compare(siz, best):
                best = siz
                pos = x
            if compare(siz, opti[0]):
                # New global optimum: snapshot the archive.
                opti[0] = siz
                shutil.copy2(filename, 'best_' + filename)
            # Sanity check: each ordering is still a permutation of 0..LEN-1.
            assert (sum(popul[x]) == sum(range(LEN)))
        # Clone the generation winner over the rest of the population.
        for x in range(len(popul)):
            if x == pos:
                continue
            popul[x] = popul[pos][:]
            assert(len(popul[x]) == LEN)
        return best
    for i in range(10000):
        mutate()
        print(evil())
if __name__ == '__main__':
    # CLI: pick the compression kind and whether to maximize or minimize.
    parser = optparse.OptionParser()
    parser.add_option('--max', dest='maxi', default=False, action='store_true',
                      help='maximize the file size (default is minimize)')
    parser.add_option('--kind', dest='kind', default='bz2', action='store',
                      help='bz2, xz or gz')
    options, _args = parser.parse_args()
    gen(read_wafdir(), options)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: redeclare ForumPost.parent as a FK to ForumTopic
    with related_name='posts'. Do not hand-edit applied migrations."""
    dependencies = [
        ('forum', '0004_auto_20160530_0354'),
    ]
    operations = [
        migrations.AlterField(
            model_name='forumpost',
            name='parent',
            field=models.ForeignKey(related_name='posts', to='forum.ForumTopic'),
            preserve_default=True,
        ),
    ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/12/18 12:50
# @Author : Eiya_ming
# @Email : eiyaming@163.com
# @File : v10_serial.py
import serial  # pyserial
import serial.tools.list_ports
# Enumerate the serial ports available on this machine.
port_list = list(serial.tools.list_ports.comports())
print(port_list)
if len(port_list) == 0:
    print('无可用串口')
else:
    for i in range(0,len(port_list)):
        print(port_list[i])
try:
    # Use the first detected port at 115200 baud.
    # NOTE(review): `.name` may be None on some platforms — `.device` is
    # usually the portable attribute; confirm on the target OS.
    portx = port_list[0].name
    bps = 115200
    # Timeout: None = block forever, 0 = return immediately,
    # any other value = wait up to that many seconds.
    timex = None
    ser = serial.Serial(portx, bps, timeout=timex)
    print("串口详情参数:", ser)
    # Read one byte and show it as hex.
    print(ser.read().hex())
    print("---------------")
    ser.close()  # release the serial port
except Exception as e:
    print("---异常---:", e)
#####################################
# Developed for studies #
# Developed by: Lyon kevin #
#####################################
import socket
# UDP client socket (datagram, IPv4).
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Counter guarding against sending more than once (avoid flooding / DoS).
som = 0
while True:
    # Send only one message
    if som < 1:
        data = 'hello :)'.encode()
        som += 1
        # Target: local server on UDP port 666.
        client.sendto(data, ("127.0.0.1", 666))
        # Block until the server answers, then print the reply.
        rec = client.recv(1024)
        print (f"\nAnswer: {rec.decode()}\n")
        # One round-trip done — leave the loop.
        break
# Close session
client.close()
# Build the phrase from its two words and print it.
x = "your"
y = "mom"
print(f"{x}{y}")
import json
from collections import OrderedDict
from .helpers import to_b64
class DictToBase64Normaliser():
    """Normalises a dict and returns it as base64-encoded JSON.

    Values are stringified, keys sorted in reverse order, and the resulting
    JSON document (indent=0) is base64 encoded via ``to_b64``.
    """

    def __init__(self, dictionary):
        self.__dictionary = dictionary

    def normalise(self):
        """
        Normalise a python dictionary, encode it as JSON return it base64
        encoded.

        Raises TypeError when the input is not a dict and ValueError when
        it is empty.
        """
        self._validate_is_dictionary()
        self._validate_not_empty()
        self._stringify_numbers()
        self._order_by_keys()
        return to_b64(self._encode_to_json())

    def _validate_is_dictionary(self):
        # Previously an empty stub; now enforces the declared contract.
        if not isinstance(self.__dictionary, dict):
            raise TypeError(
                'expected a dictionary, got %s' % type(self.__dictionary).__name__
            )

    def _validate_not_empty(self):
        # Previously an empty stub; an empty payload is considered invalid.
        if not self.__dictionary:
            raise ValueError('dictionary must not be empty')

    def _stringify_numbers(self):
        # All values become strings so the JSON encoding is uniform.
        self.__dictionary = {k: str(v) for k, v in self.__dictionary.items()}

    def _order_by_keys(self):
        # Reverse-sorted key order is part of the normalised form.
        self.__dictionary = OrderedDict(
            sorted(self.__dictionary.items(), key=lambda x: x[0], reverse=True)
        )

    def _encode_to_json(self):
        return json.dumps(self.__dictionary, indent=0).encode('utf-8')
|
# Copyright 2014 CERN.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from openstackclient.common import exceptions
from openstackclient.identity.v3 import mapping
from openstackclient.tests import fakes
from openstackclient.tests.identity.v3 import fakes as identity_fakes
class TestMapping(identity_fakes.TestFederatedIdentity):
    """Base fixture: exposes the federation mappings manager as a mock."""
    def setUp(self):
        super(TestMapping, self).setUp()
        federation_lib = self.app.client_manager.identity.federation
        self.mapping_mock = federation_lib.mappings
        # Fresh call history for every test.
        self.mapping_mock.reset_mock()
class TestMappingCreate(TestMapping):
    """`mapping create` reads its rules file and calls mappings.create()."""
    def setUp(self):
        super(TestMappingCreate, self).setUp()
        self.mapping_mock.create.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.MAPPING_RESPONSE),
            loaded=True
        )
        self.cmd = mapping.CreateMapping(self.app, None)
    def test_create_mapping(self):
        arglist = [
            '--rules', identity_fakes.mapping_rules_file_path,
            identity_fakes.mapping_id
        ]
        verifylist = [
            ('mapping', identity_fakes.mapping_id),
            ('rules', identity_fakes.mapping_rules_file_path)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        # Stub the rules-file read so no file needs to exist on disk.
        mocker = mock.Mock()
        mocker.return_value = identity_fakes.MAPPING_RULES
        with mock.patch("openstackclient.identity.v3.mapping."
                        "CreateMapping._read_rules", mocker):
            columns, data = self.cmd.take_action(parsed_args)
        self.mapping_mock.create.assert_called_with(
            mapping_id=identity_fakes.mapping_id,
            rules=identity_fakes.MAPPING_RULES)
        collist = ('id', 'rules')
        self.assertEqual(collist, columns)
        datalist = (identity_fakes.mapping_id,
                    identity_fakes.MAPPING_RULES)
        self.assertEqual(datalist, data)
class TestMappingDelete(TestMapping):
    """`mapping delete` calls mappings.delete() and returns nothing."""
    def setUp(self):
        super(TestMappingDelete, self).setUp()
        self.mapping_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.MAPPING_RESPONSE),
            loaded=True)
        self.mapping_mock.delete.return_value = None
        self.cmd = mapping.DeleteMapping(self.app, None)
    def test_delete_mapping(self):
        arglist = [
            identity_fakes.mapping_id
        ]
        verifylist = [
            ('mapping', identity_fakes.mapping_id)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.mapping_mock.delete.assert_called_with(
            identity_fakes.mapping_id)
        self.assertIsNone(result)
class TestMappingList(TestMapping):
    """`mapping list` shows mapping IDs only (rules are omitted)."""
    def setUp(self):
        super(TestMappingList, self).setUp()
        self.mapping_mock.get.return_value = fakes.FakeResource(
            None,
            {'id': identity_fakes.mapping_id},
            loaded=True)
        # Pretend list command returns list of two mappings.
        # NOTE(marek-denis): We are returning FakeResources with mapping id
        # only as ShowMapping class is implemented in a way where rules will
        # not be displayed, only mapping ids.
        self.mapping_mock.list.return_value = [
            fakes.FakeResource(
                None,
                {'id': identity_fakes.mapping_id},
                loaded=True,
            ),
            fakes.FakeResource(
                None,
                {'id': 'extra_mapping'},
                loaded=True,
            ),
        ]
        # Get the command object to test
        self.cmd = mapping.ListMapping(self.app, None)
    def test_mapping_list(self):
        arglist = []
        verifylist = []
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.mapping_mock.list.assert_called_with()
        collist = ('ID',)
        self.assertEqual(collist, columns)
        datalist = [(identity_fakes.mapping_id,), ('extra_mapping',)]
        self.assertEqual(datalist, data)
class TestMappingSet(TestMapping):
    """`mapping set` replaces the rules; a bad rules path raises CommandError."""
    def setUp(self):
        super(TestMappingSet, self).setUp()
        self.mapping_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.MAPPING_RESPONSE),
            loaded=True
        )
        self.mapping_mock.update.return_value = fakes.FakeResource(
            None,
            identity_fakes.MAPPING_RESPONSE_2,
            loaded=True
        )
        # Get the command object to test
        self.cmd = mapping.SetMapping(self.app, None)
    def test_set_new_rules(self):
        arglist = [
            '--rules', identity_fakes.mapping_rules_file_path,
            identity_fakes.mapping_id
        ]
        verifylist = [
            ('mapping', identity_fakes.mapping_id),
            ('rules', identity_fakes.mapping_rules_file_path)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        # Stub the rules-file read so no file needs to exist on disk.
        mocker = mock.Mock()
        mocker.return_value = identity_fakes.MAPPING_RULES_2
        with mock.patch("openstackclient.identity.v3.mapping."
                        "SetMapping._read_rules", mocker):
            columns, data = self.cmd.take_action(parsed_args)
        self.mapping_mock.update.assert_called_with(
            mapping=identity_fakes.mapping_id,
            rules=identity_fakes.MAPPING_RULES_2)
        collist = ('id', 'rules')
        self.assertEqual(collist, columns)
        datalist = (identity_fakes.mapping_id,
                    identity_fakes.MAPPING_RULES_2)
        self.assertEqual(datalist, data)
    def test_set_rules_wrong_file_path(self):
        # Without the _read_rules stub, reading the (nonexistent) file fails.
        arglist = [
            '--rules', identity_fakes.mapping_rules_file_path,
            identity_fakes.mapping_id
        ]
        verifylist = [
            ('mapping', identity_fakes.mapping_id),
            ('rules', identity_fakes.mapping_rules_file_path)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.assertRaises(
            exceptions.CommandError,
            self.cmd.take_action,
            parsed_args)
class TestMappingShow(TestMapping):
    """`mapping show` fetches one mapping and displays id + rules."""
    def setUp(self):
        super(TestMappingShow, self).setUp()
        self.mapping_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.MAPPING_RESPONSE),
            loaded=True
        )
        self.cmd = mapping.ShowMapping(self.app, None)
    def test_mapping_show(self):
        arglist = [
            identity_fakes.mapping_id
        ]
        verifylist = [
            ('mapping', identity_fakes.mapping_id)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.mapping_mock.get.assert_called_with(
            identity_fakes.mapping_id)
        collist = ('id', 'rules')
        self.assertEqual(collist, columns)
        datalist = (identity_fakes.mapping_id,
                    identity_fakes.MAPPING_RULES)
        self.assertEqual(datalist, data)
|
import logging
import os

# Shared format: timestamp, file, function and line of each record.
log_format = "%(asctime)s: %(filename)s %(funcName)s :%(lineno)s => %(message)s"
logging.basicConfig(level=logging.DEBUG, format=log_format)
log = logging.getLogger("http_server")
# exist_ok avoids the check-then-create race of the old
# `if not os.path.exists(...)` guard.
os.makedirs("logs", exist_ok=True)
# Plain string literal — the old `f"..."` prefix had no placeholders.
file_handler = logging.FileHandler("logs/httpserver.log")
formatter = logging.Formatter(log_format)
file_handler.setFormatter(formatter)
log.addHandler(file_handler)
|
from abc import ABCMeta, abstractmethod
class Detector(metaclass=ABCMeta):
    """
    This class is an abstract class for general error detection,
    it requires for every sub-class to implement the
    setup and detect_noisy_cells methods.

    Fix: the original set ``__metaclass__ = ABCMeta``, which is the
    Python 2 spelling and has NO effect on Python 3 — abstract methods
    were not actually enforced. ``metaclass=`` restores the enforcement,
    so instantiating a class with unimplemented abstract methods now
    raises TypeError.
    """

    def __init__(self, name):
        """
        Construct error detection object

        :param name: human-readable name of this detector
        """
        self.name = name
        # Dataset is attached later via setup().
        self.ds = None

    @abstractmethod
    def setup(self, dataset, env):
        """Attach a dataset/environment to this detector."""
        raise NotImplementedError

    @abstractmethod
    def detect_noisy_cells(self):
        """
        This method creates a dataframe which has the information
        (tuple index,attribute) for the dk_cells

        :return dataframe for the dk_cell
        """
        raise NotImplementedError
# Generated by Django 3.2.4 on 2021-08-27 14:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add CASCADE delete + related_names on the two
    double-counting FKs. Do not hand-edit applied migrations."""
    dependencies = [
        ('doublecount', '0002_auto_20210824_1438'),
    ]
    operations = [
        migrations.AlterField(
            model_name='doublecountingproduction',
            name='dca',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='production', to='doublecount.doublecountingagreement'),
        ),
        migrations.AlterField(
            model_name='doublecountingsourcing',
            name='dca',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sourcing', to='doublecount.doublecountingagreement'),
        ),
    ]
|
import subprocess
import sys
import os
import setup_util
def start(args, logfile, errfile):
    """Build and launch the cpoll_cppsp benchmark server.

    Rewrites the DB host constant in connectioninfo.H, compiles via make,
    then starts the server in the background. Returns 0 (framework
    convention for success).
    """
    setup_util.replace_text("cpoll_cppsp/www/connectioninfo.H", "\\#define BENCHMARK_DB_HOST \".*\"", "#define BENCHMARK_DB_HOST \"" + args.database_host + "\"")
    subprocess.check_call("make", shell=True, cwd="cpoll_cppsp", stderr=errfile, stdout=logfile)
    # Popen (not check_call): the server keeps running after start() returns.
    subprocess.Popen("./run_application \"$(pwd)\"/www -g g++-4.8 -m /forcedynamic.cppsm", shell=True, cwd="cpoll_cppsp", stderr=errfile, stdout=logfile);
    return 0
def stop(logfile, errfile):
    """Kill any running cppsp_standalone benchmark processes.

    :return: 0 (framework convention for success)
    """
    p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
    out, err = p.communicate()
    for line in out.splitlines():
        # Fix: `out` is bytes on Python 3, so the needle must be bytes too;
        # the old `'cppsp_standalone' in line` raised TypeError.
        if b'cppsp_standalone' in line:
            pid = int(line.split(None, 2)[1])
            os.kill(pid, 9)
    return 0
|
#-----------------Libraries-------------------------
### System I/O libraries; more may be added for later use.
import hashlib

#-----------------Variables-------------------------
## Input file, one message per line (lines are stripped before hashing).
## TODO: ask the user for this path instead of hard-coding it.
infile = '8H'
#------------------SHA256_Hashing_Algorithm--------------
# `with` guarantees both streams are closed — the original opened the
# output file in append mode and never closed either file.
with open(infile, "r") as inf, open("MD.txt", "a") as outfile:
    # Read the input line by line and append one SHA-256 hex digest per
    # line to MD.txt (strip the newline, encode for hashing, add '\n').
    for line in inf:
        outfile.write(hashlib.sha256(line.strip().encode('utf-8')).hexdigest() + '\n')
|
class MyBomb:
    """Iterable countdown bomb: iterating counts down from `start`."""

    def __init__(self, start):
        print(f'Activating the bomb and it will explode in {start} seconds')
        self.start = start

    def __iter__(self):
        # Each iteration gets its own independent countdown.
        return MyBombIterator(self.start)
class MyBombIterator:
    """Iterator counting down from `count` to 1, then "exploding"."""

    def __init__(self, count):
        self.count = count

    def __iter__(self):
        # Fix: iterators must themselves be iterable (iterator protocol);
        # without this, `for x in MyBombIterator(n)` and `list(...)` raised
        # TypeError.
        return self

    def __next__(self):
        if self.count <= 0:
            print('BAMM!!')
            raise StopIteration
        value = self.count
        self.count -= 1
        return value
|
import json
import subprocess
def docker_names():
    """Return [mysql_container_name, wikibase_container_name] from 'docker ps'.

    Returns None when no containers are running or when either of the
    expected containers ('_mysql_' / '_wikibase_') cannot be found
    (the original raised IndexError in the latter case).
    """
    command = 'docker ps --format "{{.Names}}"'
    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    raw = process.stdout.readlines()
    process.stdout.close()
    # wait() (not poll()) so the child is actually reaped.
    process.wait()
    names = [s.decode('ascii').replace('\n', '') for s in raw]
    mysql = next((k for k in names if '_mysql_' in k), None)
    wikibase = next((k for k in names if '_wikibase_' in k), None)
    if mysql is None or wikibase is None:
        return None
    return [mysql, wikibase]
def docker_inspect(container):
    """Wrapper for 'docker inspect'. Returns dictionary of settings or empty
    dictionary in case of error."""
    proc = subprocess.run(
        ['docker', 'inspect', container],
        capture_output=True,
    )
    if proc.returncode == 0:
        # 'docker inspect' emits a JSON array; the first element holds the
        # settings for the requested container.
        return json.loads(proc.stdout)[0]
    print("Error querying docker daemon:", proc.stdout, proc.stderr)
    return {}
def docker_env(container):
    """Returns dictionary of container environment variables or empty
    dictionary in case of error."""
    config = docker_inspect(container)
    if not config:
        return {}
    env = {}
    # Entries look like 'KEY=value'; split on the first '=' only.
    for entry in config["Config"]["Env"]:
        sep = entry.find('=')
        env[entry[0:sep]] = entry[sep + 1:]
    return env
def docker_ports(container):
    """Returns dictionary of container network ports or empty dictionary in
    case of error."""
    config = docker_inspect(container)
    if not config:
        return {}
    return config["NetworkSettings"]["Ports"]
|
import numpy as np
from numba.decorators import jit, autojit
import hickle
import os, gzip
def binary_search(a, x):
    """Return an index of ``x`` in the sorted array ``a``, or -1 if absent.

    Written with only array indexing and integer arithmetic so that it can
    be compiled by numba in nopython mode.
    """
    low, high = 0, a.shape[0]
    while low < high:
        middle = (low + high) // 2
        candidate = a[middle]
        if candidate == x:
            return middle
        if candidate < x:
            low = middle + 1
        else:
            high = middle
    return -1
binary_search_numba = autojit(binary_search, nopython=True)
def extract(all_elems_codes, out, ascii_list):
    """Scan ``all_elems_codes`` backwards and collect printable-ASCII runs.

    A run is delimited by a 0 code or by any code not found in ``ascii_list``.
    Runs of length >= 4 are recorded in ``out`` as (start, end-exclusive)
    index pairs; runs of length 1, 2 or 3 are only tallied.  Returns
    (num_strings_found, count_len_1, count_len_2, count_len_3).
    Written with plain loops and ints so numba can compile it in nopython mode.
    """
    MAX_STR = out.shape[0]
    cur_num_str = 0
    i = all_elems_codes.shape[0] - 1
    state = 0       # 1 while inside a candidate run, 0 otherwise
    cur_end = -1    # exclusive end index of the current run
    min_length = 4
    count_one = 0
    count_two = 0
    count_three = 0
    while i >= 1:
        if all_elems_codes[i] == 0:
            # A NUL terminates the current run (if any) and opens a new one.
            if (state == 1):
                # Run length is the gap between the two delimiters.
                if (cur_end - i - 1 >= min_length):
                    out[cur_num_str, 0] = i + 1
                    out[cur_num_str, 1] = cur_end
                    cur_num_str += 1
                elif (cur_end - i - 1 == 1):
                    count_one += 1
                elif (cur_end - i - 1 == 2):
                    count_two += 1
                elif (cur_end - i - 1 == 3):
                    count_three += 1
            state = 1
            cur_end = i
        else:
            if binary_search_numba(ascii_list, all_elems_codes[i]) == -1:
                # Non-printable code: close the run without opening a new one.
                if (state == 1):
                    state = 0
                    if (cur_end - i - 1 >= min_length):
                        out[cur_num_str, 0] = i + 1
                        out[cur_num_str, 1] = cur_end
                        cur_num_str += 1
                    elif (cur_end - i - 1 == 1):
                        count_one += 1
                    elif (cur_end - i - 1 == 2):
                        count_two += 1
                    elif (cur_end - i - 1 == 3):
                        count_three += 1
        i -= 1
        if cur_num_str == MAX_STR:
            # Output buffer is full — stop scanning.
            break
    return cur_num_str, count_one, count_two, count_three
ex_numba = autojit(extract, nopython=True)
def get_dict():
    """Map two-character uppercase hex strings ('00'..'FF') to their byte
    values, with the wildcard token '??' mapped to the sentinel 256."""
    mapping = {}
    for value in range(256):
        mapping['%02X' % value] = value
    mapping['??'] = 256
    return mapping
def get_strings(byte_data):
    """Extract printable strings and length statistics from hex-dump text.

    ``byte_data`` appears to be hex-dump text where each line begins with an
    address token followed by space-separated two-character hex byte tokens
    (or '??' for unknown bytes) — TODO confirm against the caller.
    Returns [name, strings, [count_1, count_2, count_3, total_len, ratio]]
    (``name`` is always '' here).
    """
    text = byte_data
    name = ''
    # ''.join over a str is the identity; for a sequence of strings it
    # concatenates them before splitting into dump lines.
    lines = ''.join(text).split('\n')
    all_elems_codes = []
    convert_dict = get_dict()
    # Printable ASCII (32..126) plus CR/LF count as "string" characters.
    ascii_list = np.array(list(range(32, 127)) + [13, 10])
    ascii_list.sort()
    for l in lines:
        elems = l.split(' ')
        # elems[0] is the address column; the rest are byte tokens.
        all_elems_codes.extend([convert_dict[x] for x in elems[1:]])
    all_elems_codes = np.array(all_elems_codes)
    # Room for at most 15000 (start, end) string spans.
    out_ = np.zeros([15000, 2], dtype=np.int64)
    m,count_one,count_two, count_three = ex_numba(all_elems_codes, out_, ascii_list)
    # Total "stringy" length includes the short (1-3 char) runs that were
    # only counted, not stored.
    string_total_len = np.sum(out_[:,1] - out_[:,0]) + count_one + count_two + count_three
    string_ratio = float(string_total_len)/len(all_elems_codes)
    strings = []
    for i in range(m):
        strings.extend(
            [''.join([chr(x) for x in all_elems_codes[out_[i, 0]:out_[i, 1]]])])
    return [name, strings, [count_one,count_two,count_three,string_total_len,string_ratio]]
def extract_length(data):
    """Build a per-sample feature matrix from string-length statistics.

    ``data`` is a list of ``get_strings`` results:
    [name, strings, [count_1, count_2, count_3, total_len, ratio]].
    Returns a (n_samples, 116) array: short-run counts (3), per-length
    bincounts for lengths 4..99 (96), bucketed length counts (15), then
    total_len and ratio.
    """
    another_f = np.vstack([x[2] for x in data])
    # The 0 and 10000 sentinels guarantee every bincount spans indices
    # 0..10000, so all the fixed-range slices below are in bounds.
    len_arrays = [np.array([len(y) for y in x[1]] + [0] + [10000]) for x in data]
    bincounts = [np.bincount(arr) for arr in len_arrays]
    counts = np.concatenate(
        [another_f[:, :3], np.vstack([arr[4:100] for arr in bincounts])], axis=1)
    # NOTE: these windows index ``counts`` (3 stat columns + lengths 4..99),
    # not raw lengths — preserved exactly as in the original pipeline.
    counts_0_10 = np.sum(counts[:, 0:10], axis=1)[:, None]
    counts_10_30 = np.sum(counts[:, 10:30], axis=1)[:, None]
    counts_30_60 = np.sum(counts[:, 30:60], axis=1)[:, None]
    counts_60_90 = np.sum(counts[:, 60:90], axis=1)[:, None]
    counts_0_100 = np.sum(counts[:, 0:100], axis=1)[:, None]
    counts_100_150 = [np.sum(arr[100:150]) for arr in bincounts]
    counts_150_250 = [np.sum(arr[150:250]) for arr in bincounts]
    # Fixed: the original sliced arr[250:450], double-counting lengths
    # 400..449 in both this bucket and counts_400_600.
    counts_250_400 = [np.sum(arr[250:400]) for arr in bincounts]
    counts_400_600 = [np.sum(arr[400:600]) for arr in bincounts]
    counts_600_900 = [np.sum(arr[600:900]) for arr in bincounts]
    counts_900_1300 = [np.sum(arr[900:1300]) for arr in bincounts]
    counts_1300_2000 = [np.sum(arr[1300:2000]) for arr in bincounts]
    counts_2000_3000 = [np.sum(arr[2000:3000]) for arr in bincounts]
    counts_3000_6000 = [np.sum(arr[3000:6000]) for arr in bincounts]
    counts_6000_15000 = [np.sum(arr[6000:15000]) for arr in bincounts]
    # (The original also computed median/mean/var of the lengths here, but
    # never used them — removed as dead work.)
    feats = np.concatenate([np.vstack(counts),
                            counts_0_10,
                            counts_10_30,
                            counts_30_60,
                            counts_60_90,
                            counts_0_100,
                            np.array(counts_100_150)[:, None],
                            np.array(counts_150_250)[:, None],
                            np.array(counts_250_400)[:, None],
                            np.array(counts_400_600)[:, None],
                            np.array(counts_600_900)[:, None],
                            np.array(counts_900_1300)[:, None],
                            np.array(counts_1300_2000)[:, None],
                            np.array(counts_2000_3000)[:, None],
                            np.array(counts_3000_6000)[:, None],
                            np.array(counts_6000_15000)[:, None],
                            another_f[:, 3:]
                            ], axis=1)
    return feats
def dump_names(strings_feats_dir):
    """Write the ordered list of string-length feature names (matching the
    columns produced by extract_length) to
    <strings_feats_dir>/strings_feats_names via hickle."""
    bucket_names = [
        'string_len_counts_0_10',
        'string_len_counts_10_30',
        'string_len_counts_30_60',
        'string_len_counts_60_90',
        'string_len_counts_0_100',
        'string_len_counts_100_150',
        'string_len_counts_150_250',
        'string_len_counts_250_400',
        'string_len_counts_400_600',
        'string_len_counts_600_900',
        'string_len_counts_900_1300',
        'string_len_counts_1300_2000',
        'string_len_counts_2000_3000',
        'string_len_counts_3000_6000',
        'string_len_counts_6000_15000',
        'string_total_len',
        'string_ratio',
    ]
    names = ['string_len_counts_%d' % i for i in range(1, 100)] + bucket_names
    hickle.dump(names, os.path.join(strings_feats_dir, 'strings_feats_names'))
#
# PySNMP MIB module A3COM-HUAWEI-LB-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/A3COM-HUAWEI-LB-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 16:50:48 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): pysmi-generated MIB module (see header) — regenerate from the
# ASN.1 source rather than hand-editing. ``mibBuilder`` is injected by the
# pysnmp loader at execution time.
# --- Symbol imports ---------------------------------------------------------
h3cCommon, = mibBuilder.importSymbols("A3COM-HUAWEI-OID-MIB", "h3cCommon")
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter64, ObjectIdentity, NotificationType, IpAddress, Integer32, TimeTicks, ModuleIdentity, Gauge32, Unsigned32, MibIdentifier, Bits, iso, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "ObjectIdentity", "NotificationType", "IpAddress", "Integer32", "TimeTicks", "ModuleIdentity", "Gauge32", "Unsigned32", "MibIdentifier", "Bits", "iso", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# --- Module identity --------------------------------------------------------
h3cLB = ModuleIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 116))
h3cLB.setRevisions(('2010-12-01 00:00',))
if mibBuilder.loadTexts: h3cLB.setLastUpdated('201012010000Z')
if mibBuilder.loadTexts: h3cLB.setOrganization('Hangzhou H3C Technologies Co., Ltd.')
# --- Tables: real-server groups and real servers ----------------------------
h3cLBTables = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 116, 1))
h3cLBRealServerGroupTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 116, 1, 1), )
if mibBuilder.loadTexts: h3cLBRealServerGroupTable.setStatus('current')
h3cLBRealServerGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 116, 1, 1, 1), ).setIndexNames((0, "A3COM-HUAWEI-LB-MIB", "h3cLBRealServerGroupName"))
if mibBuilder.loadTexts: h3cLBRealServerGroupEntry.setStatus('current')
h3cLBRealServerGroupName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 116, 1, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: h3cLBRealServerGroupName.setStatus('current')
h3cLBRealServerTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 116, 1, 2), )
if mibBuilder.loadTexts: h3cLBRealServerTable.setStatus('current')
h3cLBRealServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 116, 1, 2, 1), ).setIndexNames((0, "A3COM-HUAWEI-LB-MIB", "h3cLBRealServerGroupName"), (0, "A3COM-HUAWEI-LB-MIB", "h3cLBRealServerName"))
if mibBuilder.loadTexts: h3cLBRealServerEntry.setStatus('current')
h3cLBRealServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 116, 1, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: h3cLBRealServerName.setStatus('current')
h3cLBRealServerStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 116, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("slowdown", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cLBRealServerStatus.setStatus('current')
h3cLBRealServerConnectNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 116, 1, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cLBRealServerConnectNumber.setStatus('current')
# --- Notifications ----------------------------------------------------------
h3cLBTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 116, 2))
h3cLBTrapPrex = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 116, 2, 0))
h3cLBRealServerOverLoad = NotificationType((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 116, 2, 0, 1)).setObjects(("A3COM-HUAWEI-LB-MIB", "h3cLBRealServerGroupName"), ("A3COM-HUAWEI-LB-MIB", "h3cLBRealServerName"), ("A3COM-HUAWEI-LB-MIB", "h3cLBRealServerConnectNumber"))
if mibBuilder.loadTexts: h3cLBRealServerOverLoad.setStatus('current')
# --- Exports ----------------------------------------------------------------
mibBuilder.exportSymbols("A3COM-HUAWEI-LB-MIB", h3cLBRealServerGroupTable=h3cLBRealServerGroupTable, h3cLBTrapPrex=h3cLBTrapPrex, h3cLBRealServerName=h3cLBRealServerName, h3cLBRealServerGroupName=h3cLBRealServerGroupName, h3cLBRealServerOverLoad=h3cLBRealServerOverLoad, PYSNMP_MODULE_ID=h3cLB, h3cLBRealServerEntry=h3cLBRealServerEntry, h3cLBTables=h3cLBTables, h3cLB=h3cLB, h3cLBTrap=h3cLBTrap, h3cLBRealServerStatus=h3cLBRealServerStatus, h3cLBRealServerGroupEntry=h3cLBRealServerGroupEntry, h3cLBRealServerTable=h3cLBRealServerTable, h3cLBRealServerConnectNumber=h3cLBRealServerConnectNumber)
|
from requests import Request
def say(text):
    """Build and return the fully-encoded TTS request URL for *text*.

    The request is only prepared, never sent; the caller gets the URL with
    all query parameters percent-encoded.
    """
    query = {
        "text": f"{text}",
        "voice": "aleksandr",
        "format": "opus",
        "rate": "55",
        "pitch": "10",
        "volume": "70",
    }
    prepared = Request("GET", "https://tts.chez.work/say", params=query).prepare()
    return prepared.url
|
from inspect import Signature, iscoroutinefunction
from typing import Any, Dict, List, Optional, Union, cast
from pydantic import validate_arguments
from pydantic.typing import AnyCallable
from starlite.exceptions import ImproperlyConfiguredException
from starlite.handlers.base import BaseRouteHandler
from starlite.types import Guard
class ASGIRouteHandler(BaseRouteHandler):
    """Route handler decorator for mounting raw ASGI callables."""

    @validate_arguments(config={"arbitrary_types_allowed": True})
    def __init__(
        self,
        path: Union[Optional[str], Optional[List[str]]] = None,
        guards: Optional[List[Guard]] = None,
        opt: Optional[Dict[str, Any]] = None,
    ):
        super().__init__(path=path, guards=guards, opt=opt)

    def __call__(self, fn: AnyCallable) -> "ASGIRouteHandler":
        """Register *fn* as this route's handler and validate it."""
        self.fn = fn
        self.validate_handler_function()
        return self

    def validate_handler_function(self) -> None:
        """Validate the handler once set: it must be async, annotated to
        return None, and accept scope/send/receive."""
        super().validate_handler_function()
        sig = Signature.from_callable(cast(AnyCallable, self.fn))
        if sig.return_annotation is not None:
            raise ImproperlyConfiguredException("ASGI handler functions should return 'None'")
        has_asgi_params = all(key in sig.parameters for key in ("scope", "send", "receive"))
        if not has_asgi_params:
            raise ImproperlyConfiguredException(
                "ASGI handler functions should define 'scope', 'send' and 'receive' arguments"
            )
        is_async = iscoroutinefunction(self.fn) or iscoroutinefunction(self.fn.__call__)  # type: ignore[operator]
        if not is_async:
            raise ImproperlyConfiguredException("Functions decorated with 'asgi' must be async functions")


asgi = ASGIRouteHandler
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from runtime import runtime_pb2 as runtime_dot_runtime__pb2
# NOTE(review): generated by the gRPC protoc plugin — regenerate from the
# .proto file instead of editing by hand.
class RuntimeStub(object):
    """Client-side stub for the runtime.Runtime gRPC service."""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.Create = channel.unary_unary(
                '/runtime.Runtime/Create',
                request_serializer=runtime_dot_runtime__pb2.CreateRequest.SerializeToString,
                response_deserializer=runtime_dot_runtime__pb2.CreateResponse.FromString,
                )
        self.Read = channel.unary_unary(
                '/runtime.Runtime/Read',
                request_serializer=runtime_dot_runtime__pb2.ReadRequest.SerializeToString,
                response_deserializer=runtime_dot_runtime__pb2.ReadResponse.FromString,
                )
        self.Delete = channel.unary_unary(
                '/runtime.Runtime/Delete',
                request_serializer=runtime_dot_runtime__pb2.DeleteRequest.SerializeToString,
                response_deserializer=runtime_dot_runtime__pb2.DeleteResponse.FromString,
                )
        self.Update = channel.unary_unary(
                '/runtime.Runtime/Update',
                request_serializer=runtime_dot_runtime__pb2.UpdateRequest.SerializeToString,
                response_deserializer=runtime_dot_runtime__pb2.UpdateResponse.FromString,
                )
        # Logs is the only server-streaming RPC: one request, many LogRecords.
        self.Logs = channel.unary_stream(
                '/runtime.Runtime/Logs',
                request_serializer=runtime_dot_runtime__pb2.LogsRequest.SerializeToString,
                response_deserializer=runtime_dot_runtime__pb2.LogRecord.FromString,
                )
        self.CreateNamespace = channel.unary_unary(
                '/runtime.Runtime/CreateNamespace',
                request_serializer=runtime_dot_runtime__pb2.CreateNamespaceRequest.SerializeToString,
                response_deserializer=runtime_dot_runtime__pb2.CreateNamespaceResponse.FromString,
                )
        self.DeleteNamespace = channel.unary_unary(
                '/runtime.Runtime/DeleteNamespace',
                request_serializer=runtime_dot_runtime__pb2.DeleteNamespaceRequest.SerializeToString,
                response_deserializer=runtime_dot_runtime__pb2.DeleteNamespaceResponse.FromString,
                )
# NOTE(review): generated scaffold — subclass and override the methods to
# implement the service; each default raises UNIMPLEMENTED.
class RuntimeServicer(object):
    """Server-side service interface for runtime.Runtime."""

    def Create(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Read(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Delete(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Update(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Logs(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CreateNamespace(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DeleteNamespace(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_RuntimeServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers on *server* under 'runtime.Runtime'.

    NOTE(review): generated by the gRPC protoc plugin — regenerate rather
    than hand-edit.
    """
    rpc_method_handlers = {
            'Create': grpc.unary_unary_rpc_method_handler(
                    servicer.Create,
                    request_deserializer=runtime_dot_runtime__pb2.CreateRequest.FromString,
                    response_serializer=runtime_dot_runtime__pb2.CreateResponse.SerializeToString,
            ),
            'Read': grpc.unary_unary_rpc_method_handler(
                    servicer.Read,
                    request_deserializer=runtime_dot_runtime__pb2.ReadRequest.FromString,
                    response_serializer=runtime_dot_runtime__pb2.ReadResponse.SerializeToString,
            ),
            'Delete': grpc.unary_unary_rpc_method_handler(
                    servicer.Delete,
                    request_deserializer=runtime_dot_runtime__pb2.DeleteRequest.FromString,
                    response_serializer=runtime_dot_runtime__pb2.DeleteResponse.SerializeToString,
            ),
            'Update': grpc.unary_unary_rpc_method_handler(
                    servicer.Update,
                    request_deserializer=runtime_dot_runtime__pb2.UpdateRequest.FromString,
                    response_serializer=runtime_dot_runtime__pb2.UpdateResponse.SerializeToString,
            ),
            'Logs': grpc.unary_stream_rpc_method_handler(
                    servicer.Logs,
                    request_deserializer=runtime_dot_runtime__pb2.LogsRequest.FromString,
                    response_serializer=runtime_dot_runtime__pb2.LogRecord.SerializeToString,
            ),
            'CreateNamespace': grpc.unary_unary_rpc_method_handler(
                    servicer.CreateNamespace,
                    request_deserializer=runtime_dot_runtime__pb2.CreateNamespaceRequest.FromString,
                    response_serializer=runtime_dot_runtime__pb2.CreateNamespaceResponse.SerializeToString,
            ),
            'DeleteNamespace': grpc.unary_unary_rpc_method_handler(
                    servicer.DeleteNamespace,
                    request_deserializer=runtime_dot_runtime__pb2.DeleteNamespaceRequest.FromString,
                    response_serializer=runtime_dot_runtime__pb2.DeleteNamespaceResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'runtime.Runtime', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
# NOTE(review): generated convenience wrappers around grpc.experimental —
# regenerate from the .proto rather than hand-editing.
class Runtime(object):
    """Static per-RPC helpers for runtime.Runtime (no stub object needed)."""

    @staticmethod
    def Create(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/runtime.Runtime/Create',
            runtime_dot_runtime__pb2.CreateRequest.SerializeToString,
            runtime_dot_runtime__pb2.CreateResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Read(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/runtime.Runtime/Read',
            runtime_dot_runtime__pb2.ReadRequest.SerializeToString,
            runtime_dot_runtime__pb2.ReadResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Delete(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/runtime.Runtime/Delete',
            runtime_dot_runtime__pb2.DeleteRequest.SerializeToString,
            runtime_dot_runtime__pb2.DeleteResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Update(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/runtime.Runtime/Update',
            runtime_dot_runtime__pb2.UpdateRequest.SerializeToString,
            runtime_dot_runtime__pb2.UpdateResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    # Server-streaming RPC: returns an iterator of LogRecord messages.
    @staticmethod
    def Logs(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_stream(request, target, '/runtime.Runtime/Logs',
            runtime_dot_runtime__pb2.LogsRequest.SerializeToString,
            runtime_dot_runtime__pb2.LogRecord.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def CreateNamespace(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/runtime.Runtime/CreateNamespace',
            runtime_dot_runtime__pb2.CreateNamespaceRequest.SerializeToString,
            runtime_dot_runtime__pb2.CreateNamespaceResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def DeleteNamespace(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/runtime.Runtime/DeleteNamespace',
            runtime_dot_runtime__pb2.DeleteNamespaceRequest.SerializeToString,
            runtime_dot_runtime__pb2.DeleteNamespaceResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
import numpy as np
from numpy import abs, cos, exp, mean, pi, prod, sin, sqrt, sum
from autotune import TuningProblem
from autotune.space import *
import os
import sys
import time
import json
import math
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
from skopt.space import Real, Integer, Categorical
HERE = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(1, os.path.dirname(HERE)+ '/tools')
from plopper import Plopper
# --- Search-space definition for the autotuner ------------------------------
# Each p0..p3 toggles an (inert placeholder) pragma line on/off; p4/p5 choose
# loop tile sizes. The commented alternatives below are the real clang-loop
# pack pragmas this placeholder set stands in for.
#nparams = 8
cs = CS.ConfigurationSpace(seed=1234)
p0= CSH.CategoricalHyperparameter(name='p0', choices=["#warning \"hello\"", "" ], default_value='')
p1= CSH.CategoricalHyperparameter(name='p1', choices=["#warning \"hello\"", "" ], default_value='')
p2= CSH.CategoricalHyperparameter(name='p2', choices=["#warning \"hello\"", "" ], default_value='')
p3= CSH.CategoricalHyperparameter(name='p3', choices=["#warning \"hello\"", "" ], default_value='')
'''
p0= CSH.CategoricalHyperparameter(name='p0', choices=["#pragma clang loop(i102) pack array(A) allocate(malloc)", "" ], default_value='')
p1= CSH.CategoricalHyperparameter(name='p1', choices=["#pragma clang loop(i102) pack array(B) allocate(malloc)", "" ], default_value='')
p2= CSH.CategoricalHyperparameter(name='p2', choices=["#pragma clang loop(i102) pack array(X) allocate(malloc)", ""], default_value='')
p3= CSH.CategoricalHyperparameter(name='p3', choices=["#pragma clang loop(i11) pack array(B) allocate(malloc)", ""], default_value='')
'''
#pragma clang loop(i11) pack array(X) allocate(malloc)
#TODO permutations
#p2= CSH.CategoricalHyperparameter(name='p2', choices=["#pragma clang loop(i1,j1,k1,i2,j2) interchange permutation(j1,k1,i1,j2,i2)", " "], default_value=' ')
p4= CSH.OrdinalHyperparameter(name='p4', sequence=['4','8','16','20','32','50','64','80','96','100','128'], default_value='96')
p5= CSH.OrdinalHyperparameter(name='p5', sequence=['4','8','16','20','32','50','64','80','96','100','128'], default_value='96')
"""
l2= CSH.OrdinalHyperparameter(name='l2', sequence=['4','8','16','20','32','50','64','80','96','100','128'], default_value='96')
l3= CSH.OrdinalHyperparameter(name='l3', sequence=['4','8','16','20','32','50','64','80','96','100','128'], default_value='96')
l4= CSH.OrdinalHyperparameter(name='l4', sequence=['4','8','16','20','32','50','64','80','96','100','128'], default_value='96')
l5= CSH.OrdinalHyperparameter(name='l5', sequence=['4','8','16','20','32','50','64','80','96','100','128'], default_value='96')
l6= CSH.OrdinalHyperparameter(name='l6', sequence=['4','8','16','20','32','50','64','80','96','100','128'], default_value='96')
l7= CSH.OrdinalHyperparameter(name='l7', sequence=['4','8','16','20','32','50','64','80','96','100','128'], default_value='96')
"""
#cs.add_hyperparameters([p0,p1,p2,p3,l0,l1,l2,l3,l4,l5,l6,l7])
cs.add_hyperparameters([p0,p1,p2,p3,p4,p5])
#cs.add_hyperparameters([p0,p1,p2,p3])
#cond1 = CS.InCondition(p1, p0, ['#pragma clang loop(j2) pack array(A) allocate(malloc)'])
#cs.add_condition(cond1)
# problem space
task_space = None
input_space = cs
# NOTE(review): `inf` is not imported explicitly above; it presumably comes
# from the `autotune.space` star-import — verify.
output_space = Space([
    Real(0.0, inf, name="time")
])
# The kernel source compiled/benchmarked by Plopper lives next to this file.
dir_path = os.path.dirname(os.path.realpath(__file__))
kernel_idx = dir_path.rfind('/')
kernel = dir_path[kernel_idx+1:]
obj = Plopper(dir_path+'/mmp.c',dir_path)
# Ordered list of the tunable parameter names (must match cs above).
#x1=['p0','p1','p2','p3','l0','l1','l2','l3','l4','l5','l6','l7']
x1=['p0','p1','p2','p3','p4','p5']
#x1=['p0','p1','p2','p3']
nparams = len( x1 )
def myobj(point: dict):
    """Objective for the tuner: measure runtime of the kernel at ``point``.

    ``point`` maps each name in ``x1`` (p0..p5) to its chosen value.
    Returns the runtime reported by Plopper.findRuntime.
    """
    def plopper_func(x):
        # ValueError if any NaN or Inf (no-op for the string-valued params).
        x = np.asarray_chkfinite(x)
        values = [point[k] for k in x1]
        print('VALUES:', point[x1[0]])
        # Placeholder names substituted in the kernel source by Plopper.
        params = ["P0", "P1", "P2", "P3", "L0", "L1"]
        result = obj.findRuntime(values, params)
        return result

    # Build the input vector from the known parameter names rather than
    # reconstructing keys from range(len(point)), which silently breaks if
    # the dict gains extra keys.
    x = np.array([point[k] for k in x1])
    results = plopper_func(x)
    # Fixed: the original `print('OUTPUT:%f',results)` never applied the
    # %-format and printed the literal '%f' plus a second argument.
    print('OUTPUT:%f' % results)
    return results
# Bundle the search space and objective into the TuningProblem consumed by
# the autotune/ytopt search driver (no task space, constraints, or model).
Problem = TuningProblem(
    task_space=None,
    input_space=input_space,
    output_space=output_space,
    objective=myobj,
    constraints=None,
    model=None
)
#Problem.checkcfg()
|
import numpy as np
import gdspy as gd
import gds_tools as gtools
#================================================
# Define healing function for transmission lines \\
#=========================================================================
# Initialization: radius : radius of healing circle ||
# endpoint : (x, y) of circle center ||
# (optional) npoints : number of points to use for circum. ||
# (optional) layer : layer to put healer on ||
#=========================================================================
def circle(radius, endpoint, npoints = 100, layer = 0, datatype = 0):
    """Return a healing circle for transmission-line ends.

    The circle of the given radius is centred on ``endpoint`` and wrapped in
    a GDStructure exposing a single anchor 'A' at the centre with width
    2*radius.
    """
    round_poly = gd.Round(endpoint, radius, number_of_points = npoints, layer = layer, datatype = datatype)
    return gtools.classes.GDStructure(round_poly, {'A': endpoint}, {'A': 2 * radius})
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress TensorFlow C++ log noise (originally: to silence errors on the author's MacBook)
import tensorflow as tf

# One-parameter linear regression demo using the legacy TF1.x graph API.
X = [1, 2, 3]
Y = [1, 2, 3]
W = tf.Variable(5.)
hypothesis = X * W
# Analytic gradient of the MSE cost: d/dW mean((W*X - Y)^2) = 2*mean((W*X - Y)*X)
gradient = tf.reduce_mean((W * X - Y) * X) * 2
cost = tf.reduce_mean(tf.square(hypothesis - Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
# compute_gradients/apply_gradients expose the gradients so they can be
# adjusted manually before being applied.
#Get gradients
gvs = optimizer.compute_gradients(cost)
#Apply gradients
apply_gradients = optimizer.apply_gradients(gvs)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Print the analytic gradient alongside W and the optimizer's (grad, var)
# pairs each step; they should agree.
for step in range(100):
    print(step, sess.run([gradient, W, gvs]))
    sess.run(apply_gradients)
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tencentcloud.common.abstract_model import AbstractModel
class CreateDBInstanceHourRequest(AbstractModel):
    """CreateDBInstanceHour request payload (pay-as-you-go MongoDB instance).

    Fields (all None until deserialized): Memory (GB); Volume (GB);
    ReplicateSetNum (1 = replica set, >1 = sharded cluster, max 10);
    SecondaryNum (secondaries per replica set, only 2 supported);
    EngineVersion (MONGO_2 / MONGO_3_MMAP / MONGO_3_WT / MONGO_3_ROCKS /
    MONGO_36_WT); Machine (GIO / TGIO); GoodsNum (1-10 instances); Zone
    (e.g. ap-guangzhou-2); InstanceRole (MASTER / DR / RO); InstanceType
    (REPLSET / SHARD); Encrypt (MONGO_3_ROCKS only); VpcId; SubnetId
    (required when VpcId is set); ProjectId; SecurityGroup (list of str).
    """

    def __init__(self):
        # Every API field starts as None; _deserialize fills them in.
        for field in (
            "Memory", "Volume", "ReplicateSetNum", "SecondaryNum",
            "EngineVersion", "Machine", "GoodsNum", "Zone",
            "InstanceRole", "InstanceType", "Encrypt", "VpcId",
            "SubnetId", "ProjectId", "SecurityGroup",
        ):
            setattr(self, field, None)

    def _deserialize(self, params):
        # Copy each recognised key from the raw params dict; absent keys stay None.
        for field in (
            "Memory", "Volume", "ReplicateSetNum", "SecondaryNum",
            "EngineVersion", "Machine", "GoodsNum", "Zone",
            "InstanceRole", "InstanceType", "Encrypt", "VpcId",
            "SubnetId", "ProjectId", "SecurityGroup",
        ):
            setattr(self, field, params.get(field))
class CreateDBInstanceHourResponse(AbstractModel):
    """CreateDBInstanceHour response payload.

    DealId (str): order ID.  RequestId (str): unique per-request ID, needed
    when reporting issues to support.
    """

    def __init__(self):
        for field in ("DealId", "RequestId"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("DealId", "RequestId"):
            setattr(self, field, params.get(field))
class CreateDBInstanceRequest(AbstractModel):
    """CreateDBInstance request payload (monthly-subscription MongoDB).

    Fields (all None until deserialized): SecondaryNum (secondaries per
    replica set); Memory (GB); Volume (GB); MongoVersion (only MONGO_3_WT);
    MachineCode (GIO / TGIO); GoodsNum (1-10 instances); Zone (e.g.
    ap-guangzhou-2); TimeSpan (months purchased); Password; ProjectId;
    SecurityGroup (list of str); UniqVpcId (defaults to the basic network);
    UniqSubnetId (required when UniqVpcId is set).
    """

    def __init__(self):
        # Every API field starts as None; _deserialize fills them in.
        for field in (
            "SecondaryNum", "Memory", "Volume", "MongoVersion",
            "MachineCode", "GoodsNum", "Zone", "TimeSpan", "Password",
            "ProjectId", "SecurityGroup", "UniqVpcId", "UniqSubnetId",
        ):
            setattr(self, field, None)

    def _deserialize(self, params):
        # Copy each recognised key from the raw params dict; absent keys stay None.
        for field in (
            "SecondaryNum", "Memory", "Volume", "MongoVersion",
            "MachineCode", "GoodsNum", "Zone", "TimeSpan", "Password",
            "ProjectId", "SecurityGroup", "UniqVpcId", "UniqSubnetId",
        ):
            setattr(self, field, params.get(field))
class CreateDBInstanceResponse(AbstractModel):
    """Response structure for CreateDBInstance."""

    # Attribute names deserialized from the API response payload.
    _FIELDS = ("DealId", "RequestId")

    def __init__(self):
        """
        :param DealId: Order ID.
        :type DealId: str
        :param RequestId: Unique request ID, returned with every response;
            required when reporting an issue about this request.
        :type RequestId: str
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
class TerminateDBInstanceRequest(AbstractModel):
    """Request structure for TerminateDBInstance."""

    def __init__(self):
        """
        :param InstanceId: Instance ID, in the form cmgo-p8vnipr5.
        :type InstanceId: str
        """
        self.InstanceId = None

    def _deserialize(self, params):
        # Absent key yields None, matching the constructor default.
        self.InstanceId = params.get("InstanceId")
class TerminateDBInstanceResponse(AbstractModel):
    """Response structure for TerminateDBInstance."""

    # Attribute names deserialized from the API response payload.
    _FIELDS = ("AsyncRequestId", "RequestId")

    def __init__(self):
        """
        :param AsyncRequestId: Order ID; its presence indicates the
            instance was terminated successfully.
        :type AsyncRequestId: str
        :param RequestId: Unique request ID, returned with every response;
            required when reporting an issue about this request.
        :type RequestId: str
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
class UpgradeDBInstanceHourRequest(AbstractModel):
    """Request structure for UpgradeDBInstanceHour."""

    # Attribute names carried by this request, in API order.
    _FIELDS = ("InstanceId", "Memory", "Volume", "OplogSize")

    def __init__(self):
        """
        :param InstanceId: Instance ID, in the form cmgo-p8vnipr5.
        :type InstanceId: str
        :param Memory: Post-upgrade memory size, in GB.
        :type Memory: int
        :param Volume: Post-upgrade disk size, in GB.
        :type Volume: int
        :param OplogSize: Post-upgrade oplog size, in GB. Defaults to 10%%
            of the disk space; allowed range is 10%%-90%% of the disk.
        :type OplogSize: int
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
class UpgradeDBInstanceHourResponse(AbstractModel):
    """Response structure for UpgradeDBInstanceHour."""

    # Attribute names deserialized from the API response payload.
    _FIELDS = ("DealId", "RequestId")

    def __init__(self):
        """
        :param DealId: Order ID.
        :type DealId: str
        :param RequestId: Unique request ID, returned with every response;
            required when reporting an issue about this request.
        :type RequestId: str
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
class UpgradeDBInstanceRequest(AbstractModel):
    """Request structure for UpgradeDBInstance."""

    # Attribute names carried by this request, in API order.
    _FIELDS = ("InstanceId", "Memory", "Volume", "OplogSize")

    def __init__(self):
        """
        :param InstanceId: Instance ID, in the form cmgo-p8vnipr5; identical
            to the instance ID shown in the cloud database console.
        :type InstanceId: str
        :param Memory: Post-upgrade memory size, in GB.
        :type Memory: int
        :param Volume: Post-upgrade disk size, in GB.
        :type Volume: int
        :param OplogSize: Post-upgrade oplog size, in GB. Defaults to 10%%
            of the disk space; allowed range is 10%%-90%% of the disk.
        :type OplogSize: int
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
class UpgradeDBInstanceResponse(AbstractModel):
    """Response structure for UpgradeDBInstance."""

    # Attribute names deserialized from the API response payload.
    _FIELDS = ("DealId", "RequestId")

    def __init__(self):
        """
        :param DealId: Order ID.
        :type DealId: str
        :param RequestId: Unique request ID, returned with every response;
            required when reporting an issue about this request.
        :type RequestId: str
        """
        for name in self._FIELDS:
            setattr(self, name, None)

    def _deserialize(self, params):
        for name in self._FIELDS:
            setattr(self, name, params.get(name))
# Copyright (c) 2011, Roger Lew [see LICENSE.txt]
# This software is funded in part by NIH Grant P20 RR016454.
import unittest
import warnings
import os
import math
from random import shuffle, random
from collections import Counter,OrderedDict
from dictset import DictSet,_rep_generator
from math import isnan, isinf, floor
import numpy as np
from pprint import pprint as pp
from pyvttbl import PyvtTbl
from pyvttbl import DataFrame
from pyvttbl.plotting import *
from pyvttbl.stats import *
from pyvttbl.misc.support import *
class Test_anova_within(unittest.TestCase):
    """Regression test for DataFrame.anova with three within-subject factors.

    Compares the full string rendering of the ANOVA result (sphericity
    corrections, marginal means, etc.) against a frozen expected output.
    The expected string below must stay byte-identical to what
    ``str(aov)`` produces — do not reflow it.
    """
    def test0(self):
        ## Within test
        # Frozen expected output of str(aov); whitespace is significant.
        R = """\
ERROR ~ TIMEOFDAY * COURSE * MODEL
TESTS OF WITHIN SUBJECTS EFFECTS
Measure: ERROR
     Source                             Type III    eps      df        MS          F         Sig.      et2_G   Obs.    SE     95% CI    lambda     Obs.  
                                           SS                                                                                                    Power 
=====================================================================================================================================================
TIMEOFDAY           Sphericity Assumed   140.167       -         1   140.167   120.143       0.008    3.391     27   0.456    0.894   1621.929       1 
                    Greenhouse-Geisser   140.167       1         1   140.167   120.143       0.008    3.391     27   0.456    0.894   1621.929       1 
                    Huynh-Feldt          140.167       1         1   140.167   120.143       0.008    3.391     27   0.456    0.894   1621.929       1 
                    Box                  140.167       1         1   140.167   120.143       0.008    3.391     27   0.456    0.894   1621.929       1 
-----------------------------------------------------------------------------------------------------------------------------------------------------
Error(TIMEOFDAY)    Sphericity Assumed     2.333       -         2     1.167                                                                           
                    Greenhouse-Geisser     2.333       1         2     1.167                                                                           
                    Huynh-Feldt            2.333       1         2     1.167                                                                           
                    Box                    2.333       1         2     1.167                                                                           
-----------------------------------------------------------------------------------------------------------------------------------------------------
COURSE              Sphericity Assumed    56.778       -         2    28.389   1022.000   3.815e-06   1.374     18   0.056    0.109   9198.000       1 
                    Greenhouse-Geisser    56.778   0.501     1.002    56.667   1022.000   9.664e-04   1.374     18   0.056    0.109   9198.000       1 
                    Huynh-Feldt           56.778   0.504     1.008    56.336   1022.000   9.349e-04   1.374     18   0.056    0.109   9198.000       1 
                    Box                   56.778   0.500         1    56.778   1022.000   9.770e-04   1.374     18   0.056    0.109   9198.000       1 
-----------------------------------------------------------------------------------------------------------------------------------------------------
Error(COURSE)       Sphericity Assumed     0.111       -         4     0.028                                                                           
                    Greenhouse-Geisser     0.111   0.501     2.004     0.055                                                                           
                    Huynh-Feldt            0.111   0.504     2.016     0.055                                                                           
                    Box                    0.111   0.500         2     0.056                                                                           
-----------------------------------------------------------------------------------------------------------------------------------------------------
MODEL               Sphericity Assumed    51.444       -         2    25.722    92.600   4.470e-04    1.245     18   0.176    0.345    833.400       1 
                    Greenhouse-Geisser    51.444   0.507     1.013    50.770    92.600       0.010    1.245     18   0.176    0.345    833.400   1.000 
                    Huynh-Feldt           51.444   0.527     1.054    48.817    92.600       0.009    1.245     18   0.176    0.345    833.400   1.000 
                    Box                   51.444   0.500         1    51.444    92.600       0.011    1.245     18   0.176    0.345    833.400   1.000 
-----------------------------------------------------------------------------------------------------------------------------------------------------
Error(MODEL)        Sphericity Assumed     1.111       -         4     0.278                                                                           
                    Greenhouse-Geisser     1.111   0.507     2.027     0.548                                                                           
                    Huynh-Feldt            1.111   0.527     2.108     0.527                                                                           
                    Box                    1.111   0.500         2     0.556                                                                           
-----------------------------------------------------------------------------------------------------------------------------------------------------
TIMEOFDAY *         Sphericity Assumed     5.444       -         2     2.722     2.085       0.240    0.132      9   0.540    1.057      9.383   0.446 
COURSE              Greenhouse-Geisser     5.444   0.814     1.628     3.345     2.085       0.255    0.132      9   0.540    1.057      9.383   0.373 
                    Huynh-Feldt            5.444       1         2     2.722     2.085       0.240    0.132      9   0.540    1.057      9.383   0.446 
                    Box                    5.444   0.500         1     5.444     2.085       0.286    0.132      9   0.540    1.057      9.383   0.244 
-----------------------------------------------------------------------------------------------------------------------------------------------------
Error(TIMEOFDAY *   Sphericity Assumed     5.222       -         4     1.306                                                                           
COURSE)             Greenhouse-Geisser     5.222   0.814     3.255     1.604                                                                           
                    Huynh-Feldt            5.222       1         4     1.306                                                                           
                    Box                    5.222   0.500         2     2.611                                                                           
-----------------------------------------------------------------------------------------------------------------------------------------------------
TIMEOFDAY *         Sphericity Assumed    16.778       -         2     8.389    37.750       0.003    0.406      9   0.223    0.436    169.875   1.000 
MODEL               Greenhouse-Geisser    16.778   0.540     1.079    15.545    37.750       0.021    0.406      9   0.223    0.436    169.875   0.993 
                    Huynh-Feldt           16.778   0.571     1.142    14.697    37.750       0.018    0.406      9   0.223    0.436    169.875   0.996 
                    Box                   16.778   0.500         1    16.778    37.750       0.025    0.406      9   0.223    0.436    169.875   0.985 
-----------------------------------------------------------------------------------------------------------------------------------------------------
Error(TIMEOFDAY *   Sphericity Assumed     0.889       -         4     0.222                                                                           
MODEL)              Greenhouse-Geisser     0.889   0.540     2.159     0.412                                                                           
                    Huynh-Feldt            0.889   0.571     2.283     0.389                                                                           
                    Box                    0.889   0.500         2     0.444                                                                           
-----------------------------------------------------------------------------------------------------------------------------------------------------
COURSE *            Sphericity Assumed     8.778       -         4     2.194     3.762       0.052    0.212      6   0.367    0.719     11.286   0.504 
MODEL               Greenhouse-Geisser     8.778   0.354     1.415     6.204     3.762       0.157    0.212      6   0.367    0.719     11.286   0.223 
                    Huynh-Feldt            8.778   0.354     1.415     6.204     3.762       0.157    0.212      6   0.367    0.719     11.286   0.223 
                    Box                    8.778   0.500         2     4.389     3.762       0.120    0.212      6   0.367    0.719     11.286   0.292 
-----------------------------------------------------------------------------------------------------------------------------------------------------
Error(COURSE *      Sphericity Assumed     4.667       -         8     0.583                                                                           
MODEL)              Greenhouse-Geisser     4.667   0.354     2.830     1.649                                                                           
                    Huynh-Feldt            4.667   0.354     2.830     1.649                                                                           
                    Box                    4.667   0.500         4     1.167                                                                           
-----------------------------------------------------------------------------------------------------------------------------------------------------
TIMEOFDAY *         Sphericity Assumed     2.778       -         4     0.694     1.923       0.200    0.067      3   0.408    0.800      2.885   0.152 
COURSE *            Greenhouse-Geisser     2.778   0.290     1.159     2.397     1.923       0.293    0.067      3   0.408    0.800      2.885   0.087 
MODEL               Huynh-Feldt            2.778   0.290     1.159     2.397     1.923       0.293    0.067      3   0.408    0.800      2.885   0.087 
                    Box                    2.778   0.500         2     1.389     1.923       0.260    0.067      3   0.408    0.800      2.885   0.109 
-----------------------------------------------------------------------------------------------------------------------------------------------------
Error(TIMEOFDAY *   Sphericity Assumed     2.889       -         8     0.361                                                                           
COURSE *            Greenhouse-Geisser     2.889   0.290     2.318     1.246                                                                           
MODEL)              Huynh-Feldt            2.889   0.290     2.318     1.246                                                                           
                    Box                    2.889   0.500         4     0.722                                                                           
TABLES OF ESTIMATED MARGINAL MEANS
Estimated Marginal Means for TIMEOFDAY
TIMEOFDAY   Mean    Std. Error   95% Lower Bound   95% Upper Bound 
==================================================================
T1          5.778        0.457             4.882             6.674 
T2          2.556        0.229             2.108             3.003 
Estimated Marginal Means for COURSE
COURSE   Mean    Std. Error   95% Lower Bound   95% Upper Bound 
===============================================================
C1       5.222        0.608             4.031             6.414 
C2         4.500        0.562             3.399             5.601 
C3       2.778        0.432             1.931             3.625 
Estimated Marginal Means for MODEL
MODEL   Mean    Std. Error   95% Lower Bound   95% Upper Bound 
==============================================================
M1      5.333        0.686             3.989             6.678 
M2      4.222        0.558             3.129             5.315 
M3      2.944        0.328             2.301             3.588 
Estimated Marginal Means for TIMEOFDAY * COURSE
TIMEOFDAY   COURSE   Mean    Std. Error   95% Lower Bound   95% Upper Bound 
===========================================================================
T1          C1       7.222        0.641             5.966             8.478 
T1          C2       6.111        0.790             4.564             7.659 
T1          C3           4        0.577             2.868             5.132 
T2          C1       3.222        0.401             2.437             4.007 
T2          C2       2.889        0.261             2.378             3.400 
T2          C3       1.556        0.294             0.979             2.132 
Estimated Marginal Means for TIMEOFDAY * MODEL
TIMEOFDAY   MODEL   Mean    Std. Error   95% Lower Bound   95% Upper Bound 
==========================================================================
T1          M1      7.444        0.835             5.807             9.081 
T1          M2      6.111        0.512             5.107             7.115 
T1          M3      3.778        0.465             2.867             4.689 
T2          M1      3.222        0.434             2.372             4.073 
T2          M2      2.333        0.408             1.533             3.133 
T2          M3      2.111        0.261             1.600             2.622 
Estimated Marginal Means for COURSE * MODEL
COURSE   MODEL   Mean    Std. Error   95% Lower Bound   95% Upper Bound 
=======================================================================
C1       M1      6.667        1.085             4.540             8.794 
C1       M2      5.167        1.195             2.825             7.509 
C1       M3      3.833        0.601             2.656             5.011 
C2       M1      6.167        1.195             3.825             8.509 
C2       M2      4.167        0.792             2.614             5.720 
C2       M3      3.167        0.477             2.231             4.102 
C3       M1      3.167        0.872             1.457             4.877 
C3       M2      3.333        0.882             1.605             5.062 
C3       M3      1.833        0.307             1.231             2.436 
Estimated Marginal Means for TIMEOFDAY * COURSE * MODEL
TIMEOFDAY   COURSE   MODEL   Mean    Std. Error   95% Lower Bound   95% Upper Bound 
===================================================================================
T1          C1       M1          9        0.577             7.868            10.132 
T1          C1       M2      7.667        0.333             7.013             8.320 
T1          C1       M3          5        0.577             3.868             6.132 
T1          C2       M1      8.667        0.882             6.938            10.395 
T1          C2       M2      5.667        0.882             3.938             7.395 
T1          C2       M3          4        0.577             2.868             5.132 
T1          C3       M1      4.667        1.202             2.311             7.022 
T1          C3       M2          5        0.577             3.868             6.132 
T1          C3       M3      2.333        0.333             1.680             2.987 
T2          C1       M1      4.333        0.333             3.680             4.987 
T2          C1       M2      2.667        0.882             0.938             4.395 
T2          C1       M3      2.667        0.333             2.013             3.320 
T2          C2       M1      3.667        0.333             3.013             4.320 
T2          C2       M2      2.667        0.333             2.013             3.320 
T2          C2       M3      2.333        0.333             1.680             2.987 
T2          C3       M1      1.667        0.333             1.013             2.320 
T2          C3       M2      1.667        0.882            -0.062             3.395 
T2          C3       M3      1.333        0.333             0.680             1.987 
"""
        # Load the fixture and run a 3-way within-subjects ANOVA on ERROR.
        df=DataFrame()
        fname='data/error~subjectXtimeofdayXcourseXmodel.csv'
        df.read_tbl(fname)
        aov=df.anova('ERROR',wfactors=['TIMEOFDAY','COURSE','MODEL'])
        ## print(aov)
        # The whole rendered report must match the frozen string exactly.
        self.assertEqual(str(aov),R)
def suite():
    """Aggregate this module's test cases into a single test suite.

    Fix: the inner parentheses previously formed a plain parenthesised
    expression, not a tuple (it only worked because a TestSuite is itself
    iterable). The trailing comma makes it a real one-element tuple, so
    appending further ``makeSuite`` entries later stays correct.
    """
    return unittest.TestSuite((
            unittest.makeSuite(Test_anova_within),
                              ))
if __name__ == "__main__":
    # Execute the module's suite with the plain-text test runner.
    unittest.TextTestRunner().run(suite())
|
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present glastheim.pe
"""
from dataclasses import fields
from django import forms
from apps.certify.models import soat, citv, src, svct
class SOATForm(forms.ModelForm):
    """ModelForm for SOAT (mandatory vehicle insurance) certificates."""
    class Meta:
        model = soat
        # Explicit whitelist of editable model fields.
        fields = [
            'policy',
            'certify',
            'insurance_company',
            'number',
            'registration_date',
            'date_expiry',
            'category',
            'vin_serie',
            'insured',
            'amount',
            'file',
            'vehicles',
            'owners',
            'status'
        ]
        # Spanish display labels rendered in templates.
        labels = {
            'policy':'N° de Polisa de seguros',
            'certify':'Certificado',
            'insurance_company':'Compañia de seguros',
            'number':'Numero',
            'registration_date':'Fecha de registro',
            'date_expiry':'Fecha de expiracion',
            'category':'Categoria',
            'vin_serie':'N° VIN',
            'insured':'asegurada',
            'amount':'Monto',
            'file':'Archivo',
            'vehicles':'Vehiculo',
            'owners':'Propietarios',
            'status':'Estado'
        }
        # Bootstrap-styled widgets; 'vehicles' has no entry here, so it
        # falls back to Django's default widget for its model field type.
        widgets = {
            'policy' : forms.TextInput(attrs={'class':'form-control form-control-lg'}),
            'certify': forms.TextInput(attrs={'class':'form-control form-control-sm'}),
            'insurance_company': forms.Select(attrs={'class':'form-control form-control-sm select-single', 'style': 'width:100%'}),
            'number': forms.TextInput(attrs={'class':'form-control form-control-sm'}),
            # HTML5 date pickers via type=date on plain TextInputs.
            'registration_date': forms.TextInput(attrs={'class':'form-control form-control-sm', 'type': 'date'}),
            'date_expiry': forms.TextInput(attrs={'class':'form-control form-control-sm', 'type': 'date'}),
            'category': forms.Select(attrs={'class':'form-control'}),
            'vin_serie': forms.TextInput(attrs={'class':'form-control form-control-sm'}),
            'insured': forms.TextInput(attrs={'class':'form-control form-control-sm'}),
            'amount': forms.TextInput(attrs={'class':'form-control form-control-sm'}),
            'file': forms.FileInput(attrs={'class':'form-control'}),
            # NOTE(review): 'owners' rendered as TextInput — confirm the model
            # field is not a relation, otherwise a Select widget is expected.
            'owners': forms.TextInput(attrs={'class':'form-control form-control-sm'}),
            'status': forms.CheckboxInput(attrs={'class':'form-check-input'}),
        }
class CITVForm(forms.ModelForm):
    """ModelForm for CITV (periodic technical vehicle inspection) records."""
    class Meta:
        model = citv
        # Explicit whitelist of editable model fields. Mixed-case names
        # ('Registration_date', 'Type_of_inspection') mirror the model's
        # own field names.
        fields = [
            'id',
            'Registration_date',
            'expiration_date',
            'inspection_result',
            'comment',
            'Type_of_inspection',
            'file',
            'vehicle',
            'type_service',
            'scope',
            'status'
        ]
        # Bootstrap-styled widgets; 'vehicle' has no entry here, so it
        # falls back to Django's default widget for its model field type.
        widgets = {
            'id': forms.TextInput(attrs={'class':'form-control form-control-lg'}),
            # HTML5 date pickers via type=date on plain TextInputs.
            'Registration_date': forms.TextInput(attrs={'class':'form-control form-control-sm', 'type': 'date'}),
            'expiration_date':forms.TextInput(attrs={'class':'form-control form-control-sm', 'type': 'date'}),
            'inspection_result': forms.CheckboxInput(attrs={'class':'form-check-input'}),
            'comment' : forms.Textarea(attrs={'class':'form-control', 'rows': 3, 'placeholder': 'Comentarios'}),
            'Type_of_inspection': forms.TextInput(attrs={'class':'form-control form-control-sm'}),
            'file' : forms.FileInput(attrs={'class':'form-control'}),
            'type_service': forms.Select(attrs={'class':'form-control'}),
            'scope': forms.Select(attrs={'class':'form-control'}),
            'status': forms.CheckboxInput(attrs={'class':'form-check-input'}),
        }
class SRCForm(forms.ModelForm):
    """ModelForm for SRC certificates.

    Fix: ``fields`` was declared as a set literal (``{...}``), which has no
    defined iteration order, so the rendered field order could differ
    between processes. Declared as a list to keep a deterministic layout,
    matching SOATForm/CITVForm.
    """
    class Meta:
        model = src
        fields = [
            'name',
            'registration_date',
            'date_expiry',
            'file',
            'vehicles',
            'status'
        ]
        # Bootstrap-styled widgets; 'vehicles' keeps its default widget.
        widgets = {
            'name': forms.TextInput(attrs={'class':'form-control form-control-lg'}),
            'registration_date': forms.TextInput(attrs={'class':'form-control form-control-sm', 'type': 'date'}),
            'date_expiry': forms.TextInput(attrs={'class':'form-control form-control-sm', 'type': 'date'}),
            'file': forms.FileInput(attrs={'class':'form-control'}),
            'status': forms.CheckboxInput(attrs={'class':'form-check-input'})
        }
class SVCTForm(forms.ModelForm):
    """ModelForm for SVCT certificates.

    Fix: ``fields`` was declared as a set literal (``{...}``), which has no
    defined iteration order, so the rendered field order could differ
    between processes. Declared as a list to keep a deterministic layout,
    matching SOATForm/CITVForm.
    """
    class Meta:
        model = svct
        fields = [
            'name',
            'registration_date',
            'date_expiry',
            'file',
            'vehicles',
            'status'
        ]
        # Bootstrap-styled widgets; 'vehicles' keeps its default widget.
        widgets = {
            'name': forms.TextInput(attrs={'class':'form-control form-control-lg'}),
            'registration_date': forms.TextInput(attrs={'class':'form-control form-control-sm', 'type': 'date'}),
            'date_expiry': forms.TextInput(attrs={'class':'form-control form-control-sm', 'type': 'date'}),
            'file': forms.FileInput(attrs={'class':'form-control'}),
            'status': forms.CheckboxInput(attrs={'class':'form-check-input'})
        }
from config.celery_conf import celery_app
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'instadownload.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import requests
import urllib.request
from selenium import webdriver
from preview import Ui_Dialog_preview
class Ui_MainWindow(object):
    """Main window UI: URL entry plus a download button that scrapes an
    Instagram post's image URL with Selenium and previews it.

    Fix: ``parse`` previously never called ``driver.quit()``, leaking a
    headless Firefox process on every click (and on every scraping error).
    The driver is now always shut down via try/finally.
    """

    def setupUi(self, MainWindow):
        """Build the widget tree (Qt Designer generated layout)."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(533, 265)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/images/images/icon.jpeg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        MainWindow.setStyleSheet("background-image: url(:/images/images/background.jpg);\n"
"background-position: center center;\n"
"background-repeat: no-repeat;")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        spacerItem = QtWidgets.QSpacerItem(20, 76, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout.addItem(spacerItem, 0, 2, 1, 1)
        spacerItem1 = QtWidgets.QSpacerItem(56, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem1, 1, 0, 1, 1)
        self.label = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setFamily("Segoe Print")
        self.label.setFont(font)
        self.label.setStyleSheet("background-image: url(:/images/images/transparent.jpg);")
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 1, 1, 1, 3)
        spacerItem2 = QtWidgets.QSpacerItem(55, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem2, 1, 4, 1, 1)
        spacerItem3 = QtWidgets.QSpacerItem(62, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem3, 2, 0, 1, 2)
        self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit.setStyleSheet("background-image: url(:/images/images/transparent.jpg);")
        self.lineEdit.setObjectName("lineEdit")
        self.gridLayout.addWidget(self.lineEdit, 2, 2, 1, 1)
        self.toolButton = QtWidgets.QToolButton(self.centralwidget)
        self.toolButton.setStyleSheet("background-image: url(:/images/images/transparent.jpg);")
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(":/images/images/download.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.toolButton.setIcon(icon1)
        self.toolButton.setObjectName("toolButton")
        self.gridLayout.addWidget(self.toolButton, 2, 3, 1, 1)
        spacerItem4 = QtWidgets.QSpacerItem(55, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem4, 2, 4, 1, 1)
        spacerItem5 = QtWidgets.QSpacerItem(20, 78, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout.addItem(spacerItem5, 3, 2, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # Download button triggers the scrape.
        self.toolButton.clicked.connect(self.parse)

    def retranslateUi(self, MainWindow):
        """Install all user-visible strings (Qt Designer generated)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "INSTADOWNLOAD"))
        self.label.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\"><span style=\" font-size:18pt; font-weight:600;\">Welcome to INSTADOWNLOAD!</span></p></body></html>"))
        self.lineEdit.setPlaceholderText(_translate("MainWindow", "Enter instagram post URL here...."))
        self.toolButton.setText(_translate("MainWindow", "..."))

    def parse(self):
        """Scrape the post image URL from the entered page, then preview it.

        The 'FFVAD' class name is Instagram's (obfuscated, may change)
        CSS class for the post <img> element.
        """
        options = webdriver.firefox.options.Options()
        options.headless = True
        driver = webdriver.Firefox(options=options)
        try:
            driver.get(self.lineEdit.text())
            get_div = driver.find_element_by_class_name('FFVAD')
            self.photolink = get_div.get_attribute('src')
        finally:
            # Always shut the browser down; previously it was leaked.
            driver.quit()
        self.preview()

    def preview(self):
        """Show the preview dialog; download the image if accepted."""
        Dialog = QtWidgets.QDialog()
        ui = Ui_Dialog_preview()
        ui.setupUi(Dialog, self.photolink)
        Dialog.show()
        rsp = Dialog.exec_()
        if rsp == QtWidgets.QDialog.Accepted:
            urllib.request.urlretrieve(self.photolink, 'INSTAPHOTO.jpg')
import instadownload_rc
if __name__ == "__main__":
    # Standalone launch: build the window and enter the Qt event loop.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    main_window = QtWidgets.QMainWindow()
    window_ui = Ui_MainWindow()
    window_ui.setupUi(main_window)
    main_window.show()
    sys.exit(app.exec_())
|
class InvaldSudoku(Exception):
    # Raised when the input grid is structurally invalid (not 9x9).
    # (Name kept as-is — spelled "Invald" — for backward compatibility.)
    pass
class NoNSolvable(Exception):
    # Raised when the grid holds out-of-range values or has no solution.
    pass
def checksquare(num, i, j, grid):
    """Return True if ``num`` does not already appear in the 3x3 box
    containing cell (i, j).

    Bug fix: the previous version only examined the box cells lying in
    column ``j`` and row ``i`` (cells check() already covers), so a
    duplicate elsewhere in the 3x3 box was never detected and invalid
    placements were accepted.
    """
    box_row = (i // 3) * 3   # top row of the enclosing 3x3 box
    box_col = (j // 3) * 3   # leftmost column of the enclosing 3x3 box
    for r in range(box_row, box_row + 3):
        for c in range(box_col, box_col + 3):
            if grid[r][c] == num:
                return False
    return True
def check(num, i, j, grid):
    """Return True if ``num`` may legally be placed at (i, j):
    absent from row i, column j, and the surrounding 3x3 box."""
    if any(grid[k][j] == num or grid[i][k] == num for k in range(9)):
        return False
    return checksquare(num, i, j, grid)
def bASolve(grid):
    """Solve ``grid`` in place by recursive backtracking.

    Empty cells hold 0. Returns True once every cell is filled legally,
    False if the current partial assignment admits no solution.
    """
    for row in range(9):
        for col in range(9):
            if grid[row][col] != 0:
                continue  # already filled
            for candidate in range(1, 10):
                if not check(candidate, row, col, grid):
                    continue
                grid[row][col] = candidate
                if bASolve(grid):
                    return True
                grid[row][col] = 0  # undo and try the next candidate
            return False  # no candidate fits this cell
    return True  # no empty cell left
def bASolverHandle(grid):
    """Validate ``grid``, solve it in place, and return the solved grid.

    :raises InvaldSudoku: if the grid is not exactly 9x9.
    :raises NoNSolvable: if a cell value is outside 0..9 or no solution exists.

    Fixes: the old version had an unreachable ``return False`` after the
    InvaldSudoku raise, only validated the length of the first row, and
    its pre-check (``not num < 10``) let negative values through.
    """
    # Structural validation first, so the value scans below can assume 9x9.
    if len(grid) != 9 or any(len(row) != 9 for row in grid):
        raise InvaldSudoku("Sudoku is not Valid, it has to be 9x9")
    # Pre-solve: every cell must be 0 (empty) through 9.
    if any(not 0 <= num < 10 for row in grid for num in row):
        raise NoNSolvable("Sudoku could not be solved")
    bASolve(grid)
    # Post-solve: a remaining 0 means the backtracker found no solution.
    if any(not 0 < num < 10 for row in grid for num in row):
        raise NoNSolvable("Sudoku could not be solved")
    return grid
if __name__ == "__main__":
    # Module is meant to be imported; no standalone CLI behaviour yet.
    pass
|
from numpy.testing import *
import pymc
import os
# This is a function, not a test case, because it has to be run from inside
# the source tree to work well.
# Optional (or unwanted, in Pdb's case) module names to grep for.
mod_strs = ['IPython', 'pylab', 'matplotlib', 'scipy','Pdb']
def test_dependencies():
dep_files = {}
for mod_str in mod_strs:
dep_files[mod_str] = []
for dirname, dirs, files in os.walk('..'):
for fname in files:
if fname[-3:]=='.py' or fname[-4:]=='.pyx':
if dirname.find('sandbox')==-1 and fname != 'test_dependencies.py'\
and dirname.find('examples')==-1:
for mod_str in mod_strs:
if file(dirname+'/'+fname).read().find(mod_str)>=0:
dep_files[mod_str].append(dirname+'/'+fname)
print 'Instances of optional dependencies found are:'
for mod_str in mod_strs:
print '\t'+mod_str+':'
for fname in dep_files[mod_str]:
print '\t\t'+fname
if len(dep_files['Pdb'])>0:
raise ValueError, 'Looks like Pdb was not commented out in '+', '.join(dep_files[mod_str])
|
# -*- coding: utf-8 -*-
'''
SNS type: status, user, comment
'''
import hashlib
import utils
from errors import snserror
from snsconf import SNSConf
from snslog import SNSLog as logger
class BooleanWrappedData:
    """A bool-like wrapper carrying extra payload.

    Truth value and equality follow ``boolval``; ``data`` rides along
    (e.g. details explaining a False result).
    """

    def __init__(self, boolval, data=None):
        self.boolval = boolval
        self.data = data

    def __nonzero__(self):
        # Python 2 truth protocol.
        return self.boolval

    def __eq__(self, other):
        # Equal iff the other object's truthiness matches boolval.
        return not (self.boolval ^ bool(other))

    def __unicode__(self):
        return unicode((self.boolval, self.data))

    def __str__(self):
        return str((self.boolval, self.data))

    def __repr__(self):
        return repr((self.boolval, self.data))
class MessageID(utils.JsonDict):
    """Complete locator for a single status/message.

    Carries everything needed so that either a plugin's or a container's
    ``reply()`` can act on it. Two fields are mandatory:

      * ``platform``: name of the platform (e.g. ``RenrenStatus``).
      * ``channel``: name of the instantiated channel
        (e.g. ``renren_account_1``); identical to that channel's
        ``.jsonconf['channel_name']``.

    Per-platform reply requirements: Renren needs status_id and
    source_user_id; Sina needs status_id; QQ needs status_id.

    NOTE: this object exists mainly for SNSAPI itself to identify a
    Message. Upper layers should avoid referencing its fields directly,
    and must not touch the non-mandatory ones.
    """

    def __init__(self, platform=None, channel=None):
        super(MessageID, self).__init__()
        self.platform = platform
        self.channel = channel

    def __str__(self):
        # Serialize the underlying JsonDict.
        return self._dumps()
class Message(utils.JsonDict):
    '''
    The Message base class for SNSAPI

    Data Fields:

        * ``platform``: a string describing the platform
          where this message come from. See 'snsapi/platform.py'
          for more details.
        * ``raw``: the raw json or XML object returned from
          the platform specific API. This member is here to give
          upper layer developers the last chance of manipulating
          any available information. Having an understanding of
          the platform-specific returning format is essential.
        * ``parsed``: this member abstracts some common fields
          that all messages are supposed to have. e.g. 'username',
          'time', 'text', etc.
        * ``ID``: a ``MessageID`` object. This ID should be enough
          to identify a message across all different platforms.
          For details of ``ID``, please see the docstring of ``MessageID``.

    Mandatory fields of ``parsed`` are:

        * ``time:`` a utc integer. (some platform returns parsed string)
        * ``userid:`` a string. (called as "username" at some platform)
        * ``username:`` a string. (called as "usernick" as some platform)
        * ``text:`` a string. (can be 'text' in the returning json object,
          or parsed from other fields.)
        * ``attachments``: an array of attachments. Each attachment is:
          ``{'type': TYPE, 'format': [FORMAT1, FORMAT2, ...], 'data': DATA}``.
          TYPE can be one of ``link``, ``picture``, ``album``, ``video``, ``blog``.
          FORMAT can be ``link``, ``binary``, ``text`` and ``other``.
          DATA is your data presented in FORMAT.

    Optional fields of 'parsed' are:

        * ``deleted``: Bool. For some OSN.
        * ``reposts_count``: an integer. For some OSN.
        * ``comments_count``: an integer. For some OSN.
        * ``link``: a string. For RSS; Parsed from microblog message;
          Parsed from email message; etc.
        * ``title``: a string. For RSS; Blog channel of some OSN.
        * ``description``: a string. For RSS digest text;
          Sharing channel of some OSN; etc.
        * ``body``: a string. The 'content' of RSS, the 'body' of HTML,
          or whatever semantically meaning the body of a document.
        * ``text_orig``: a string. The original text, also known as
          "root message" in some context. e.g. the earliest status
          in one thread.
        * ``text_last``: a string. The latest text, also known as
          "message" in some context. e.g. the reply or forwarding
          comments made by the last user.
        * ``text_trace``: a string. Using any (can be platform-specific)
          method to construct the trace of this message. e.g.
          the forwarding / retweeting / reposting sequence.
          There is no unified format yet.
        * ``username_origin``: a string. The username who posts 'text_orig'.
    '''
    platform = "SNSAPI"

    def __init__(self, dct=None, platform=None, channel=None, conf=None):
        # BUG FIX: 'conf' previously defaulted to a mutable dict ({}).
        # That single dict object was shared by every Message constructed
        # without an explicit 'conf', so mutating one message's conf
        # silently leaked into all others. Use the None-sentinel idiom.
        self.conf = conf if conf is not None else {}
        self['deleted'] = False
        self['ID'] = MessageID(platform, channel)
        self['raw'] = utils.JsonDict({})
        self['parsed'] = utils.JsonDict({'attachments': []})
        if dct:
            self['raw'] = utils.JsonDict(dct)
            try:
                self.parse()
            except KeyError as e:
                raise snserror.type.parse(str(e))

    def parse(self):
        '''
        Parse self.raw and store result in self.parsed
        '''
        # Default action: copy all fields in 'raw' to 'parsed'.
        self.parsed.update(self.raw)

    def show(self):
        '''
        Level 1 serialization and print to console

        See dump()
        '''
        utils.console_output(unicode(self))

    def __str__(self):
        '''
        Level 1 serialization and convert to str using console encoding

        See dump()
        '''
        return unicode(self).encode(SNSConf.SNSAPI_CONSOLE_STDOUT_ENCODING)

    def __unicode__(self):
        '''
        Level 1 serialization and convert to unicode

        See dump()
        '''
        # NOTE:
        #
        # dump() method remains stable because the downstream is
        # digest methods. The __str__ and __unicode__ are only
        # for console interaction. Normal apps should refer to
        # those fields in 'parsed' themselves.
        #
        # We limit the output to 500 characters to make the console
        # output uncluttered.
        return unicode("[%s] at %s \n  %s") % (self.parsed.username,
                utils.utc2str(self.parsed.time),
                self.parsed.text[0:500])

    def dump(self, tz=None):
        '''
        Level 1 serialization: console output.

        This level targets console output. It only digests essential
        information which end users can understand. e.g. the text
        of a status is digested whereas the ID fields is not digested.

        To control the format, please rewrite dump() in derived Message class.

        See also __str__(), __unicode__(), show()
        '''
        if tz:
            return unicode("[%s] at %s \n  %s") % \
                    (self.parsed.username, utils.utc2str(self.parsed.time, tz), self.parsed.text)
        else:
            return unicode("[%s] at %s \n  %s") % \
                    (self.parsed.username, utils.utc2str(self.parsed.time), self.parsed.text)

    def dump_parsed(self):
        '''
        Level 2 serialization: interface output.

        This level targets both Python class interface and
        STDIO/STDOUT interface. The output of all kinds of
        Messages conform to the same format. The json object
        can be used to pass information in/out SNSAPI using
        Python class. It is also able to pretty print, so
        that the STDOUT result is easy to parse in any
        language.
        '''
        return self.parsed._dumps_pretty()

    def dump_full(self):
        '''
        Level 3 serialization: complete output.

        This level targets more sophisticated applications.
        The basic function of SNSAPI is to unify different
        formats. That's what the first two level of
        serialization do. However, app developers may want
        more sophisticated processing. We serialize the full
        Message object through this function. In this way,
        app developers can get all information they need.
        Note that knowledge of the platform specific return
        format is essential. We conclude their fields in:

            * https://github.com/hupili/snsapi/wiki/Status-Attributes

        This wiki page may not always be up to date. Please
        refer to the offical API webpage for more info.
        '''
        return self._dumps()

    def digest(self):
        '''
        Digest the message content. This value is useful in
        for example forwarding services to auto-reply services,
        for those applications requires message deduplication.

        It corresponds to dump().

        Note: different messages may be regarded as the same
        according to this digest function.
        '''
        # Fix the timezone so the digest is stable across machines.
        from utils import FixedOffsetTimeZone
        tz = FixedOffsetTimeZone(0, 'GMT')
        return hashlib.sha1(self.dump(tz=tz).encode('utf-8')).hexdigest()

    def digest_parsed(self):
        '''
        It corresponds to dump_parsed()
        '''
        return hashlib.sha1(self.dump_parsed().encode('utf-8')).hexdigest()

    def digest_full(self):
        '''
        It corresponds to dump_full()
        '''
        return hashlib.sha1(self.dump_full().encode('utf-8')).hexdigest()
class MessageList(list):
    """A list that only accepts non-deleted Message elements via append()."""

    def __init__(self, init_list=None):
        super(MessageList, self).__init__()
        if init_list:
            self.extend(init_list)

    def append(self, e):
        if not isinstance(e, Message):
            logger.debug("Trying to append non- Message type element. Ignored")
        elif hasattr(e, 'deleted') and e.deleted:
            logger.debug("Trying to append Deleted Message type element. Ignored")
        else:
            super(MessageList, self).append(e)

    def extend(self, l):
        if isinstance(l, MessageList):
            super(MessageList, self).extend(l)
        elif isinstance(l, list):
            # We still extend the list if the user asks to, but warn:
            # doing so may violate MessageList invariants (e.g. absence
            # of deleted Messages).
            super(MessageList, self).extend(l)
            logger.warning("Extend MessageList with non MessageList list.")
        else:
            logger.warning("Extend MessageList with unknown type.")

    def __str__(self):
        # "<index>\n<message>\n" for each element, concatenated.
        return "".join("<%d>\n%s\n" % (idx, str(msg))
                       for idx, msg in enumerate(self))

    def __unicode__(self):
        return "".join("<%d>\n%s\n" % (idx, unicode(msg))
                       for idx, msg in enumerate(self))
class User(object):
    """Minimal user abstraction; currently only carries an id."""

    def __init__(self, jobj=None):
        # 'jobj' is accepted for interface symmetry but not used yet.
        self.id = 0
class AuthenticationInfo(utils.JsonObject):
    """Per-channel authentication configuration.

    Unset strategy fields hold the "(default)" placeholder until
    set_defaults() substitutes concrete strategies.
    """

    def __init__(self, auth_info=None):
        if auth_info:
            self.update(auth_info)
        else:
            self.callback_url = None
            self.cmd_fetch_code = "(default)"
            self.cmd_request_url = "(default)"
            self.save_token_file = "(default)"
            self.login_username = None
            self.login_password = None

    def set_defaults(self):
        """Replace missing or placeholder entries with concrete defaults."""
        DEFAULT_MAPPING = {
            "cmd_request_url": "(local_webserver)+(webbrowser)",
            "cmd_fetch_code": "(local_webserver)"
        }
        for key, value in DEFAULT_MAPPING.items():
            if key not in self or self[key] == "(default)":
                self[key] = value
if __name__ == "__main__":
    # Smoke test: build two Messages (one ASCII, one non-ASCII text)
    # and print them via MessageList.__str__ (Python 2 print statement).
    import time
    m1 = Message({'text': 'test',
        'username': 'snsapi',
        'userid': 'snsapi',
        'time': time.time() })
    m2 = Message({'text': u'测试',
        'username': 'snsapi',
        'userid': 'snsapi',
        'time': time.time() })
    ml = MessageList()
    ml.append(m1)
    ml.append(m2)
    # NOTE:
    # When you develop new plugins, the MessageList returned
    # by your ``home_timeline`` should be printable in this
    # way. This is minimum checking for whether you have
    # mandatory fields.
    print ml
|
from pathlib import Path
from typing import Optional
import nbformat
from nbclient.client import (
CellExecutionError,
CellTimeoutError,
NotebookClient,
)
from nbformat import NotebookNode
from .nb_result import NotebookError, NotebookResult
NB_VERSION = 4
class NotebookRun:
    """Executes one notebook file and converts failures into NotebookError."""

    filename: Path   # notebook file to execute
    verbose: bool    # verbosity flag (stored; not read within this class)

    def __init__(
        self,
        filename: Path,
        default_timeout: int,
        verbose: bool = False,
        kernel: Optional[str] = None,
    ) -> None:
        self.filename = filename
        self.verbose = verbose
        self.default_timeout = default_timeout
        self.kernel = kernel

    def execute(
        self,
    ) -> NotebookResult:
        """Run the notebook and return a NotebookResult.

        Reads the notebook (format version NB_VERSION), clears stale
        code-cell outputs, applies per-notebook metadata overrides
        (``execution.timeout``, ``execution.allow_errors``), then runs it
        with NotebookClient.  Execution failures and timeouts are captured
        as a NotebookError in the result; unexpected exceptions are
        re-raised unless they are jupyter_client's NoSuchKernel.
        """
        nb = nbformat.read(str(self.filename), NB_VERSION)
        # Drop outputs left over from previous runs so the result only
        # contains freshly produced output.
        for cell in nb.cells:
            if cell.cell_type == "code":
                cell.outputs = []

        timeout = self.default_timeout
        allow_errors = False
        # Notebook-level metadata may override the configured defaults.
        if "execution" in nb.metadata:
            if "timeout" in nb.metadata.execution:
                timeout = nb.metadata.execution.timeout
            if "allow_errors" in nb.metadata.execution:
                allow_errors = nb.metadata.execution.allow_errors

        error: Optional[NotebookError] = None

        extra_kwargs = {}
        if self.kernel:
            extra_kwargs["kernel_name"] = self.kernel

        try:
            c = NotebookClient(
                nb,
                timeout=timeout,
                allow_errors=allow_errors,
                record_timing=True,
                **extra_kwargs,
            )
            # Execute relative to the notebook's directory so relative
            # paths inside the notebook resolve as expected.
            c.execute(cwd=self.filename.parent)
        except CellExecutionError:
            error = self._get_error(nb)
        except CellTimeoutError as err:
            trace = err.args[0]
            error = NotebookError(
                summary=trace.split("\n")[0],
                trace=trace,
                failing_cell_index=self._get_timeout_cell(nb),
            )
        except Exception as err:
            # if at top causes https://github.com/jupyter/nbclient/issues/128
            # from jupyter_client.kernelspec import KernelSpecManager, NoSuchKernel
            # trace=f"{summary}\n\nInstalled Kernels: {str(KernelSpecManager().find_kernel_specs())}",
            # https://github.com/treebeardtech/nbmake/runs/1536896858?check_suite_focus=true
            # NoSuchKernel is detected via its class-name string to avoid
            # importing jupyter_client here (see linked issue above).
            if str(type(err)) != "<class 'jupyter_client.kernelspec.NoSuchKernel'>":
                raise err
            summary = f"Error - No such kernel: '{err.name}'"  # type: ignore
            error = NotebookError(
                summary=summary,
                trace=f"{summary}",
                failing_cell_index=0,
            )

        return NotebookResult(nb=nb, error=error)

    def _get_timeout_cell(self, nb: NotebookNode) -> int:
        """Return the index of the first code cell that never replied
        (i.e. the cell that timed out), or -1 if none is found."""
        for i, cell in enumerate(nb.cells):
            if cell.cell_type != "code":
                continue
            # A cell that finished has 'shell.execute_reply' in its
            # execution timing metadata (record_timing=True above).
            if "shell.execute_reply" not in cell.metadata.execution:
                return i
        return -1

    def _get_error(self, nb: NotebookNode) -> Optional[NotebookError]:
        """Build a NotebookError from the LAST failing code cell, or None."""
        for i, cell in reversed(list(enumerate(nb["cells"]))):  # get LAST error
            if cell["cell_type"] != "code":
                continue
            errors = [
                output
                for output in cell["outputs"]
                if output["output_type"] == "error" or "ename" in output
            ]
            if errors:
                # Join the traceback lines and prefix the offending source
                # so the report shows both code and failure.
                tb = "\n".join(errors[0].get("traceback", ""))
                src = "".join(cell["source"])
                last_trace = tb.split("\n")[-1]
                line = 75 * "-"
                trace = f"{line}\n{src}\n{tb}"
                return NotebookError(
                    summary=last_trace, trace=trace, failing_cell_index=i
                )
        return None
|
import dlib
from imutils import face_utils
import cv2
import time
# One BGR color per facial-landmark region for visualize_facial_landmarks.
colors = [(19, 199, 109), (79, 76, 240), (230, 159, 23),
          (168, 100, 168), (158, 163, 32),
          (163, 38, 32), (180, 42, 220), (79, 76, 240), (230, 159, 23),
          (168, 100, 168), (158, 163, 32),
          (163, 38, 32), (180, 42, 220), (79, 76, 240), (230, 159, 23),
          (168, 100, 168), (158, 163, 32),
          (163, 38, 32), (180, 42, 220)]

# Pre-trained models: 68-point landmark predictor and CNN face detector.
shape = dlib.shape_predictor("faceShape.dat")
cam = cv2.VideoCapture(0)
face_detector = dlib.cnn_face_detection_model_v1("human_face_detector.dat")

count = 0
while True:
    start = time.time()
    grabbed, img = cam.read()
    if not grabbed:
        # BUG FIX: a failed read originally left 'img' undefined while the
        # bare except hid the error until cv2.putText raised a NameError.
        continue
    count += 1
    img = cv2.flip(img, 1)
    try:
        black = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        dets = face_detector(black)
        for loc in dets:
            face = loc.rect
            geom = shape(black, face)
            geometry = face_utils.shape_to_np(geom)

            # Outline both eyes with their convex hulls.
            landmarks = face_utils.FACIAL_LANDMARKS_68_IDXS
            left_eye = landmarks["left_eye"]
            right_eye = landmarks["right_eye"]
            left_hull = cv2.convexHull(geometry[left_eye[0]:left_eye[1]])
            right_hull = cv2.convexHull(geometry[right_eye[0]:right_eye[1]])
            # BUG FIX: drawContours expects a *list* of contours; passing the
            # hull array directly treats each point as a separate contour.
            cv2.drawContours(img, [left_hull], -1, [0, 0, 255], 5)
            cv2.drawContours(img, [right_hull], -1, [0, 0, 255], 5)

            # Bounding box of the detected face.
            x = face.left()
            y = face.top()
            w = face.right() - x
            h = face.bottom() - y
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            img = face_utils.visualize_facial_landmarks(img, geometry, colors=colors)
    except Exception:
        # Detection is best-effort; keep streaming raw frames on failure.
        pass

    elapsed = time.time() - start
    # BUG FIX: guard against ZeroDivisionError on extremely fast iterations.
    fps = int(count / elapsed) if elapsed > 0 else 0
    count = 0
    cv2.putText(img, "FPS:- " + str(fps), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    cv2.imshow("img", img)
    if cv2.waitKey(5) == 27:  # ESC quits
        break

# Release the camera as well as the display window.
cam.release()
cv2.destroyAllWindows()
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import logging.config
import os
import unittest
import uuid
import yaml
from marquez_client import Clients
from marquez_client.models import (DatasetType, JobType)
from marquez_client.utils import Utils
_NAMESPACE = "my-namespace"
log = logging.getLogger(__name__)
class TestMarquezWriteOnlyClientFile(unittest.TestCase):
    """Exercises the write-only Marquez client with the 'file' backend."""

    def setUp(self):
        log.debug("MarquezWriteOnlyClient.setup(): ")
        # Configure logging from the checked-in YAML config.
        with open('tests/logConfig.yaml', 'rt') as cfg:
            logging.config.dictConfig(yaml.safe_load(cfg.read()))
        log.info("loaded logConfig.yaml")
        # Route all metadata writes to the file backend.
        os.environ['MARQUEZ_BACKEND'] = 'file'
        self.client_wo_file = Clients.new_write_only_client()
        log.info("created marquez_client_wo_file.")

    def test_create_namespace(self):
        log.info("test_create_namespace()")
        self.client_wo_file.create_namespace(
            _NAMESPACE, "me", "my namespace for testing.")

    def test_create_dataset(self):
        ds_name = "my-dataset"
        # Column schema for the test dataset.
        schema = [
            {"name": n, "type": t, "description": d}
            for (n, t, d) in (
                ("flight_id", "INTEGER", "flight id"),
                ("flight_name", "VARCHAR", "flight name"),
                ("flight_date", "TIMESTAMP", "flight date"),
            )
        ]
        self.client_wo_file.create_dataset(
            namespace_name=_NAMESPACE,
            dataset_name=ds_name,
            dataset_type=DatasetType.DB_TABLE,
            run_id=str(uuid.uuid4()),
            physical_name=ds_name,
            source_name='my-source',
            description="My dataset for testing.",
            schema_location=None,
            fields=schema,
            tags=None
        )

    def test_create_datasource(self):
        jdbc_url = ("jdbc:postgresql://localhost:5432/test?"
                    "user=fred&password=secret&ssl=true")
        self.client_wo_file.create_source(
            source_name="flight_schedules_db",
            source_type='POSTGRESQL',
            connection_url=jdbc_url,
            description="PostgreSQL - flight schedules database")

    def test_create_job(self):
        # The job reads and writes the same table in this fixture.
        table_ref = {"namespace": "my-namespace", "name": "public.mytable"}
        self.client_wo_file.create_job(
            namespace_name=_NAMESPACE,
            job_name="my-job",
            job_type=JobType.BATCH,
            location=("https://github.com/my-jobs/blob/"
                      "07f3d2dfc8186cadae9146719e70294a4c7a8ee8"),
            input_dataset=[dict(table_ref)],
            output_dataset=dict(table_ref),
            context={"SQL": "SELECT * FROM public.mytable;"}
        )

    def test_create_job_run(self):
        args = {
            "email": "me@mycorp.com",
            "emailOnFailure": "true",
            "emailOnRetry": "true",
            "retries": "1"
        }
        self.client_wo_file.create_job_run(
            namespace_name=_NAMESPACE,
            job_name="my-job",
            run_id=str(uuid.uuid4()),
            nominal_start_time=None,
            nominal_end_time=None,
            run_args=args,
            mark_as_running=True
        )

    def test_mark_job_run_as_start(self):
        self.client_wo_file.mark_job_run_as_started(
            run_id=str(uuid.uuid4()), action_at=Utils.utc_now())

    def test_mark_job_run_as_completed(self):
        self.client_wo_file.mark_job_run_as_completed(
            run_id=str(uuid.uuid4()), action_at=Utils.utc_now())

    def test_mark_job_run_as_failed(self):
        self.client_wo_file.mark_job_run_as_failed(
            run_id=str(uuid.uuid4()), action_at=Utils.utc_now())

    def test_mark_job_run_as_aborted(self):
        self.client_wo_file.mark_job_run_as_aborted(
            run_id=str(uuid.uuid4()), action_at=Utils.utc_now())
if __name__ == '__main__':
    # Allow running this test module directly: python <this_file>.py
    unittest.main()
|
# coding: utf-8
from flask import render_template, flash, redirect,request,url_for,current_app
import os
# os.chdir(r'D:\作业\小学期Python\nlp\NLP\resource\learning_nlp\chapter-8\classification')
import numpy as np
from sklearn.cross_validation import train_test_split
import numpy as np
import jieba
from app import app
from .normalization import normalize_corpus
from sklearn.externals import joblib
# from feature_extractors import bow_extractor, tfidf_extractor
from collections import Counter
def get_data(uploadpath):
    """Read the uploaded text file and return its lines.

    :param uploadpath: path of the uploaded UTF-8 text file
    :return: list of lines (trailing newlines preserved)
    """
    with open(uploadpath, encoding="utf8") as fh:
        return fh.readlines()
def remove_empty_docs(corpus):
    """Return *corpus* with blank / whitespace-only documents removed."""
    return [doc for doc in corpus if doc.strip()]
def mainAnalysis(uploadpath):
    """Classify every non-empty line of the uploaded file.

    Loads the persisted MLP classifier and its TF-IDF vectorizer,
    normalizes the uploaded corpus, segments each document with jieba,
    vectorizes, and predicts a label per document.

    :param uploadpath: path to the uploaded UTF-8 text file
    :return: list of predicted labels, one per non-empty document
    """
    # NOTE(review): sklearn.externals.joblib is deprecated — prefer the
    # standalone 'joblib' package when upgrading scikit-learn.
    mlp = joblib.load('app/static/lib/name.model')
    tfidf_vectorizer = joblib.load('app/static/lib/vectorizer_name.model')

    corpus = remove_empty_docs(get_data(uploadpath))
    norm_train_corpus = normalize_corpus(corpus)

    # Segment each normalized document with jieba and join tokens with
    # commas, matching the representation used at training time.
    # (Direct iteration replaces the original index-based loop.)
    train_messages = [
        ','.join(jieba.cut(doc, cut_all=False)) for doc in norm_train_corpus
    ]

    tfidf_train_features = tfidf_vectorizer.transform(train_messages)
    predictions = mlp.predict(tfidf_train_features)
    print(predictions.tolist())
    return predictions.tolist()
# if __name__ == "__main__":
# mainAnalysis(uploadpath)
|
import datetime
from types import resolve_bases
from cryptography.fernet import Fernet
from flask import Flask
import sli_database
from flask import request
import requests
from flask_cors import CORS, cross_origin
import random
from dotenv import load_dotenv
import os
from flaskext.mysql import MySQL
import pymysql
import ssl
import smtplib
from datetime import date, timedelta
import json
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the frontend
load_dotenv()

# info from .env for SLI email
EMAIL = os.getenv("EMAIL")
EMAIL_PASSWORD = os.getenv("EMAIL_PASSWORD")

# SQL_HOST = os.getenv("SQL_HOST")
# SQL_USER = os.getenv('SQL_USER')
# SQL_PASSWORD = os.getenv('SQL_PASSWORD')
# app.config['MYSQL-HOST'] = SQL_HOST
# app.config['MYSQL_USER'] = SQL_USER
# app.config['MYSQL_PASSWORD'] = SQL_PASSWORD
# app.config['MYSQL_DB'] = "sli_database"
# app.config['MYSQL_AUTOCOMMIT'] = True
# mysql = MySQL()
# mysql = MySQL(app, host = SQL_HOST, user = SQL_USER, password = SQL_PASSWORD, db = "sli_database", autocommit = True, cursorclass = pymysql.cursors.DictCursor)
# mysql.init_app(app)

# key value for encryption
# NOTE(review): this raises TypeError at import time if ENCRYPTION_KEY is
# unset — TODO confirm deployment always provides it.
key = bytes(os.getenv("ENCRYPTION_KEY"), "utf-8")

# create database instance
db = sli_database.DB(app)
'''def createUserStudent(username, password,
querynum=0,
updatenum=0,
connection_num=0):
cipher_suite = Fernet(key)
password = str.encode(password)
ciphered_password = cipher_suite.encrypt(password)
ciphered_password = bytes(ciphered_password).decode("utf-8")
cursor = db.cursor()
try:
cursor.execute("INSERT INTO student VALUES (\"%s\", \"%s\")"%(username, ciphered_password))
print("Student account successfully created.")
except Exception as Ex:
print("Error creating Student account: %s"%(Ex))'''
#createUserStudent(cursor, "user_test", "pass_test")
#createUserTeacher(cursor, 0, "email_test", "pass_test", "f_test", "l_test")
'''
Authenticates login with username and password input from login screen.
This fetches the login information for the given username passed in and then
verifies that the input password matches the decrypted password and the input
role matches the fetched role. If login is authenticated, then the a token is
generated for that user and it is passed back to the front end.
input data format:
"username": username entered on login screen
"password": password entered on login screen
"role": role entered on login screen
response data format:
"code": 1 for success, 0 for failure
"token": token generated after login authenticated
"username": user that has been logged in
"isLoggedIn": set to true for frontend sessionStorage
"role": role of user logged in
'''
@app.route("/api/authenticateLogin", methods=['POST'])
@cross_origin()
def login():
    """Authenticate a username/password pair and issue a session token.

    Returns {"code": 1, "token", "username", "isLoggedIn", "role"} on
    success, {"code": 0} when the user does not exist or the password
    does not match.
    """
    payload = request.get_json(force=True)
    username = payload["username"]
    password = payload["password"]

    records = db.getLogin(username)
    if len(records) == 0:
        return json.dumps({"code": 0})  # failure- email doesn't exist

    # Stored password is Fernet-encrypted; decrypt before comparing.
    cipher_suite = Fernet(key)
    stored = bytes(records[0][1]).decode("utf-8")
    unciphered = cipher_suite.decrypt(str.encode(stored))
    fetched = bytes(unciphered).decode("utf-8")

    if password != fetched:
        return json.dumps({"code": 0})  # failure- password incorrect

    token = generateToken(32)
    username = records[0][0]
    setUserToken(username, token)
    response = {
        "code": 1,
        "token": token,
        "username": username,
        "isLoggedIn": True,
        "role": records[0][2],
    }
    return json.dumps(response)  # success
'''
Gets user information based on token. Used for displaying name
on dashboard and maintaining session after login. Also used to
verify if a user is already logged in.
input data format:
"token": session token for a specific user
response data format:
"username": user that is logged in
"isLoggedIn": set to true for frontend sessionStorage if already logged in, false otherwise
"fname": first name of user to display on dashboard
'''
@app.route("/api/getCurrentUserToken", methods=['GET'])
@cross_origin()
def getUserToken():
    """Resolve a session token to its user; used to restore sessions
    and to display the user's first name on the dashboard."""
    response = {}
    token = request.args.get("token", default=None)
    try:
        matches = db.getUserToken(token) if token else None
        details = None
        if matches and len(matches) > 0:
            # Valid token: the session can be restored.
            name = matches[0][0]
            details = db.getUserInfo(name)
            response["isLoggedIn"] = True
            response["username"] = name
        else:
            response["isLoggedIn"] = False
        if details and len(details) > 0:
            response["fname"] = details[0][0]
        response["code"] = 1
        return json.dumps(response)
    except Exception:
        return json.dumps({"code": 0})
'''
Used for teachers to get a list of their classes to be displayed in dropdown menus
to select a particular class.
input data format:
"username": username of teacher whose classes we want
output data format:
"code": 1 for success, 0 for failure
"classes": list of all classes owned by the teacher
'''
@app.route("/api/getClassesList", methods=['GET'])
@cross_origin()
def getClassesList():
    """Return the list of classes owned by the given teacher."""
    response = {}
    try:
        teacher = request.args.get("username", default=None)
        classes = db.getClasses(teacher)
        if classes and len(classes) > 0:
            response["code"] = 1
            response["classes"] = classes
        else:
            response["code"] = 0
        return json.dumps(response)
    except Exception:
        response["code"] = 0
        return json.dumps(response)
'''
Used by teachers to get the complete list of students in a given class that the teacher
owns. This information is displayed on the manage classes page.
input data format:
"current_class": specific class that we want a student list for
"username": username of teacher who owns the class
output data format:
"code": 1 for success, 0 for failure
"studentList": list of student usernames in the given class
'''
@app.route("/api/getStudentsFromClass", methods=['GET'])
@cross_origin()
def getStudentsFromClass():
    """List usernames of all students enrolled in a teacher's class."""
    response = {}
    try:
        current_class = request.args.get("current_class", default=None)
        teacher = request.args.get("username", default=None)
        if teacher and current_class:
            rows = db.getStudentsOfClass(teacher, current_class)
            if rows:
                # Each row is a 1-tuple holding the student username.
                response["studentList"] = [row[0] for row in rows]
                response["code"] = 1
                return json.dumps(response)
        return json.dumps({"code": 0})
    except Exception:
        return json.dumps({"code": 0})
'''
Creates a student or teacher account depending on the role passed in. Teacher
accounts are created from create account page, student accounts are made from
add student to class page. If the user is a student, the new account is also added
to the teacher's class that is passed in. If the new user is a teacher, after creating
the account, the user will be logged in.
input data format:
"role": role of new user
"username": username of new user
"password": password of new user
"conf_password": confirmation of new password
"teacher": if role is "S", username of teacher who is creating the account
"current_class": if role is "S", name of class which the student will be added to
output data format:
"code": 1 for success, 2 for invalid login info, 0 for failure
"username": if role is "T", username of user that has been created/logged in
"token": if role is "T", token of user that has been logged in
"isLoggedIn": if role is "T", whether or not user has been logged in
"role": if role is "T", role of user that has been logged in
'''
@app.route("/api/createAccount", methods=['POST'])
@cross_origin()
def createAccount():
    # Creates a student ("S") or teacher ("T") account.  Passwords are
    # Fernet-encrypted before storage.  Teachers are logged in right after
    # creation (token issued); students are also enrolled in the given class.
    data = request.get_json(force=True)
    response = {}
    try:
        # Password must match its confirmation and be at least 8 characters.
        if data["password"] == data["conf_password"] and len(data["password"]) >= 8:
            cipher_suite = Fernet(key)
            enc_pwd = str.encode(data["password"])
            ciphered_pwd = cipher_suite.encrypt(enc_pwd)
            ciphered_pwd = bytes(ciphered_pwd).decode("utf-8")
            try:
                if data["role"] == "S":
                    # Student: create the account and enrol in the teacher's class.
                    db.createAccount(data["username"], ciphered_pwd, data["role"])
                    db.addStudentToClass(data["teacher"], data["current_class"], data["username"])
                else:
                    # Teacher: create the account and log the new user in.
                    db.createAccount(data["username"], ciphered_pwd, data["role"], data["fname"], data["lname"])
                    token = generateToken(32)
                    username = data["username"]
                    setUserToken(username, token)
                    response["token"] = token
                    response["username"] = username
                    response["isLoggedIn"] = True
                    response["role"] = data["role"]
                response["code"] = 1
                return json.dumps(response)
            except:
                # Account creation rejected by the DB (e.g. duplicate user).
                return json.dumps({"code": 2})
        else:
            return json.dumps({"code": 0})
    except:
        response["code"] = 0
        return json.dumps(response)
# def createUser(username, password, role, fname, lname,
# querynum=0,
# updatenum=0,
# connection_num=0):
# '''
# cipher_suite = Fernet(key)
# password = str.encode(password)
# ciphered_password = cipher_suite.encrypt(password)
# ciphered_password = bytes(ciphered_password).decode("utf-8")
# '''
# try:
# result = db.createAccount(username, password, role, fname=None, lname=None)
# print(result)
# print("Account successfully created.")
# return json.dumps(result)
# except Exception as Ex:
# print("Error creating account: %s"%(Ex))
'''
Used by teachers to create a new class. This can be found from the manage classes page.
input data format:
"username": username of teacher who is creating class
"class_name": name of new class
output data format:
"code": 1 for success, 0 for failure
'''
@app.route("/api/createNewClass", methods=['POST'])
@cross_origin()
def createClass():
    """Create a new class owned by the given teacher."""
    try:
        data = request.get_json(force=True)
        response = {}
        owner = data["username"]
        class_name = data["class_name"]
        # role "S" = student and "T" = teacher
        if owner and class_name:
            db.createNewClass(owner, class_name)
            response["code"] = 1
        return json.dumps(response)
    except Exception:
        return json.dumps({"code": 0})
'''
Generates an encrypted link extension that is associated to the specific teacher
and sends a valid reset password link to the teacher's email. The associated link
extension and username are also added to the database for later retrieval.
input data format:
"email": email of teacher who is resetting their password.
output data format:
"code": 1 for success, 0 for failure
'''
@app.route("/api/sendPasswordEmail", methods=['POST'])
def sendPasswordEmail():
    """Generate a one-time reset link for a teacher and email it.

    A random 32-char extension is stored (plain) in the database and its
    Fernet-encrypted form is embedded in the emailed URL, which
    getResetLinkUser later decrypts and looks up.

    input data format:
        "email": email of teacher who is resetting their password.
    output data format:
        "code": 1 for success, 0 for failure
    """
    data = request.get_json(force=True)
    allowed_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
    try:
        # Create a unique link extension and register it for this user.
        link = "".join(allowed_chars[random.randint(0, len(allowed_chars) - 1)]
                       for _ in range(32))
        cipher_suite = Fernet(key)
        ciphered_link = bytes(cipher_suite.encrypt(str.encode(link))).decode("utf-8")
        db.createPasswordLink(data["email"], link)
    except Exception:
        # BUG FIX: the original did a bare 'return' here, so the view
        # returned None and Flask raised a 500; report a JSON failure code.
        return json.dumps({"code": 0})
    try:
        port = 465  # For SSL
        smtp_server = "smtp.gmail.com"
        sender_email = EMAIL
        receiver_email = data["email"]
        password = EMAIL_PASSWORD
        full_link = "http://127.0.0.1:3000/resetPassword/" + ciphered_link
        subject = "S.L.I. App Password Retrieval"
        text = "Hello!\n\nUse this link to reset your Seed & Lead Impact App password:\n{}\n\n-The Team at Seed & Lead Impact".format(full_link)
        message = "Subject: {}\n\n{}".format(subject, text)
        context = ssl.create_default_context()
        with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
            server.login(sender_email, password)
            server.sendmail(sender_email, receiver_email, message)
        response = {"code": 1}
    except Exception:
        response = {"code": 0}
    return json.dumps(response)
'''
Gets the associated teacher's username given the URL extension for resetting their password.
input data format:
"link": url extension associated with the intended user
output data format:
"code": 1 for success, 0 for failure
"username": username of teacher associated with the given link extension
'''
@app.route("/api/getResetLinkUser", methods=['GET'])
@cross_origin()
def getResetLinkUser():
    """Map a reset-password URL extension back to its teacher username."""
    response = {}
    link = request.args.get("link", default=None)
    try:
        # The URL carries the Fernet-encrypted extension; decrypt it to
        # recover the plain value stored in the database.
        cipher_suite = Fernet(key)
        plain = bytes(cipher_suite.decrypt(str.encode(link))).decode("utf-8")
        rows = db.getPasswordLink(plain)
        if len(rows) > 0:
            response["username"] = rows[0][0]
            response["code"] = 1
        else:
            response["code"] = 0
    except:
        response["code"] = 0
    return json.dumps(response)
'''
Allows students to log work they have completed and adds it to the database to be
counted towards campaigns and goals.
input data format:
"username": username of student logging work
"project": name of the work completed
"SDG": SDG that the work applies to
"date": date the work was completed
"hours": total hours of work
"description": short description of work completed
output data format:
"code": 1 for success, 0 for failure
'''
@app.route("/api/logWork", methods=['POST'])
@cross_origin()
def logWork():
    """Record a unit of completed work for a student."""
    data = request.get_json(force=True)
    try:
        db.logWork(
            data["username"],
            data["project"],
            data["SDG"],
            data["date"],
            data["hours"],
            data["description"],
        )
        return json.dumps({"code": 1})  # success
    except Exception:
        return json.dumps({"code": 0})  # id not in database
'''
Logs out a user that is currently logged in. Deletes token and session information
from the database and sessionStorage. A user is also logged out when they close the
window.
input data format:
"username": username of user logging out.
output data format:
"code": 1 for success, 0 for failure
'''
@app.route("/api/logout", methods=['POST'])
@cross_origin()
def logout():
    """Log a user out by deleting their session token."""
    data = request.get_json(force=True)
    user = data["username"]
    try:
        db.deleteToken(user)
        return json.dumps({"code": 1})  # success
    except Exception:
        return json.dumps({"code": 0})  # id not in database
# this is temporary token generating algorithm
# need to use library later
'''
Helper function to generate a token for a user
parameters:
length: length of token (32)
returns:
valid token to authenticate user login
'''
def generateToken(length):
    """Return a random alphanumeric session token.

    :param length: number of characters in the token
    :return: token string of exactly *length* alphanumeric characters

    SECURITY FIX: the original used the ``random`` module, which is not
    suitable for security tokens (and carried a TODO saying so); this
    uses the ``secrets`` CSPRNG instead, with an unchanged interface.
    """
    import secrets
    allowed_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
    return "".join(secrets.choice(allowed_chars) for _ in range(length))
'''
Helper function to set the token for the given user in the database.
parameters:
username: username of user whose token is generated
token: token for the given user
'''
def setUserToken(username, token):
    """Persist *token* for *username* in the database.

    Mirrors the original contract: returns the exception object on
    failure, None otherwise.
    """
    try:
        if username and token:
            db.insertToken(username, token)
    except Exception as ex:
        return ex
'''
Retrieves the campaigns either for a specific student or a specific teacher depending on the role.
If the user is a student, this will get the student's personal instance of each campaign for the class
they are in. If the user is a teacher, this will get the overall instance of each campaign in all classes
owned by the teacher.
input data format:
"username": username of user whose campaigns should be retrieved (from sessionStorage)
"role": role of the user, either "T" or "S" (from sessionStorage)
response data format:
"code": 1 for success, 0 for failure
"campaignList": list of campaigns assigned to or owned by the user in format [campaign_name, total_hours, start_date, due_date]
'''
@app.route("/api/getCampaigns", methods=['GET'])
@cross_origin()
def getCampaigns():
    """Fetch campaigns: aggregate per-class for teachers ("T"),
    personal instances for students."""
    role = request.args.get("role", default=None)
    username = request.args.get("username", default=None)
    current_class = request.args.get("current_class", default=None)
    response = {}
    try:
        if role == "T":
            response["campaignList"] = db.teacherGetCampaigns(username, current_class)
        else:
            response["campaignList"] = db.studentGetCampaigns(username)
        response["code"] = 1
    except:
        response["code"] = 0
    return json.dumps(response, indent=4, sort_keys=True, default=str)
'''
Gets a student's goal to be displayed on the dashboard. A student will only have one
goal at a time.
input data format:
"username": username of student who's goal we want
output data format:
"code": 1 for success, 0 for failure
"goal": goal of the student in the form [total_hours, target_date]
'''
@app.route("/api/getGoal", methods=['GET'])
@cross_origin()
def getGoal():
    """Return the single active goal of a student for the dashboard."""
    student = request.args.get("username", default=None)
    response = {}
    try:
        response["goal"] = db.getGoal(student)
        response["code"] = 1
    except:
        response["code"] = 0
    return json.dumps(response)
'''
Used by teachers to create a campaign for a class that they own. The campaign is then
visible by every student in that class.
input data format:
"username": username of teacher creating the campaign
"current_class": class name that the campaign will be assigned to
"name": name of the campaign
"hours": total hours required to complete the campaign
"start_date": date the campaign starts
"due_date": due date of the campaign
output data format:
"code": 1 for success, 0 for failure
'''
@app.route("/api/createCampaign", methods=['POST'])
@cross_origin()
def createCampaign():
    """Create a campaign for a class owned by the requesting teacher."""
    data = request.get_json(force=True)
    response = {}
    try:
        db.createCampaign(
            data["username"],
            data["current_class"],
            data["name"],
            data["hours"],
            data["start_date"],
            data["due_date"],
        )
        response["code"] = 1
    except:
        response["code"] = 0
    return json.dumps(response)
# needed from frontend state: "username" can come from sessionStorage, "total_hours", and "target_date"
'''
Used by students to set a goal for themselves. A student will only have one goal
at a time. If a student already has a goal and sets a new one, the old goal is
overwritten.
input data format:
"username": username of the student setting the goal
"total_hours": total hours to achieve the goal
"target_date": target date for the goal to be completed by
output data format:
"code": 1 for success, 0 for failure
'''
@app.route("/api/createGoal", methods=['POST'])
@cross_origin()
def createGoal():
    """Set (or overwrite) a student's single personal goal."""
    data = request.get_json(force=True)
    response = {}
    try:
        db.createGoal(data["username"], data["total_hours"], data["target_date"])
        response["code"] = 1
    except:
        response["code"] = 0
    return json.dumps(response)
'''
Used by teachers once they receive a reset password link to reset their password.
Replaces old password in the database with the new password.
input data format:
"username": username of teacher resetting their password
"new_password": newly reset password
"conf_new_password": confirmation of new password
output data format:
"code": 1 for success, 0 for failure
'''
@app.route("/api/setNewPassword", methods=['POST'])
@cross_origin()
def setNewPassword():
    """Reset a teacher's password after they follow a reset link.

    Request JSON:
        username: teacher resetting their password.
        new_password: newly chosen password.
        conf_new_password: confirmation of the new password.

    Response JSON:
        "code": 1 on success; 0 if the password is shorter than 8
        characters, the confirmation does not match, or an error occurs.
    """
    data = request.get_json(force=True)
    response = {}
    try:
        username = data["username"]
        password = data["new_password"]
        conf_password = data["conf_new_password"]
        # Minimal policy: at least 8 characters and a matching confirmation.
        if len(password) >= 8 and password == conf_password:
            db.resetPassword(username, password)
            response["code"] = 1
        else:
            response["code"] = 0
    except Exception:
        # Narrowed from a bare `except:`.
        response["code"] = 0
    return json.dumps(response)
'''
Gets current progress for either a specific student or all students in a specific class, depending on
user role. Queries database for each student for each campaign and calculates the percentage of completion
of that student for the given campaign. Progress for campaigns is calculated by summing the total hours
of work completed in the window set by the start and due dates of each campaign.
input data format:
"role": role of user, either "T" or "S" (from sessionStorage)
"username": username of student whose progress we want or of teacher whose class we want
"current_class": present if role is "T", name of class we want to see progress of
"student_filter":
output data format:
"code": 1 for success, 0 for failure
"progress": list of all student progress for all campaigns of class in the form of
[[campaign1, progress1], [campaign2, progess2], ...]
with campaign# in the form of
[class_name, campaign_name, total_hours, start_date, due_date]
and progress# in the form of
[[student1], [student2], ...]
with student# in the form of
[username, hours_complete, percentage_complete]
example:
[
[
[
"campaign1",
5,
"Tue, 02 Nov 2021 00:00:00 GMT",
"Sat, 06 Nov 2021 00:00:00 GMT"
],
[
[
"student1",
3,
"60%"
],
[
"student2",
3,
"60%"
]
]
],
[
[
"campaign2",
10,
"Wed, 03 Nov 2021 00:00:00 GMT",
"Sun, 07 Nov 2021 00:00:00 GMT"
],
[
[
"student1",
3,
"30%"
],
[
"student2",
3,
"30%"
]
]
]
]
'''
@app.route("/api/getProgress", methods=['GET'])
@cross_origin()
def getProgress():
    """Return campaign progress for a student or a whole class.

    See the module-level description above for the exact shape of the
    "progress" payload.  The three original branches (teacher+filter,
    teacher+class, student) only differed in which usernames and
    campaigns were queried, so the common loop lives in
    _collectCampaignProgress below.
    """
    role = request.args.get("role", default=None)
    username = request.args.get("username", default=None)
    current_class = request.args.get("current_class", default=None)
    student_filter = request.args.get("student_filter", default=None)
    response = {}
    try:
        if role == "T":
            if student_filter:
                # Teacher views progress of one specific student.
                usernames = [student_filter]
                campaigns = list(db.studentGetCampaigns(student_filter))
            else:
                # Teacher views progress of the entire class.
                usernames = [s[0] for s in
                             db.getStudentsOfClass(username, current_class)]
                campaigns = list(db.teacherGetCampaigns(username, current_class))
        else:
            # Student views their own progress.
            usernames = [username]
            campaigns = list(db.studentGetCampaigns(username))
        response["progress"] = _collectCampaignProgress(usernames, campaigns)
        response["code"] = 1
        return json.dumps(response)
    except Exception as e:
        print(e)
        response["code"] = 0
        return json.dumps(response)
def _collectCampaignProgress(usernames, campaigns):
    """Build [[campaign_info, [progress, ...]], ...] for every campaign.

    campaign_info is [class_name, campaign_name, start_date, due_date]
    (dates stringified); each progress entry comes from calculateProgress.
    """
    total_progress = []
    for campaign in campaigns:
        campaign_progress = [[campaign[0], campaign[1],
                              str(campaign[2]), str(campaign[3])], []]
        for user in usernames:
            progress = db.getStudentProgress(user, campaign[2], campaign[3])
            campaign_progress[1].append(
                calculateProgress(progress, user, campaign[1]))
        total_progress.append(campaign_progress)
    return total_progress
'''
Helper function for /api/getProgress that calculates percentage of completion of a specific campaign
for a given student. Also handles that case where the student has not logged any work for the campaign.
Parameters:
progress: result of getStudentProgress database query in the form (user, total_hours)
username: username of student in case student is not found in query
goal_hours: total hours of campaign
Returns:
progress: complete progress report in the form (user, total_hours, percentage_complete)
'''
def calculateProgress(progress, username, goal_hours):
    """Compute a student's completion percentage for one campaign.

    Helper for /api/getProgress.  Also handles the case where the student
    has not logged any work for the campaign.

    Parameters:
        progress: result of the getStudentProgress query, either
            [(user, total_hours)] or an empty sequence.
        username: fallback username when the student is absent from the query.
        goal_hours: total hours of the campaign.

    Returns:
        Tuple (user, total_hours, percentage_complete), percentage capped
        at 100.
    """
    if len(progress) > 0:
        user, total = progress[0]
        total = int(total)
        if goal_hours:
            percentage = round(total / goal_hours * 100, 0)
            if percentage > 100:
                percentage = 100
        else:
            # A zero-hour campaign is trivially complete; previously this
            # raised ZeroDivisionError and failed the whole request.
            percentage = 100
    else:
        # No logged work for this campaign.
        user = username
        total = 0
        percentage = 0
    return (user, int(total), percentage)
'''
Gets total hours of logged work in specific class or for a specific student depending on role
Query has the option to be filtered by start date, end date, and teachers can request to see
a specific student's hours.
Input data format:
"role": role of user, either "T" or "S"
"username": username of user whose hours or class's hours we want
"class": class name of class whose hours we want
"start_date": optional filter, "" if not used
"end_date": optional filter, "" if not used
"student_filter": optional filter for teachers to see specific student's hours, "" if not used
Output data format:
"code": 1 for success, 0 for failure
"total_hours": total hours logged for student or class
'''
@app.route("/api/getTotalHours", methods=['GET'])
@cross_origin()
def getTotalHours():
    """Return total logged hours for a student or a class.

    Query parameters:
        role: "T" for teacher; any other value is treated as a student.
        start_date / end_date: optional date-range filters.
        student_filter: optional; lets a teacher query a single student.
        username / current_class: whose hours to aggregate.

    Response JSON:
        Teacher: "total_hours" and "code" (1 on success).
        Student: "indiv_hours" and "class_hours".
        On unexpected errors: "code" = 0.
    """
    # data = request.get_json(force=True)
    start_date = request.args.get("start_date", default=None)
    end_date = request.args.get("end_date", default=None)
    role = request.args.get("role", default=None)
    student_filter = request.args.get("student_filter", default=None)
    username = request.args.get("username", default=None)
    current_class = request.args.get("current_class", default=None)
    response = {}
    try:
        total = 0
        # Inner try: DB queries index [0] into possibly-empty result sets;
        # an IndexError means "no rows", handled below by leaving defaults.
        try:
            if role == "T":
                if student_filter:
                    # Teacher asks for one student's hours.
                    student_progress = db.getStudentProgress(student_filter, start_date, end_date)[0][1]
                    if student_progress:
                        total = int(student_progress)
                else:
                    # Teacher asks for the whole class's hours.
                    class_hours = db.getClassTotalHours(username, current_class, start_date, end_date)[0][0]
                    if class_hours:
                        total = int(class_hours)
                response["total_hours"] = total
                response["code"] = 1
            else:
                # Student branch: return both their own and their class's hours.
                student_progress = db.getStudentProgress(username, start_date, end_date)
                if len(student_progress) >= 1 and len(student_progress[0]) >= 2:
                    student_progress = student_progress[0][1]
                    total = int(student_progress)
                student_class_info = db.getStudentClass(username)[0]
                class_hours = db.getClassTotalHours(student_class_info[1], student_class_info[0], start_date, end_date)[0][0]
                if class_hours == None:
                    class_hours = 0
                # NOTE(review): this branch never sets response["code"];
                # confirm whether the frontend expects one here.
                response["indiv_hours"] = total
                response["class_hours"] = int(class_hours)
        except IndexError:
            pass
        return json.dumps(response, indent=4, sort_keys=True, default=str)
    except Exception as e:
        print(e)
        response["code"] = 0
        return json.dumps(response)
'''
Gets a list of recent work logged by students in a class or by a specific student. If this is
called from the dashboard to be displayed there, it will only retrieve work from the last 14 days.
Otherwise, if it is called from the view progress page, it will retrieve all past work.
input data format:
"role": role of user retrieving progress
"all_work": boolean flag to signal whether to retrieve all work or last 14 days of work
"username": username of user retrieving progress
    "current_class": if role is "T", name of class whose progress we want
output data format:
"code": 1 for success, 0 for failure
"recent_work": list of recent work in the format [username, project, SDG, date, hours, description]
"message": if no recent work, message telling user that no work has been logged in the time frame
'''
@app.route("/api/getRecentWork", methods=['GET'])
@cross_origin()
def getRecentWork():
    """Return recently logged work for a class (teacher) or student.

    Query parameters:
        role: role of the requesting user ("T" for teacher).
        all_work: flag; when set, return all past work instead of the
            last 14 days.
        username: requesting user.
        current_class: class whose work to return (teacher requests).

    Response JSON:
        "code": 1 for success, 2 when there is no matching work, 0 on error.
        "recent_work": list of [username, project, SDG, date, hours, description].
        "message": explanation when no work was found (code 2).
    """
    role = request.args.get("role", default=None)
    # NOTE(review): all_work arrives as a query string; any non-empty value
    # (including "false") is truthy here -- confirm with the frontend.
    all_work = request.args.get("all_work", default=None)
    username = request.args.get("username", default=None)
    current_class = request.args.get("current_class", default=None)
    response = {}
    start_date = str(date.today() - timedelta(14))
    end_date = str(date.today())
    try:
        # Pick the query and the "nothing found" message; the response
        # packaging below is identical for all four cases (it was
        # previously duplicated four times).
        if role == "T":
            if all_work:
                recent_work = db.teacherGetRecentWork(username, current_class)
                empty_message = "There has not been any work logged for this class."
            else:
                recent_work = db.teacherGetRecentWork(username, current_class, start_date, end_date)
                empty_message = "There has not been any work logged in the last 14 days for this class."
        else:
            if all_work:
                recent_work = db.studentGetRecentWork(username)
                empty_message = "You have not logged any work."
            else:
                recent_work = db.studentGetRecentWork(username, start_date, end_date)
                empty_message = "You have not logged any work in the last 14 days."
        if len(recent_work) == 0:
            response["message"] = empty_message
            response["code"] = 2
        else:
            response["recent_work"] = recent_work
            response["code"] = 1
        return json.dumps(response, indent=4, sort_keys=True, default=str)
    except Exception:
        response["code"] = 0
        return json.dumps(response)
@app.route("/api/getGoalProgress", methods=['GET'])
@cross_origin()
def getGoalProgress():
    """Return a student's progress toward their personal goal.

    Query parameters:
        username: student whose goal progress is requested.

    Response JSON:
        "code": 1 for success, 0 when there is no goal or on failure.
        "total_hours" / "current_hours": goal target and hours logged so far.
    """
    # Defined before the try so the except path can never hit a NameError.
    response = {}
    try:
        username = request.args.get("username", default=None)
        goal = db.getGoal(username)
        if len(goal) > 0:
            total_hours = goal[0]
            target_date = goal[1]
            progress = db.getStudentProgress(username, None, target_date)
            current_hours = int(progress[0][1]) if len(progress) > 0 else 0
            response["total_hours"] = total_hours
            response["current_hours"] = current_hours
            response["code"] = 1
        else:
            # Previously an unset goal fell through to a NameError that the
            # bare except converted to code 0; make that failure explicit.
            response["code"] = 0
        return json.dumps(response)
    except Exception:
        response["code"] = 0
        return json.dumps(response)
@app.route("/api/deleteUserAccount", methods=['POST'])
@cross_origin()
def deleteUserAccount():
    """Delete a user account.

    Request JSON:
        currStudent: username of the account to delete.

    Response JSON:
        "code": 1 for success, 0 for failure.
    """
    # Defined before the try: the original created `response` inside the
    # try, so a failure in JSON parsing raised NameError in the handler.
    response = {}
    try:
        data = request.get_json(force=True)
        username = data["currStudent"]
        db.deleteUser(username)
        response["code"] = 1
        return json.dumps(response)
    except Exception:
        response["code"] = 0
        return json.dumps(response)
|
# Input:
# graph[][] = { {0, 5, INF, 10},
# {INF, 0, 3, INF},
# {INF, INF, 0, 1},
# {INF, INF, INF, 0} }
# which represents the following graph
# 10
# (0)------->(3)
# | /|\
# 5 | |
# | | 1
# \|/ |
# (1)------->(2)
# 3
# Note that the value of graph[i][j] is 0 if i is equal to j
# And graph[i][j] is INF (infinite) if there is no edge from vertex i to j.
#
# Output:
# Shortest distance matrix
# 0 5 8 9
# INF 0 3 4
# INF INF 0 1
# INF INF INF 0
# A utility function to print the solution
def printSolution(dist):
    """Pretty-print the shortest-distance matrix, showing INF for unreachable pairs."""
    print("Following matrix shows the shortest distances "
          "between every pair of vertices")
    for i in range(V):
        for j in range(V):
            if dist[i][j] == INF:
                print("%7s" % "INF", end="")
            else:
                print("%7d\t" % dist[i][j], end="")
            if j == V - 1:
                print("")
V = 4
INF = 999999  # sentinel for "no edge between two vertices"
graph = [[0, 5, INF, 10],
         [INF, 0, 3, INF],
         [INF, INF, 0, 1],
         [INF, INF, INF, 0]]
# Copy each row so the input graph is left untouched.  The original
# Python 2 `map(...)` materialised lists; under Python 3 it returned a
# lazy, non-subscriptable iterator and the algorithm crashed.  The
# Python 2 `print` statements above were also a SyntaxError in Python 3.
dist = [list(row) for row in graph]
# Floyd-Warshall: relax every pair (i, j) through each intermediate vertex k.
for k in range(V):
    for i in range(V):
        for j in range(V):
            dist[i][j] = min(dist[i][j],
                             dist[i][k] + dist[k][j]
                             )
printSolution(dist)
print(dist[0][3])
|
from django.db import models
class Picture(models.Model):
    """Uploaded image together with its alt-text caption."""
    img = models.ImageField(upload_to='img/', null=True, blank=True)
    alt = models.CharField(max_length=100)

    def __str__(self):
        # The alt text doubles as the human-readable label in the admin.
        return self.alt
|
import numpy as np
from random import randint, uniform
from . import intelligence
class aba(intelligence.sw):
    """
    Artificial Bee Algorithm
    """
    def __init__(self, n, function, lb, ub, dimension, iteration):
        """
        :param n: number of agents
        :param function: test function
        :param lb: lower limits for plot axes
        :param ub: upper limits for plot axes
        :param dimension: space dimension
        :param iteration: number of iterations
        """
        super(aba, self).__init__()
        self.__function = function
        # Random initial swarm, uniform inside the [lb, ub] box.
        self.__agents = np.random.uniform(lb, ub, (n, dimension))
        self._points(self.__agents)
        # Personal best = the fittest (lowest-valued) agent of the swarm.
        Pbest = self.__agents[np.array([function(x)
                                        for x in self.__agents]).argmin()]
        Gbest = Pbest
        # count = (best sites, bees per best site,
        #          selected sites, bees per selected site)
        if n <= 10:
            count = n - n // 2, 1, 1, 1
        else:
            a = n // 10
            b = 5
            c = (n - a * b - a) // 2
            d = 2
            count = a, b, c, d
        for t in range(iteration):
            fitness = [function(x) for x in self.__agents]
            sort_fitness = [function(x) for x in self.__agents]
            sort_fitness.sort()
            # The count[0] fittest agents are the "best" sites.
            best = [self.__agents[i] for i in
                    [fitness.index(x) for x in sort_fitness[:count[0]]]]
            # NOTE(review): slice is count[0]:count[2]; if the intent was
            # "the next count[2] sites" this should read
            # count[0]:count[0] + count[2] -- confirm against the reference.
            selected = [self.__agents[i]
                        for i in [fitness.index(x)
                                  for x in sort_fitness[count[0]:count[2]]]]
            # Local search around best/selected sites, then top the
            # population back up with fresh random scouts.
            newbee = self.__new(best, count[1], lb, ub) + self.__new(selected,
                                                                     count[3],
                                                                     lb, ub)
            m = len(newbee)
            self.__agents = newbee + list(np.random.uniform(lb, ub, (n - m,
                                                                     dimension)))
            self.__agents = np.clip(self.__agents, lb, ub)
            self._points(self.__agents)
            Pbest = self.__agents[
                np.array([function(x) for x in self.__agents]).argmin()]
            if function(Pbest) < function(Gbest):
                Gbest = Pbest
        self._set_Gbest(Gbest)
    def __new(self, l, c, lb, ub):
        # Generate c random neighbours for every bee in l, keeping the
        # original bees as well.
        bee = []
        for i in l:
            new = [self.__neighbor(i, lb, ub) for k in range(c)]
            bee += new
        bee += l
        return bee
    def __neighbor(self, who, lb, ub):
        # Random step along the line to a randomly chosen flock mate,
        # clipped back into the search box.
        neighbor = np.array(who) + uniform(-1, 1) * (
            np.array(who) - np.array(
                self.__agents[randint(0, len(self.__agents) - 1)]))
        neighbor = np.clip(neighbor, lb, ub)
        return list(neighbor)
|
"""Commands to be executed from command line."""
# pylint: disable=E0012,C0330,R0913
import click
from .autoimports.cli import autoimports
from .s3_import.cli import s3_import
@click.group()
def s3(): # pylint: disable=C0103
    """S3 import management commands.
    Setup guide: https://docs.gencove.com/main/s3-imports/
    """
# Attach the subcommands to the group.
s3.add_command(autoimports)
s3.add_command(s3_import)
|
"""
Copyright (c) 2020 Jun Zhu
"""
import copy
import numpy as np
import torch
from torch import optim
import torch.nn.functional as F
from agent_base import _AgentBase, Memory
from utilities import copy_nn, soft_update_nn, OUProcess
# Prefer the first CUDA GPU, otherwise fall back to the CPU.
# NOTE(review): the fallback is the plain string "cpu" rather than
# torch.device("cpu"); torch accepts either in .to()/device arguments.
device = torch.device("cuda:0") if torch.cuda.is_available() else "cpu"
class DdpgAgent:
    """One DDPG unit: an actor/critic pair plus their target copies."""

    def __init__(self, actor, critic):
        # Move the live networks to the compute device and clone each
        # one as its own target network.
        self.actor = actor.to(device)
        self.critic = critic.to(device)
        self.actor_target = copy.deepcopy(actor).to(device)
        self.critic_target = copy.deepcopy(critic).to(device)

    def update_nn(self, tau):
        """Soft-update both target networks toward the live ones."""
        for live, target in ((self.actor, self.actor_target),
                             (self.critic, self.critic_target)):
            soft_update_nn(live, target, tau)
class MaDdpgAgent(_AgentBase):
    """Multi-agent deep deterministic policy gradient agent.
    https://arxiv.org/abs/1706.02275.
    """
    def __init__(self, state_space, action_space, n_agents, models, *,
                 brain_name="TennisBrain",
                 model_file="maddpg_checkpoint.pth",
                 replay_memory_size=1000):
        """Initialization.
        :param int state_space: state space size.
        :param int action_space: action space size.
        :param int n_agents: number of agents.
        :param tuple models: Neural network classes for actor and critic.
        :param int replay_memory_size: size of the replay buffer.
        """
        super().__init__(brain_name, model_file)
        self._state_space = state_space
        self._action_space = action_space
        self._n_agents = n_agents
        # One independent actor/critic pair per agent.
        self._agents = [DdpgAgent(models[0](), models[1]())
                        for _ in range(n_agents)]
        # Replay buffer shared by all agents (joint transitions).
        self._memory = Memory(replay_memory_size)
    def _act(self, states, noises=None):
        """Override.

        Returns one clipped action per agent; `noises` (one row per
        agent) is added for exploration and defaults to zero.
        """
        if noises is None:
            noises = np.zeros((self._n_agents, self._action_space))
        actions = []
        for agent, state, noise in zip(self._agents, states, noises):
            # Add a batch dimension of 1 before the forward pass.
            state = torch.from_numpy(
                np.expand_dims(state, axis=0)).float().to(device)
            agent.actor.eval() # set the module in evaluation mode
            with torch.no_grad():
                action_values = agent.actor(state)
            agent.actor.train() # set the module in training mode
            action = np.squeeze(action_values.cpu().detach().numpy(), axis=0)
            action += noise
            # Clip to the environment's valid action range.
            action = np.clip(action, -1., 1.)
            actions.append(action)
        return actions
    def _learn(self, batch_size, opts_actor, opts_critic, gamma, tau):
        """Learn from a given trajectory.
        :param (Tuple[torch.Variable]) experiences: (s, a, r, s', done)
        :param Optimizer opts_actor: actor optimizers used for gradient ascend.
        :param Optimizer opts_critic: critic optimizers used for gradient ascend.
        :param float gamma: discount factor.
        :param float tau: soft update rate of the target network.
        """
        critic_losses = []
        actor_losses = []
        for i, (agent, opt_critic, opt_actor) \
                in enumerate(zip(self._agents, opts_critic, opts_actor)):
            # sample for each agent
            states, actions, rewards, next_states, dones = \
                self._memory.sample(batch_size, device=device)
            # update critic
            # Target actions: each agent's target actor acts on its own
            # slice of the joint next state.
            next_actions_target = []
            for j, _agent in enumerate(self._agents):
                next_actions_target.append(_agent.actor_target(
                    next_states[:, j * self._state_space:(j + 1) * self._state_space]))
            next_actions_target = torch.cat(next_actions_target, dim=-1)
            q_next = agent.critic_target(next_states, next_actions_target)
            # TD target; terminal transitions drop the bootstrapped term.
            q_targets = rewards[:, i, None] \
                + gamma * q_next * (1 - dones[:, i, None])
            q_expected = agent.critic(states, actions)
            assert(q_expected.shape == q_targets.shape)
            critic_loss = F.mse_loss(q_targets.detach(), q_expected)
            opt_critic.zero_grad()
            critic_loss.backward()
            opt_critic.step()
            critic_losses.append(critic_loss.item())
            # update actor
            # Only agent i's action comes from its (differentiable) actor;
            # the other agents' actions are taken from the batch.
            next_actions_pred = []
            for j, _agent in enumerate(self._agents):
                if j == i:
                    next_actions_pred.append(_agent.actor(
                        states[:, j * self._state_space:(j + 1) * self._state_space]))
                else:
                    next_actions_pred.append(
                        actions[:, j * self._action_space:(j + 1) * self._action_space])
            next_actions_pred = torch.cat(next_actions_pred, dim=-1)
            # Gradient ascent on Q via minimizing its negation.
            actor_loss = - agent.critic(states, next_actions_pred).mean()
            opt_actor.zero_grad()
            actor_loss.backward()
            opt_actor.step()
            actor_losses.append(actor_loss.item())
        # apply soft update
        for agent in self._agents:
            agent.update_nn(tau)
        return actor_losses, critic_losses
    def train(self, env, *,
              n_episodes=1000,
              theta=0.15,
              sigma=0.2,
              decay_rate=0.99,
              tau=0.001,
              gamma=1.0,
              learning_rate=(1e-3, 1e-3),
              weight_decay=(0., 0.),
              batch_size=16,
              n_trainings_per_step=1,
              replay_start_size=None,
              window=100,
              target_score=30,
              continue_after_reaching_target=False,
              save_frequency=100,
              output_frequency=10):
        """Train the agent.
        :param gym.Env env: environment.
        :param int n_episodes: number of episodes.
        :param float theta: Ornstein-Uhlenbeck process constant.
        :param float sigma: Ornstein-Uhlenbeck process constant.
        :param float decay_rate: noise decay rate.
        :param float tau: soft update rate of the target network.
        :param double gamma: discount factor.
        :param tuple learning_rate: learning rates for actor and critic models.
        :param double weight_decay: L2 penalties for actor and critic models.
        :param int batch_size: mini batch size.
        :param int n_trainings_per_step: number of trainings per time step.
        :param int replay_start_size: a uniform random policy is run for this
            number of frames before training starts.
        :param int window: the latest window episodes will be used to evaluate
            the performance of the model.
        :param float target_score: if the average score of the latest window
            episodes is larger than the target score, the problem is
            considered solved.
        :param bool continue_after_reaching_target: True for continuing the
            training after reaching the target score.
        :param int save_frequency: the frequency of saving the states of the
            agent.
        :param int output_frequency: the frequency of summarizing the
            training result.
        """
        opts_actor = [
            optim.Adam(agent.actor.parameters(),
                       lr=learning_rate[0],
                       weight_decay=weight_decay[0]) for agent in self._agents
        ]
        opts_critic = [
            optim.Adam(agent.critic.parameters(),
                       lr=learning_rate[1],
                       weight_decay=weight_decay[1]) for agent in self._agents
        ]
        # Resume from a checkpoint file when one exists.
        try:
            checkpoint = torch.load(self._model_file)
        except FileNotFoundError:
            checkpoint = None
        if checkpoint is None:
            i0 = 0
            scores_hist = []
            best_saved_score = -np.inf
            losses_actor_hist = [[] for _ in range(self._n_agents)]
            losses_critic_hist = [[] for _ in range(self._n_agents)]
        else:
            i0 = checkpoint['epoch']
            scores_hist = checkpoint['score_history']
            losses_actor_hist = checkpoint['actor_loss_history']
            losses_critic_hist = checkpoint['critic_loss_history']
            # Restore live networks, then mirror them into the targets.
            for i, agent in enumerate(self._agents):
                agent.actor.load_state_dict(
                    checkpoint['model_actor_state_dict'][i])
                copy_nn(agent.actor, agent.actor_target)
                agent.critic.load_state_dict(
                    checkpoint['model_critic_state_dict'][i])
                copy_nn(agent.critic, agent.critic_target)
            for i, opt in enumerate(opts_actor):
                opt.load_state_dict(
                    checkpoint['optimizer_actor_state_dict'][i])
            for i, opt in enumerate(opts_critic):
                opt.load_state_dict(
                    checkpoint['optimizer_critic_state_dict'][i])
            # score is the maximum of the scores from all agents
            avg_score = np.mean(scores_hist[-window:])
            best_saved_score = avg_score
            print(f"Loaded existing model ended at epoch: {i0} with average"
                  f"score of {avg_score:8.4f}")
            if avg_score > target_score and not continue_after_reaching_target:
                print(f"Score of the current model {avg_score:8.4f} is already "
                      f"higher than the target score {target_score}!")
                return scores_hist, losses_actor_hist, losses_critic_hist
        if replay_start_size is None:
            replay_start_size = batch_size * 2
        brain_name = self._brain_name
        i = i0
        # Resume the noise decay where the loaded run left off.
        decay = decay_rate ** i
        while i < n_episodes:
            i += 1
            random_process = OUProcess(
                self._action_space * self._n_agents, theta=theta, sigma=sigma)
            env_info = env.reset(train_mode=True)[brain_name]
            states = env_info.vector_observations
            scores = [0.] * self._n_agents
            # one episode has 14 ~ 50 steps
            while True:
                actions = self._act(
                    states,
                    random_process.next().reshape(self._n_agents, -1) * decay)
                env_info = env.step(actions)[brain_name]
                rewards = env_info.rewards
                for i_r, r in enumerate(rewards):
                    scores[i_r] += r
                    # It should be encouraged to hit the ball, even it is a
                    # "bad" hit. It is found that the result becomes more
                    # stable. Note that this change does not affect the
                    # calculation of the score.
                    if r < 0:
                        rewards[i_r] = -r
                next_states = env_info.vector_observations
                dones = env_info.local_done
                self._memory.append(
                    states, actions, rewards, next_states, dones)
                states = next_states
                # Only start learning once the buffer has warmed up.
                if len(self._memory) > replay_start_size:
                    for _ in range(n_trainings_per_step):
                        losses_actor, losses_critic = self._learn(
                            batch_size,
                            opts_actor,
                            opts_critic,
                            gamma,
                            tau
                        )
                        for i_a in range(self._n_agents):
                            losses_actor_hist[i_a].append(losses_actor[i_a])
                            losses_critic_hist[i_a].append(losses_critic[i_a])
                if dones[0]:
                    break
            decay *= decay_rate
            scores_hist.append(max(scores))
            avg_score = np.mean(scores_hist[-window:])
            if i % output_frequency == 0:
                print(f"Epoch: {i:04d}, average score: {avg_score:8.4f}")
            # save if the target score has been achieved and the current score
            # is better than the saved score.
            if avg_score >= target_score and avg_score > best_saved_score:
                self._save_model(i, opts_actor, opts_critic, scores_hist,
                                 [losses_actor_hist, losses_critic_hist])
                best_saved_score = avg_score
                if not continue_after_reaching_target:
                    break
            if i % save_frequency == 0 and \
                    (best_saved_score < target_score or avg_score > best_saved_score):
                # save if the target score has not been achieved or the current score
                # is better than the saved score.
                best_saved_score = avg_score
                self._save_model(i, opts_actor, opts_critic, scores_hist,
                                 [losses_actor_hist, losses_critic_hist])
        return scores_hist, losses_actor_hist, losses_critic_hist
    def _save_model(self, epoch, opts_actor, opts_critic, scores, losses):
        """Persist the complete training state to ``self._model_file``."""
        torch.save({
            'epoch': epoch,
            'optimizer_actor_state_dict':
                [opt.state_dict() for opt in opts_actor],
            'optimizer_critic_state_dict':
                [opt.state_dict() for opt in opts_critic],
            'score_history': scores,
            'actor_loss_history': losses[0],
            'critic_loss_history': losses[1],
            'model_actor_state_dict':
                [a.actor.state_dict() for a in self._agents],
            'model_critic_state_dict':
                [a.critic.state_dict() for a in self._agents],
        }, self._model_file)
        print(f"Model save in {self._model_file} after {epoch} epochs!")
|
from scipy.spatial.distance import jaccard
import numpy as np
import pandas as pd
# Computing Jaccard Distance of two 5D-Rectangles
# Issues to deal with:
# Normalizing values?
# Input format correct?
# Weighting of the different dimensions?
def jaccard_distance(datFr, name, pred):
    """Return the smallest Jaccard distance between *pred* and the
    labelled rectangles belonging to file *name*.

    Input:
        datFr: 5-dim. DataFrame of all labels; column 0 holds the name of
            the file each rectangle belongs to.
        name: name (string) of the correct file.
        pred: prediction rectangle.
    Return:
        Closest distance as a float.
    """
    # Rows whose file name matches, with the name column dropped.
    labelled = datFr.loc[datFr[0].str.match(name), 1:]
    # Jaccard distance from the prediction to every labelled rectangle.
    distances = labelled.apply(jaccard, axis=1, args=[pred])
    return distances.min()
"""
Returns closest Ruzicka Distance, related to Jaccard Distance, of rectangles
in the label dat and the prediction distance.
Input:
datFr: 5 Dim. DataFrame including all labels, assuming that column 0
includes the names of the respective files the rectangles belong to.
name: Name as string of the correct file.
pred: Prediction rectangle
Return:
Closest Distance (should be a float)
"""
def ruzicka_distance(datFr, name, pred):
    """Return the smallest Soergel (Ruzicka) distance between *pred* and
    the labelled rectangles belonging to file *name*.

    The Soergel distance is 1 minus the sum of element-wise minima divided
    by the sum of element-wise maxima, so identical vectors give 0 and the
    value grows with dissimilarity.
    """
    def soergel(row, other):
        lo = np.minimum(row, other)
        hi = np.maximum(row, other)
        return 1 - lo.sum() / hi.sum()
    # Rows whose file name matches, with the name column dropped.
    rects = datFr.loc[datFr[0].str.match(name), 1:]
    return rects.apply(soergel, axis=1, args=[pred]).min()
"""
Function to incorporate both the positive and negative rectangles. Computes
both the Ruzicka distance to the closest positive and negative rectangle and
returns the positive plus the inverted negative Soergel Distance divided by two.
Input:
pos_df: 5 Dim. DataFrame including all labels for pos. rectangles
(see ruzicka_distance)
neg_df: 5 DIm. DataFrame, but for negative rectangles
name: Name as string of correct image
pred: Prediction Rectangle
"""
def ruz_posneg(pos_df, neg_df, name, pred):
    """Combine positive and negative rectangles into one score.

    Averages the closest Ruzicka distance to the positive rectangles with
    the inverted closest distance to the negative rectangles.
    """
    closest_pos = ruzicka_distance(pos_df, name, pred)
    inverted_neg = 1 - ruzicka_distance(neg_df, name, pred)
    return (closest_pos + inverted_neg) / 2
|
class Solution(object):
    """LeetCode 66 "Plus One": add one to a number stored as a digit list."""

    def plusOneUnoptimized(self, digits):
        """Ripple-carry addition in place, right to left.

        :type digits: List[int]
        :rtype: List[int]
        """
        # The original also defined an unused `first = True`; removed.
        carry = 1
        counter = 1
        digit_len = len(digits)
        while carry and counter <= digit_len:
            digits[counter*-1] = digits[counter*-1] + carry
            if digits[counter*-1] == 10:
                digits[counter*-1] = 0
            else:
                carry = 0
            counter += 1
        # A surviving carry means every digit was 9, e.g. [9, 9] -> [1, 0, 0].
        if carry:
            digits.insert(0, carry)
        return digits

    def plusOneOptimized(self, digits):
        """Rebuild the integer, add one, and split it back into digits.

        :type digits: List[int]
        :rtype: List[int]
        """
        num = 0
        for i in range(len(digits)):
            num = num * 10 + digits[i]
        return [int(i) for i in str(num+1)]
from conehead import (
Source, Block , SimplePhantom, Conehead
)
import numpy as np
import pydicom
from scipy.interpolate import RegularGridInterpolator
# Load test plan
plan = pydicom.dcmread("RP.3DCRT.dcm", force=True)
# Choose source
source = Source("varian_clinac_6MV")
source.gantry(0)
source.collimator(0)
# Set the jaws and MLC
# block = Block(source.rotation, plan=plan)
block = Block()
# Square field; assumed to be 30 cm on a side -- TODO confirm units.
block.set_square(30)
# Use a simple cubic phantom
phantom = SimplePhantom()
# Calculation settings
settings = {
    'stepSize': 0.1,  # Stepsize when raytracing effective depth (cm)
    'sPri': 0.90924,  # Primary source strength
    'sAnn': 2.887e-3,  # Annular source strength
    'zAnn': -4.0,  # Height of annular source in beam coordinates
    'rInner': 0.2,  # Inner radius of annular source
    'rOuter': 1.4,  # Outer radius of annular source
    'zExp': -12.5,  # Height of exponential source in beam coordinates
    'sExp': 8.289e-3,  # Exponential source strength
    'kExp': 0.4816,  # Exponential source exponent coefficient
    'softRatio': 0.0025,  # cm^-1
    'softLimit': 20,  # cm
    'hornRatio': 0.0065,  # % per cm
    'eLow': 0.01,  # MeV
    'eHigh': 7.0,  # MeV
    'eNum': 500,  # Spectrum samples
    'fluenceResampling': 3  # Split voxels for fluence calculatiion
}
# Run the dose calculation and plot the result.
conehead = Conehead()
conehead.calculate(source, block, phantom, settings)
conehead.plot()
|
from .predictor import BasePredictor, SegPredictor, ImSpecPredictor, Locator
from .epredictor import EnsemblePredictor, ensemble_locate
# Names exported by `from <package> import *` -- the package's public API.
__all__ = ["BasePredictor", "SegPredictor", "ImSpecPredictor",
           "EnsemblePredictor", "ensemble_locate", "Locator"]
|
import sys
from pathlib import Path
from xml.etree import ElementTree

# Build <html><body><div><span>Hello World</span></div></body></html>.
# SubElement both creates and attaches each node in one step.
html = ElementTree.Element("html")
body = ElementTree.SubElement(html, "body")
div = ElementTree.SubElement(body, "div")
span = ElementTree.SubElement(div, "span")
span.text = "Hello World"

string = ElementTree.tostring(html).decode()

# Write the page out.  The original opened the file manually (never using
# a context manager) and crashed when Generated/ did not exist yet.
out_path = Path("Generated/Biomes.html")
out_path.parent.mkdir(parents=True, exist_ok=True)
out_path.write_text(string)
# Legacy Django (<3.2) hook pointing at this app's default AppConfig.
default_app_config = 'ore.projects.apps.ProjectsConfig'
|
from .avocent_request import AvocentRequest
from ..models import LoginResult
class LoginRequest(AvocentRequest):
    """Avocent login request: submits credentials and extracts the session id."""
    # Class-level defaults kept for backward compatibility; __init__ always
    # sets instance copies.
    username = ""
    password = ""
    def __init__(self, username, password):
        super().__init__()
        self.username = username
        self.password = password
    def get_body(self):
        """Return the XML request payload carrying the credentials."""
        # Renamed from `str`, which shadowed the builtin of the same name.
        template = '<avtrans><sid></sid><action>login</action><agents><src>wmi</src><dest>controller</dest></agents><paths><path>units.topology</path></paths><payload><section structure="login"><parameter id="username" structure="RWtext"><value>{}</value></parameter><parameter id="password" structure="password"><value>{}</value></parameter></section></payload></avtrans>'
        return template.format(self.username, self.password)
    def parse_response(self, xml):
        """Parse the reply; failure when an <error> node is present,
        otherwise success with the session id from <sid>."""
        if xml.find(".//error") is not None:
            return LoginResult(False, "")
        sid = xml.find(".//sid")
        return LoginResult(True, sid.text)
|
from django.contrib import admin
from .models import *
class MaDrivers(admin.ModelAdmin):
    # Columns shown in the admin change-list for driver entries.
    list_display = ('name', 'location', 'availability', 'description')
class ContactUsAdmin(admin.ModelAdmin):
    # Columns shown in the admin change-list for contact messages.
    list_display = ('name', 'phone')
#
#
# class basket(admin.ModelAdmin):
# list_display = ('user', 'status')
# class ReferralsAdmin(admin.ModelAdmin):
# list_display = ('description', 'discount')
# admin.site.register(MaDere, MaDrivers)
admin.site.register(ContactUs, ContactUsAdmin)
# admin.site.register(BasketLine, basketline)
# admin.site.register(Referrals, ReferralsAdmin)
# admin.site.register(ads)
# admin.site.register(templatetags)
|
This is Experiment 2 of Homework 2.
|
"""
Autopsy Forensic Browser
Copyright 2016-2020 Basis Technology Corp.
Contact: carrier <at> sleuthkit <dot> org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from java.io import File
from java.lang import Class
from java.lang import ClassNotFoundException
from java.lang import Long
from java.lang import String
from java.sql import Connection
from java.sql import DriverManager
from java.sql import ResultSet
from java.sql import SQLException
from java.sql import Statement
from java.util.logging import Level
from java.util import ArrayList
from org.apache.commons.codec.binary import Base64
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule import NoCurrentCaseException
from org.sleuthkit.autopsy.casemodule.services import FileManager
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.coreutils import MessageNotifyUtil
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.ingest import IngestJobContext
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import Blackboard
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import Content
from org.sleuthkit.datamodel import TskCoreException
from org.sleuthkit.datamodel import Account
from org.sleuthkit.datamodel.Blackboard import BlackboardException
from org.sleuthkit.autopsy.coreutils import AppSQLiteDB
from org.sleuthkit.datamodel.blackboardutils import CommunicationArtifactsHelper
from org.sleuthkit.datamodel.blackboardutils.CommunicationArtifactsHelper import MessageReadStatus
from org.sleuthkit.datamodel.blackboardutils.CommunicationArtifactsHelper import CommunicationDirection
import traceback
import general
"""
Locates database for the Tango app and adds info to blackboard.
"""
class TangoMessageAnalyzer(general.AndroidComponentAnalyzer):
    """Finds the Tango app's tc.db database and posts its messages to the blackboard."""

    def __init__(self):
        self._logger = Logger.getLogger(self.__class__.__name__)
        self._PACKAGE_NAME = "com.sgiggle.production"
        self._PARSER_NAME = "Tango Parser"
        self._MESSAGE_TYPE = "Tango Message"
        self._VERSION = "7"  # DB_VERSION in 'profiles' table

    def analyze(self, dataSource, fileManager, context):
        """Locate every tc.db for the Tango package and parse each one.

        Per-database parse failures are logged and do not stop the scan
        of the remaining databases.
        """
        try:
            tangoDbFiles = AppSQLiteDB.findAppDatabases(dataSource, "tc.db", True, self._PACKAGE_NAME)
            for tangoDbFile in tangoDbFiles:
                try:
                    self.__findTangoMessagesInDB(tangoDbFile, dataSource)
                except Exception as ex:
                    self._logger.log(Level.SEVERE, "Error parsing Tango messages", ex)
                    self._logger.log(Level.SEVERE, traceback.format_exc())
        except TskCoreException as ex:
            # Error finding Tango messages.
            pass

    def __findTangoMessagesInDB(self, tangoDb, dataSource):
        """Read the messages table of one tc.db and add one message artifact per row."""
        if not tangoDb:
            return
        try:
            current_case = Case.getCurrentCaseThrows()

            # Create a helper to parse the DB
            tangoDbHelper = CommunicationArtifactsHelper(current_case.getSleuthkitCase(),
                                                         self._PARSER_NAME,
                                                         tangoDb.getDBFile(),
                                                         Account.Type.TANGO)
            resultSet = tangoDb.runQuery(
                "SELECT conv_id, create_time, direction, payload FROM messages ORDER BY create_time DESC;")

            while resultSet.next():
                # Sender/recipient are not recoverable from this table.
                fromId = None
                toId = None
                conv_id = resultSet.getString("conv_id")  # seems to wrap around the message found in payload after decoding from base-64
                # Divided by 1000 -- create_time appears to be stored in
                # milliseconds; converted to seconds here (TODO confirm).
                create_time = Long.valueOf(resultSet.getString("create_time")) / 1000
                if resultSet.getString("direction") == "1":  # 1 incoming, 2 outgoing
                    direction = CommunicationDirection.INCOMING
                else:
                    direction = CommunicationDirection.OUTGOING
                payload = resultSet.getString("payload")
                # The conversation id doubles as the wrapper marker around the
                # base-64 payload (see decodeMessage).
                msgBody = TangoMessageAnalyzer.decodeMessage(conv_id, payload)

                messageArtifact = tangoDbHelper.addMessage(
                    self._MESSAGE_TYPE,
                    direction,
                    fromId,
                    toId,
                    create_time,
                    MessageReadStatus.UNKNOWN,
                    "",  # subject
                    msgBody,
                    "")

        except SQLException as ex:
            self._logger.log(Level.WARNING, "Error processing query result for Tango messages", ex)
            self._logger.log(Level.WARNING, traceback.format_exc())
        except TskCoreException as ex:
            self._logger.log(Level.SEVERE, "Failed to add Tango message artifacts.", ex)
            self._logger.log(Level.SEVERE, traceback.format_exc())
        except BlackboardException as ex:
            self._logger.log(Level.WARNING, "Failed to post artifacts.", ex)
            self._logger.log(Level.WARNING, traceback.format_exc())
        except NoCurrentCaseException as ex:
            self._logger.log(Level.WARNING, "No case currently open.", ex)
            self._logger.log(Level.WARNING, traceback.format_exc())
        finally:
            # Always release the SQLite handle, even on failure.
            tangoDb.close()

    # take the message string which is wrapped by a certain string, and return the text enclosed.
    @staticmethod
    def decodeMessage(wrapper, message):
        # Returns "" when the wrapper is not found after base-64 decoding.
        result = ""
        decoded = Base64.decodeBase64(message)
        try:
            Z = String(decoded, "UTF-8")
            result = Z.split(wrapper)[1]
        except Exception as ex:
            # Error decoding a Tango message.
            pass
        return result
|
from . import (
analysers,
configuration,
connectors,
inertia,
multibody,
output,
rigidbody,
)
|
class Solution:
    def generateParenthesis(self, n: int) -> List[str]:
        """Return all combinations of n pairs of well-formed parentheses."""
        result = []
        self.dfs(n, 0, 0, "", result)
        return result

    def dfs(self, n, left, right, node, result):
        """Backtracking helper.

        :param left: count of '(' already placed
        :param right: count of ')' already placed
        :param node: the partial string built so far
        :param result: output accumulator, mutated in place
        """
        if left == n and right == n:
            result.append(node)
            return  # string complete; no further branching possible
        # An opening bracket may be added while fewer than n are placed.
        if left < n:
            self.dfs(n, left + 1, right, node + "(", result)
        # A closing bracket is valid only if it matches an unclosed '('.
        if right < left:
            self.dfs(n, left, right + 1, node + ")", result)
|
from flask_wtf import FlaskForm
from wtforms.fields import StringField, PasswordField, HiddenField, IntegerField, BooleanField
from wtforms.validators import DataRequired
from wtforms.widgets import HiddenInput
class LoginForm(FlaskForm):
    """Sign-in form with username/password credentials."""
    username = StringField("username", validators=[DataRequired()])
    password = PasswordField("password", validators=[DataRequired()])
    # NOTE(review): presumably the endpoint/args to redirect back to after
    # login -- confirm against the login view.
    back = HiddenField("back")
    args = HiddenField("args")
class UserIdForm(FlaskForm):
    """Base form carrying a hidden user_id for user-targeted actions."""
    user_id = IntegerField("user_id", widget=HiddenInput())
class PermissionsForm(UserIdForm):
    """Per-user permission flags, tied to the inherited hidden user_id."""
    can_visit = BooleanField("can_visit")
    can_edit = BooleanField("can_edit")
    is_admin = BooleanField("is_admin")
class PasswordForm(UserIdForm):
    """Password change form; both entries required.

    NOTE(review): equality of the two fields is not validated here --
    presumably checked in the view; confirm.
    """
    password_1 = PasswordField("password_1", validators=[DataRequired()])
    password_2 = PasswordField("password_2", validators=[DataRequired()])
class UserCreateForm(FlaskForm):
    """New-user form combining credentials with initial permission flags."""
    username = StringField("username", validators=[DataRequired()])
    password = PasswordField("password", validators=[DataRequired()])
    can_visit = BooleanField("can_visit")
    can_edit = BooleanField("can_edit")
    is_admin = BooleanField("is_admin")
|
"""Loss modules"""
from .semseg_loss import SemSegLoss
from .cross_entropy import CrossEntropyLoss
from .focal_loss import FocalLoss
from .smooth_L1 import SmoothL1Loss
# Public names re-exported by this loss package.
__all__ = ['SemSegLoss', 'CrossEntropyLoss', 'FocalLoss', 'SmoothL1Loss']
|
from app.config import settings
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.ext.declarative import declarative_base
# BASE
'''
declarative_base() is a factory function that constructs a base class
for declarative class definitions.
Ref : https://docs.sqlalchemy.org/en/13/orm/extensions/declarative/api.html
'''
# Shared declarative base; all ORM models must inherit from this.
Base = declarative_base()
def get_engine():
    """Create a new SQLAlchemy engine bound to the configured DATABASE_URL."""
    return create_engine(settings.get('DATABASE_URL'))
def drop_table():
    """Drop every table registered on Base.metadata.

    Destructive: removes all schema objects known to the declarative base.
    """
    # Reuse the shared engine factory instead of duplicating engine creation.
    Base.metadata.drop_all(get_engine())
def create_all(drop=None):
    """Create all tables registered on Base.metadata.

    :param drop: when truthy, drop existing tables first. Previously this
        parameter was accepted but silently ignored; the default (None)
        preserves the original create-only behavior.
    :return: result of ``MetaData.create_all`` (None).
    """
    print("----- Created all called -----")
    print('Database : {}'.format(settings.get('DATABASE_URL')))
    if drop:
        Base.metadata.drop_all(create_engine(settings.get('DATABASE_URL')))
    return Base.metadata.create_all(create_engine(settings.get('DATABASE_URL'), echo=True))
def get_session():
    """Create a new scoped session bound to a fresh engine.

    :return: a thread-local ``scoped_session`` instance.
    """
    print("=================================")
    print("Session Created")
    print("=================================")
    engine = get_engine()
    # autoflush=True: pending changes are flushed before each query.
    session_factory = sessionmaker(bind=engine, autoflush=True)
    sess = scoped_session(session_factory)
    return sess
from .countries import Countries
|
'''
Parsing - Exercise 16
The script reads a multiple sequence file in FASTA format and
only write to a new file the records the Uniprot ACs of which
are present in the list created in Exercise 14).
'''
# We need two input files.
# Build the set of cancer-expressed accession numbers once; a set gives
# O(1) membership tests versus O(n) per line with a list.
with open('cancer-expressed.txt') as cancer_file:
    cancer_list = set(line.strip() for line in cancer_file)

Outfile = open('cancer-expressed_records.fasta', 'w')

# Read the FASTA input and copy every record whose AC is in cancer_list.
# Header lines (">...") update the current AC; the same membership test then
# decides whether the header and its following sequence lines are written.
AC = None  # guards against input that starts with non-header lines
with open('SwissProt-Human.fasta') as human_fasta:
    for line in human_fasta:
        if line.startswith(">"):
            AC = line.split("|")[1]
        if AC in cancer_list:
            Outfile.write(line)

Outfile.close()
|
# Created By: Virgil Dupras
# Created On: 2006/10/06
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
from . import _sql as sql
import hsfs as fs
import hsfs.music
from hsfs.phys import music
from hscommon import io
from hscommon.path import Path
from jobprogress.job import nulljob, JobCancelled
class Node:
    """Mixin giving tree nodes volume lookup and physical-path resolution."""

    @property
    def parent_volume(self):
        # Walk up the tree; a subclass that *is* a volume overrides this
        # to terminate the recursion. Returns None at a detached root.
        node_parent = self.parent
        if node_parent is None:
            return None
        return node_parent.parent_volume

    @property
    def physical_path(self):
        # Resolve by appending this node's name to the parent's path.
        return self.parent.physical_path + self.name
class File(sql.File, Node, hsfs.music._File):
    # SQL-backed music file that resolves paths through the Node mixin.
    pass
class Directory(sql.Directory, Node):
    # SQL-backed directory; children files use the File class above.
    cls_file_class = File
# Volume types: CD-ROM media vs. fixed (local disk) volumes.
VOLTYPE_CDROM, VOLTYPE_FIXED = range(2)

# Path-building modes used by Volume._build_path().
MODE_NORMAL, MODE_PHYSICAL, MODE_TOKEN = range(3)
class Volume(Directory):
    """A tracked music volume (fixed disk or CD-ROM) stored in the SQL tree."""
    cls_dir_class = Directory

    def __init__(self, parent, name):
        super(Volume, self).__init__(parent, name)
        # Cached Path built lazily from the stored 'initial_path' attribute.
        self.__initial_path = None
        self.__mode = MODE_NORMAL

    #---Protected
    def _build_path(self): #Override
        # Path shape depends on mode: real filesystem location, token form
        # ('!name'), or the default SQL-tree path.
        if self.mode == MODE_PHYSICAL:
            return self.physical_path
        elif self.mode == MODE_TOKEN:
            return ('!%s' % self.name, )
        else:
            return super(Volume, self)._build_path()

    #---Public
    def update(self, ref=None, job=nulljob):
        """Re-scan this volume against *ref* (defaults to its initial path).

        An invalid path is silently ignored (volume may be offline).
        """
        if ref is None:
            ref = music.Directory(None, str(self.initial_path))
        try:
            super(Volume, self).update(ref, job=job)
        except fs.InvalidPath:
            pass

    #---Properties
    @property
    def initial_path(self):
        # Lazily read from stored attributes; empty Path when never set.
        if self.__initial_path is None:
            try:
                value = self._get_attr('initial_path')
            except KeyError:
                value = ''
            self.__initial_path = Path(value)
        return self.__initial_path

    @initial_path.setter
    def initial_path(self, value):
        self._set_attr('initial_path', str(value))
        # Invalidate the cache; re-read from attrs on next access.
        self.__initial_path = None

    @property
    def is_available(self):
        return io.exists(self.physical_path)

    @property
    def is_removable(self):
        return self.vol_type == VOLTYPE_CDROM

    @property
    def mode(self):
        return self.__mode

    @mode.setter
    def mode(self, value):
        self.__mode = value
        # Path shape depends on mode (see _build_path), so rebuild it.
        self._invalidate_path()

    @property
    def parent_volume(self):
        # A volume is its own parent volume; terminates Node's recursion.
        return self

    @property
    def physical_path(self):
        # CD-ROM content lives under the root's buffer path; fixed volumes
        # sit at their recorded initial path.
        if self.vol_type == VOLTYPE_CDROM:
            return self.parent.buffer_path + self.name
        else:
            return self.initial_path

    @property
    def vol_type(self):
        try:
            return self._get_attr('vol_type')
        except KeyError:
            # Default for volumes created before the attribute existed.
            return VOLTYPE_FIXED

    @vol_type.setter
    def vol_type(self, value):
        self._set_attr('vol_type',value)
class Root(sql.Root):
    """SQL-backed root containing one child Volume per tracked music volume."""
    cls_dir_class = Volume
    cls_file_class = File

    def __init__(self, dbname=':memory:', dirname='', threaded=True):
        super(Root, self).__init__(dbname, dirname, threaded=threaded)
        # Audio metadata attributes persisted for each file.
        self._attrs_to_read = ['audiosize', 'size', 'ctime', 'mtime', 'duration', 'bitrate', 'samplerate', 'title',
                               'artist', 'album', 'genre', 'year', 'track', 'comment']

    def add_volume(self, ref, volume_name, volume_type, job=nulljob):
        """Create a volume named *volume_name*, scan it from *ref*, return it."""
        result = self.new_directory(volume_name)
        try:
            result.update(ref, job)
        except JobCancelled:
            # If add_volume is cancelled, we don't want a half updated volume added.
            # We want nothing added.
            result.delete()
            raise
        result.vol_type = volume_type
        result.initial_path = ref.path
        return result

    def update_volumes(self,job=nulljob):
        """Re-scan every fixed (non-removable) volume."""
        updatable = [volume for volume in self if volume.vol_type == VOLTYPE_FIXED]
        job = job.start_subjob(len(updatable))
        for volume in updatable:
            volume.update(job=job)

    # Base directory for buffered (CD-ROM) content; empty path by default.
    # NOTE(review): assumed to be a class-level attribute of Root -- confirm.
    buffer_path = Path(())
|
#!/usr/bin/env python
# Copyright 2019 Pascal Audet & Helen Janiszewski
#
# This file is part of OBStools.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Program atacr_correct_event.py
------------------------------
Applies previously computed transfer functions to selected event seismograms in
order to correct (clean) the vertical component of OBS data. The transfer
functions come from noise windows flagged as "good", for either a single day
(from `obs_daily_spectra.py`) or averaged over several days
(from `obs_clean_spectra.py`), if available. The corrected data are stored to disk.
Station selection is specified by a network and
station code. The data base is provided as a
`StDb` dictionary.
Usage
-----
.. code-block::
$ atacr_correct_event.py -h
Usage: atacr_correct_event.py [options] <station database>
Script used to extract transfer functions between various components, and use
them to clean vertical component of OBS data for selected events. The noise
data can be those obtained from the daily spectra (i.e., from
`obs_daily_spectra.py`) or those obtained from the averaged noise spectra
(i.e., from `obs_clean_spectra.py`). Flags are available to specify the source
of data to use as well as the time range for given events. The stations are
processed one by one and the data are stored to disk.
Options:
-h, --help show this help message and exit
--keys=STKEYS Specify a comma separated list of station keys for which
to perform the analysis. These must be contained within
the station database. Partial keys will be used to match
against those in the dictionary. For instance, providing
IU will match with all stations in the IU network.
[Default processes all stations in the database]
-O, --overwrite Force the overwriting of pre-existing data. [Default
False]
Parameter Settings:
Miscellaneous default values and settings
--skip-daily Skip daily spectral averages in application of transfer
functions. [Default False]
--skip-clean Skip cleaned spectral averages in application of transfer
functions. [Default False]
--fmin=FMIN Low frequency corner (in Hz) for plotting the raw (un-
corrected) seismograms. Filter is a 2nd order, zero phase
butterworth filter. [Default 1./150.]
--fmax=FMAX High frequency corner (in Hz) for plotting the raw (un-
corrected) seismograms. Filter is a 2nd order, zero phase
butterworth filter. [Default 1./10.]
Figure Settings:
Flags for plotting figures
--figRaw Plot raw seismogram figure. [Default does not plot figure]
--figClean Plot cleaned vertical seismogram figure. [Default does not
plot figure]
Time Search Settings:
Time settings associated with searching for specific event-related
seismograms
--start=STARTT Specify a UTCDateTime compatible string representing the
start day for the event search. This will override any
station start times. [Default start date of each station
in database]
--end=ENDT Specify a UTCDateTime compatible string representing the
start time for the event search. This will override any
station end times. [Default end date of each station in
database]
"""
# Import modules and functions
import os
import numpy as np
from obspy import UTCDateTime
import pickle
import stdb
from obstools.atacr.classes import StaNoise, Power, Cross, Rotation, TFNoise
from obstools.atacr import utils, plot, options
def main():
    """Apply pre-computed transfer functions to clean event data, station by station."""

    # Run Input Parser
    (opts, indb) = options.get_correct_options()

    # Load Database
    db = stdb.io.load_db(fname=indb)

    # Construct station key loop. sorted() returns a NEW list -- the previous
    # bare `sorted(allkeys)` call discarded its result and was a no-op.
    allkeys = sorted(db.keys())

    # Extract key subset
    if len(opts.stkeys) > 0:
        stkeys = []
        for skey in opts.stkeys:
            stkeys.extend([s for s in allkeys if skey in s])
    else:
        stkeys = db.keys()
    # Keep the result (fixes the discarded-sorted() bug for this list too).
    stkeys = sorted(stkeys)

    # Loop over station keys
    for stkey in stkeys:

        # Extract station information from dictionary
        sta = db[stkey]

        # Path where transfer functions will be located
        transpath = 'TF_STA/' + stkey + '/'
        if not os.path.isdir(transpath):
            raise(Exception("Path to "+transpath+" doesn`t exist - aborting"))

        # Path where event data are located
        eventpath = 'EVENTS/' + stkey + '/'
        if not os.path.isdir(eventpath):
            raise(Exception("Path to "+eventpath+" doesn`t exist - aborting"))

        # Get catalogue search start time
        if opts.startT is None:
            tstart = sta.startdate
        else:
            tstart = opts.startT

        # Get catalogue search end time
        if opts.endT is None:
            tend = sta.enddate
        else:
            tend = opts.endT

        # Skip stations whose deployment window does not overlap the search window
        if tstart > sta.enddate or tend < sta.startdate:
            continue

        # Temporary print locations
        tlocs = sta.location
        if len(tlocs) == 0:
            tlocs = ['']
        for il in range(0, len(tlocs)):
            if len(tlocs[il]) == 0:
                tlocs[il] = "--"
        sta.location = tlocs

        # Update Display
        print(" ")
        print(" ")
        print("|===============================================|")
        print("|===============================================|")
        print("| {0:>8s} |".format(
            sta.station))
        print("|===============================================|")
        print("|===============================================|")
        print("| Station: {0:>2s}.{1:5s} |".format(
            sta.network, sta.station))
        print("| Channel: {0:2s}; Locations: {1:15s} |".format(
            sta.channel, ",".join(tlocs)))
        print("| Lon: {0:7.2f}; Lat: {1:6.2f} |".format(
            sta.longitude, sta.latitude))
        print("| Start time: {0:19s} |".format(
            sta.startdate.strftime("%Y-%m-%d %H:%M:%S")))
        print("| End time: {0:19s} |".format(
            sta.enddate.strftime("%Y-%m-%d %H:%M:%S")))
        print("|-----------------------------------------------|")

        # Find all files in directories
        event_files = os.listdir(eventpath)
        trans_files = os.listdir(transpath)

        # Check if folders contain anything
        if not event_files:
            raise(Exception("There are no events in folder " + eventpath))
        if not trans_files:
            raise(Exception("There are no transfer functions in folder " +
                            transpath))

        # Cycle through available files
        for eventfile in event_files:

            # Skip hidden files and folders
            if eventfile[0] == '.':
                continue

            evprefix = eventfile.split('.')
            evstamp = evprefix[0]+'.'+evprefix[1]+'.'
            evDateTime = UTCDateTime(evprefix[0]+'-'+evprefix[1])
            if evDateTime >= tstart and evDateTime <= tend:

                # Load event file (narrowed from a bare except; still
                # best-effort: unreadable files are reported and skipped)
                try:
                    fhandle = open(eventpath+eventfile, 'rb')
                    eventstream = pickle.load(fhandle)
                    fhandle.close()
                except Exception:
                    print("File "+eventpath+eventfile +
                          " exists but cannot be loaded")
                    continue
            else:
                continue

            if opts.fig_event_raw:
                plot.fig_event_raw(eventstream, fmin=opts.fmin, fmax=opts.fmax)

            # Cycle through corresponding TF files
            for transfile in trans_files:

                # Skip hidden files and folders
                if transfile[0] == '.':
                    continue

                tfprefix = transfile.split('transfunc')[0]

                # This case refers to the "cleaned" spectral averages
                # (file name carries a date range instead of a single day)
                if len(tfprefix) > 9:
                    if not opts.skip_clean:
                        yr1 = tfprefix.split('-')[0].split('.')[0]
                        jd1 = tfprefix.split('-')[0].split('.')[1]
                        yr2 = tfprefix.split('-')[1].split('.')[0]
                        jd2 = tfprefix.split('-')[1].split('.')[1]
                        date1 = UTCDateTime(yr1+'-'+jd1)
                        date2 = UTCDateTime(yr2+'-'+jd2)
                        dateev = UTCDateTime(evprefix[0]+'-'+evprefix[1])
                        if dateev >= date1 and dateev <= date2:
                            print(transpath+transfile +
                                  " file found - applying transfer functions")
                            try:
                                fhandle = open(transpath+transfile, 'rb')
                                tfaverage = pickle.load(fhandle)
                                fhandle.close()
                            except Exception:
                                print("File "+transpath+transfile +
                                      " exists but cannot be loaded")
                                continue

                            # List of possible transfer functions for station
                            # average files
                            eventstream.correct_data(tfaverage)

                            correct = eventstream.correct
                            if opts.fig_plot_corrected:
                                plot.fig_event_corrected(
                                    eventstream, tfaverage.tf_list)

                # This case refers to the "daily" spectral averages
                else:
                    if not opts.skip_daily:
                        if tfprefix == evstamp:
                            print(transpath+transfile +
                                  " file found - applying transfer functions")
                            try:
                                fhandle = open(transpath+transfile, 'rb')
                                tfaverage = pickle.load(fhandle)
                                fhandle.close()
                            except Exception:
                                print("File "+transpath+transfile +
                                      " exists but cannot be loaded")
                                continue

                            # List of possible transfer functions for station
                            # average files
                            eventstream.correct_data(tfaverage)

                            correct = eventstream.correct
                            if opts.fig_plot_corrected:
                                plot.fig_event_corrected(
                                    eventstream, tfaverage.tf_list)
if __name__ == "__main__":

    # Run main program only when executed as a script (not on import)
    main()
|
def basis_set_setter(element_symbol):
    """
    :param element_symbol: string, element symbol (e.g. 'H', 'Cl')
    :return: string, basis set name, or None when the element is not mapped
    """
    # HERE I should define all the elements and all the basis set.
    # A lookup table replaces the elif chain; extend by adding entries.
    basis_sets = {
        'H': "TZV2PX-MOLOPT-GTH",
        'F': "TZV2PX-MOLOPT-GTH",
        'Cl': "TZV2PX-MOLOPT-GTH",
        'I': "DZVP-MOLOPT-SR-GTH",
    }
    # .get keeps the original behavior: unknown symbols yield None.
    return basis_sets.get(element_symbol)
|
# Generated by Django 2.1.3 on 2019-04-25 11:58
from django.db import migrations, models
class Migration(migrations.Migration):
    # Redefines Post.thumb as an ImageField with default=None and
    # upload_to='' (no upload subdirectory).
    dependencies = [
        ('post', '0007_auto_20190425_0312'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='thumb',
            field=models.ImageField(default=None, upload_to=''),
        ),
    ]
|
"""Test the WebOS Tv config flow."""
import dataclasses
from unittest.mock import Mock, patch
from aiowebostv import WebOsTvPairError
import pytest
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.components.webostv.const import CONF_SOURCES, DOMAIN, LIVE_TV_APP_ID
from homeassistant.config_entries import SOURCE_SSDP
from homeassistant.const import (
CONF_CLIENT_SECRET,
CONF_CUSTOMIZE,
CONF_HOST,
CONF_ICON,
CONF_NAME,
CONF_SOURCE,
CONF_UNIQUE_ID,
)
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from . import setup_webostv
from .const import CLIENT_KEY, FAKE_UUID, HOST, MOCK_APPS, MOCK_INPUTS, TV_NAME
# Minimal YAML-import configuration for a single webOS TV entry.
MOCK_YAML_CONFIG = {
    CONF_HOST: HOST,
    CONF_NAME: TV_NAME,
    CONF_ICON: "mdi:test",
    CONF_CLIENT_SECRET: CLIENT_KEY,
    CONF_UNIQUE_ID: FAKE_UUID,
}
# Fake SSDP discovery payload advertising the TV at HOST with FAKE_UUID.
MOCK_DISCOVERY_INFO = ssdp.SsdpServiceInfo(
    ssdp_usn="mock_usn",
    ssdp_st="mock_st",
    ssdp_location=f"http://{HOST}",
    upnp={
        ssdp.ATTR_UPNP_FRIENDLY_NAME: "LG Webostv",
        ssdp.ATTR_UPNP_UDN: f"uuid:{FAKE_UUID}",
    },
)
async def test_import(hass, client):
    """Test we can import yaml config."""
    assert client
    with patch("homeassistant.components.webostv.async_setup_entry", return_value=True):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={CONF_SOURCE: config_entries.SOURCE_IMPORT},
            data=MOCK_YAML_CONFIG,
        )

    assert result["type"] == RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == TV_NAME
    assert result["data"][CONF_HOST] == MOCK_YAML_CONFIG[CONF_HOST]
    assert result["data"][CONF_CLIENT_SECRET] == MOCK_YAML_CONFIG[CONF_CLIENT_SECRET]
    assert result["result"].unique_id == MOCK_YAML_CONFIG[CONF_UNIQUE_ID]

    # A second import of the same config must abort as a duplicate entry.
    with patch("homeassistant.components.webostv.async_setup_entry", return_value=True):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={CONF_SOURCE: config_entries.SOURCE_IMPORT},
            data=MOCK_YAML_CONFIG,
        )

    assert result["type"] == RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
@pytest.mark.parametrize(
    "sources",
    [
        ["Live TV", "Input01", "Input02"],
        "Live TV, Input01 , Input02",
        "Live TV,Input01 ,Input02",
    ],
)
async def test_import_sources(hass, client, sources):
    """Test import yaml config with sources list/csv."""
    assert client
    with patch("homeassistant.components.webostv.async_setup_entry", return_value=True):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={CONF_SOURCE: config_entries.SOURCE_IMPORT},
            data={
                **MOCK_YAML_CONFIG,
                CONF_CUSTOMIZE: {
                    CONF_SOURCES: sources,
                },
            },
        )

    assert result["type"] == RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == TV_NAME
    assert result["data"][CONF_HOST] == MOCK_YAML_CONFIG[CONF_HOST]
    assert result["data"][CONF_CLIENT_SECRET] == MOCK_YAML_CONFIG[CONF_CLIENT_SECRET]
    # All CSV variants must be normalized to the same list form.
    assert result["options"][CONF_SOURCES] == ["Live TV", "Input01", "Input02"]
    assert result["result"].unique_id == MOCK_YAML_CONFIG[CONF_UNIQUE_ID]
async def test_form(hass, client):
    """Test we get the form."""
    assert client
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={CONF_SOURCE: config_entries.SOURCE_USER},
    )
    await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_FORM
    assert result["step_id"] == "user"

    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={CONF_SOURCE: config_entries.SOURCE_USER},
        data=MOCK_YAML_CONFIG,
    )
    await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_FORM
    assert result["step_id"] == "pairing"

    # Re-submitting the same user data keeps the flow on the pairing step.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={CONF_SOURCE: config_entries.SOURCE_USER},
        data=MOCK_YAML_CONFIG,
    )
    await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_FORM
    assert result["step_id"] == "pairing"

    with patch("homeassistant.components.webostv.async_setup_entry", return_value=True):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={}
        )
        await hass.async_block_till_done()

    assert result["type"] == RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == TV_NAME
@pytest.mark.parametrize(
    "apps, inputs",
    [
        # Live TV in apps (default)
        (MOCK_APPS, MOCK_INPUTS),
        # Live TV in inputs
        (
            {},
            {
                **MOCK_INPUTS,
                "livetv": {"label": "Live TV", "id": "livetv", "appId": LIVE_TV_APP_ID},
            },
        ),
        # Live TV not found
        ({}, MOCK_INPUTS),
    ],
)
async def test_options_flow_live_tv_in_apps(hass, client, apps, inputs):
    """Test options config flow Live TV found in apps."""
    client.apps = apps
    client.inputs = inputs
    entry = await setup_webostv(hass)

    result = await hass.config_entries.options.async_init(entry.entry_id)
    await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_FORM
    assert result["step_id"] == "init"

    # Selecting sources should store them in the entry options.
    result2 = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={CONF_SOURCES: ["Live TV", "Input01", "Input02"]},
    )
    await hass.async_block_till_done()
    assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
    assert result2["data"][CONF_SOURCES] == ["Live TV", "Input01", "Input02"]
async def test_options_flow_cannot_retrieve(hass, client):
    """Test options config flow cannot retrieve sources."""
    entry = await setup_webostv(hass)

    # Simulate the TV refusing the connection when sources are fetched.
    client.connect = Mock(side_effect=ConnectionRefusedError())
    result = await hass.config_entries.options.async_init(entry.entry_id)
    await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] == {"base": "cannot_retrieve"}
async def test_form_cannot_connect(hass, client):
    """Test we handle cannot connect error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={CONF_SOURCE: config_entries.SOURCE_USER},
        data=MOCK_YAML_CONFIG,
    )

    # Pairing fails because the TV refuses the connection.
    client.connect = Mock(side_effect=ConnectionRefusedError())
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )
    await hass.async_block_till_done()
    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_pairexception(hass, client):
    """Test pairing exception."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={CONF_SOURCE: config_entries.SOURCE_USER},
        data=MOCK_YAML_CONFIG,
    )

    # A pairing-specific error aborts the flow (unlike connection errors).
    client.connect = Mock(side_effect=WebOsTvPairError("error"))
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )
    await hass.async_block_till_done()
    assert result2["type"] == RESULT_TYPE_ABORT
    assert result2["reason"] == "error_pairing"
async def test_entry_already_configured(hass, client):
    """Test entry already configured."""
    await setup_webostv(hass)
    assert client

    # A user flow for an already configured host must abort immediately.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={CONF_SOURCE: config_entries.SOURCE_USER},
        data=MOCK_YAML_CONFIG,
    )
    assert result["type"] == RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
async def test_form_ssdp(hass, client):
    """Test that the ssdp confirmation form is served."""
    assert client

    with patch("homeassistant.components.webostv.async_setup_entry", return_value=True):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={CONF_SOURCE: SOURCE_SSDP}, data=MOCK_DISCOVERY_INFO
        )
        await hass.async_block_till_done()

    # SSDP discovery jumps straight to the pairing step.
    assert result["type"] == RESULT_TYPE_FORM
    assert result["step_id"] == "pairing"
async def test_ssdp_in_progress(hass, client):
    """Test abort if ssdp pairing is already in progress."""
    assert client
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={CONF_SOURCE: config_entries.SOURCE_USER},
        data=MOCK_YAML_CONFIG,
    )
    await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_FORM
    assert result["step_id"] == "pairing"

    # SSDP discovery of the same TV while pairing is pending must abort.
    result2 = await hass.config_entries.flow.async_init(
        DOMAIN, context={CONF_SOURCE: SOURCE_SSDP}, data=MOCK_DISCOVERY_INFO
    )
    await hass.async_block_till_done()
    assert result2["type"] == RESULT_TYPE_ABORT
    assert result2["reason"] == "already_in_progress"
async def test_ssdp_update_uuid(hass, client):
    """Test that ssdp updates existing host entry uuid."""
    entry = await setup_webostv(hass, None)
    assert client
    assert entry.unique_id is None

    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={CONF_SOURCE: SOURCE_SSDP}, data=MOCK_DISCOVERY_INFO
    )
    await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
    # The entry's missing uuid is backfilled from the "uuid:" UDN prefix.
    assert entry.unique_id == MOCK_DISCOVERY_INFO[ssdp.ATTR_UPNP_UDN][5:]
async def test_ssdp_not_update_uuid(hass, client):
    """Test that ssdp not updates different host."""
    entry = await setup_webostv(hass, None)
    assert client
    assert entry.unique_id is None

    # Discovery for a different host must not touch the existing entry.
    discovery_info = dataclasses.replace(MOCK_DISCOVERY_INFO)
    discovery_info.ssdp_location = "http://1.2.3.5"
    result2 = await hass.config_entries.flow.async_init(
        DOMAIN, context={CONF_SOURCE: SOURCE_SSDP}, data=discovery_info
    )
    await hass.async_block_till_done()
    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["step_id"] == "pairing"
    assert entry.unique_id is None
async def test_form_abort_uuid_configured(hass, client):
    """Test abort if uuid is already configured, verify host update."""
    entry = await setup_webostv(hass, MOCK_DISCOVERY_INFO[ssdp.ATTR_UPNP_UDN][5:])
    assert client
    assert entry.unique_id == MOCK_DISCOVERY_INFO[ssdp.ATTR_UPNP_UDN][5:]
    assert entry.data[CONF_HOST] == HOST

    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={CONF_SOURCE: config_entries.SOURCE_USER},
    )
    await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_FORM
    assert result["step_id"] == "user"

    # Same TV (same uuid) reported under a new host name.
    user_config = {
        CONF_HOST: "new_host",
        CONF_NAME: TV_NAME,
    }

    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={CONF_SOURCE: config_entries.SOURCE_USER},
        data=user_config,
    )
    await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_FORM
    assert result["step_id"] == "pairing"

    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={}
    )
    await hass.async_block_till_done()

    assert result["type"] == RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
    # The abort still updates the stored host to the new value.
    assert entry.data[CONF_HOST] == "new_host"
|
# mode: run
# tag: generator
import sys
def _next(it):
if sys.version_info[0] >= 3:
return next(it)
else:
return it.next()
def test_generator_frame_cycle():
    """
    >>> test_generator_frame_cycle()
    ("I'm done",)
    """
    testit = []
    # Generator whose finally block records that cleanup actually ran.
    def whoo():
        try:
            yield
        except:
            yield
        finally:
            testit.append("I'm done")
    g = whoo()
    _next(g)
    # Frame object cycle
    # eval() makes the thrown exception's traceback reference a frame that
    # holds g, creating a reference cycle; deleting g must still trigger
    # the generator's finally block.
    eval('g.throw(ValueError)', {'g': g})
    del g
    return tuple(testit)
|
# MIT License
#
# Copyright (c) 2022 Ecco Sneaks & Data
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Argument provider for Spark jobs
"""
import argparse
from typing import Optional, Iterable
from spark_utils.common.functions import decrypt_sensitive
from spark_utils.models.job_socket import JobSocket
class DecryptAction(argparse.Action):
    """
    Argparse action that decrypts its value using an encryption key taken
    from the environment.
    """

    def __init__(self,
                 option_strings,
                 dest,
                 const=None,
                 default=None,
                 required=False,
                 **kwargs):
        # Force nargs=1 so the encrypted value always arrives in __call__
        # as a single-element list.
        super().__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=1,
            const=const,
            default=default,
            required=required,
            **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # Trim at most one single quote from each end (shell-quoted values),
        # then store the decrypted plaintext on the namespace.
        trimmed = values[0]
        trimmed = trimmed[1:] if trimmed.startswith("'") else trimmed
        trimmed = trimmed[:-1] if trimmed.endswith("'") else trimmed
        setattr(namespace, self.dest, decrypt_sensitive(trimmed))
class SparkJobArgs:
    """
    Argsparse-based Spark job arguments provider

    This adds three default arguments to each job:

    - --source a|b|c d|e|f ...
      Describes inputs used by a job
      Here `a` is a mapping key that a developer should use to extract path/format information for the source
      `b` is a source path, in URI format: file:///, abfss:// etc.
      'c' is a data format: json, csv, delta etc.

    - --output a|b|c d|e|f...
      Describes output locations used by a job
      Same meanings as source attributes

    - --overwrite
      Controls overwrite behaviour. Will wipe the whole directory if set and honored by job developer.
    """

    def __init__(self):
        self._parser = argparse.ArgumentParser(description="Runtime arguments")
        # The separator used by _sources/_outputs is '|', so the help text
        # advertises the same format (it previously claimed '<key>:<path>').
        self._parser.add_argument("--source", type=str, nargs='+', default=[],
                                  help='Sources to read data from, in a form of <source key>|<source path>|<source format>')
        self._parser.add_argument("--output", type=str, nargs='+', default=[],
                                  help='Outputs to write data to, in a form of <output key>|<output path>|<output format>')
        self._parser.add_argument("--overwrite", dest='overwrite', action='store_true', help="Overwrite outputs")

        # Populated by parse().
        self._parsed_args = None
        self._parsed_sources = None
        self._parsed_outputs = None
        self._overwrite = False

    def _sources(self) -> "Iterable[JobSocket]":
        """Yield a JobSocket for every --source entry (fields split on '|')."""
        for source in self._parsed_args.source:
            yield JobSocket(*source.split('|'))

    def _outputs(self) -> "Iterable[JobSocket]":
        """Yield a JobSocket for every --output entry (fields split on '|')."""
        for output in self._parsed_args.output:
            yield JobSocket(*output.split('|'))

    def new_arg(self, *args, **kwargs):
        """
        Adds one or more new arguments

        :param args: argsparse.add_argument(...)
        :return: self, to allow chaining
        """
        self._parser.add_argument(*args, **kwargs)
        return self

    def new_encrypted_arg(self, *args, **kwargs):
        """
        Adds an argument that requires decryption before it can be used.

        :param args: argsparse.add_argument(...)
        :return: self, to allow chaining
        """
        # Always decrypt via DecryptAction; any caller-supplied 'action' is
        # overridden (the original if/else had exactly this net effect).
        kwargs['action'] = DecryptAction
        self._parser.add_argument(*args, **kwargs)
        return self

    def parse(self, arg_list=None):
        """
        Parse args using provided or implicit sys.argv list

        :param arg_list: Explicit argument list; when None, argparse reads sys.argv.
        :return: self, to allow chaining
        """
        # parse_args(None) falls back to sys.argv itself, so pass arg_list
        # straight through. This also fixes the old truthiness check, under
        # which an explicit empty list silently re-read sys.argv.
        self._parsed_args = self._parser.parse_args(arg_list)
        self._parsed_sources = list(self._sources())
        self._parsed_outputs = list(self._outputs())
        self._overwrite = self._parsed_args.overwrite
        return self

    def source(self, key) -> "Optional[JobSocket]":
        """
        Get a JobSource from input args, by key

        :param key: Mapping key
        :return: The matching JobSocket, or None if no source has this alias.
        """
        for parsed in self._parsed_sources:
            if parsed.alias == key:
                return parsed
        return None

    def output(self, key) -> "Optional[JobSocket]":
        """
        Get a JobOutput from input args, by key

        :param key: Mapping key
        :return: The matching JobSocket, or None if no output has this alias.
        """
        for parsed in self._parsed_outputs:
            if parsed.alias == key:
                return parsed
        return None

    def overwrite(self) -> bool:
        """
        Get a value for --overwrite argument

        :return: bool
        """
        return self._overwrite

    @property
    def parsed_args(self) -> argparse.Namespace:
        """
        Gets the parsed arguments

        :return: Namespace
        """
        return self._parsed_args
|
from __future__ import print_function
from cStringIO import StringIO
import sys
from rdkit.Chem.rdmolfiles import (
MolFromMol2Block,
MolToPDBBlock,
)
def main(args, stdin=sys.stdin, stdout=sys.stdout):
    """Convert a Mol2 structure (path in args[0], else stdin) to PDB on stdout.

    Returns 0 on success, for use as a process exit status.
    """
    if args:
        with open(args[0]) as handle:
            mol2_block = handle.read()
    else:
        mol2_block = stdin.read()
    structure = MolFromMol2Block(mol2_block)
    stdout.write(MolToPDBBlock(structure))
    return 0
# Script entry point: convert argv[1] (or stdin) and exit with main()'s status.
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:], sys.stdin, sys.stdout))
|
from mcpi.minecraft import Minecraft
mc = Minecraft.create()

# Outer dimensions of the house, in blocks.
length = 20
width = 10
height = 5

# Anchor one corner of the house at the player's current tile position.
x, y, z = mc.player.getTilePos()
x2 = x + length
y2 = y + height
z2 = z + width

# Build a solid cuboid, then carve out the interior with air (block id 0) to
# leave a hollow shell. The original second setBlocks call was missing the
# block-id argument entirely, so the hollowing step was malformed.
# NOTE(review): wall material id 8 is water in standard Minecraft ids —
# confirm the intended material.
mc.setBlocks(x, y, z, x2, y2, z2, 8)
mc.setBlocks(x + 1, y + 1, z + 1, x2 - 1, y2 - 1, z2 - 1, 0)
|
import torch
import numpy as np
import argparse
import pybulletgym
from cont_skillspace_test.grid_rollout.rollout_tester_plot_thes \
import RolloutTesterPlotThes
from cont_skillspace_test.grid_rollout.grid_rollouter \
import GridRollouter
from cont_skillspace_test.utils.load_env import load_env
import rlkit.torch.pytorch_util as ptu
parser = argparse.ArgumentParser()
# nargs='+' makes argparse produce a list when the flag is given, so the
# default must also be a list — with the old scalar default=100, omitting
# --epoch made the `for epoch in epochs` loop below iterate over a bare int.
parser.add_argument('--epoch',
                    type=int,
                    default=[100],
                    nargs='+',
                    help="epoch to test",
                    )
parser.add_argument('--grid_factor',
                    type=float,
                    default=None,
                    help="low, high of skills grid")
parser.add_argument('--num_eval_steps',
                    type=int,
                    default=1000,
                    help="number of rollout steps per io-selected skill",
                    )
parser.add_argument('--num_grid_points',
                    type=int,
                    default=200,
                    help="number of skill grid points")
parser.add_argument('--plot_height_inches',
                    type=float,
                    default=3.7,
                    help="plot height (inches)")
parser.add_argument('--plot_width_inches',
                    type=float,
                    default=3.7,
                    help="plot width (inches)")
parser.add_argument('--x_label',
                    type=str,
                    default=None,
                    help="x label for plot")
parser.add_argument('--y_label',
                    type=str,
                    default=None,
                    help="y label for plot")
parser.add_argument('--path',
                    type=str,
                    default='./grid_rollouts',
                    help="path variable")
parser.add_argument('--filename',
                    type=str,
                    default='savedfig',
                    help="filename prefix")
args = parser.parse_args()

# Evaluation runs on CPU only.
ptu.set_gpu_mode(False)
epochs = args.epoch
horizon_len = args.num_eval_steps
extension = ".pkl"

# Skill-grid bounds come either from the saved training config (which must
# declare a uniform skill prior) or symmetrically from --grid_factor.
if args.grid_factor is None:
    config_name = "config" + extension
    config = torch.load(config_name)
    assert config['skill_prior']['type'] == "uniform"
    uniform_prior_low = config['skill_prior']['uniform']['low']
    uniform_prior_high = config['skill_prior']['uniform']['high']
else:
    uniform_prior_low = -args.grid_factor
    uniform_prior_high = args.grid_factor

# Skill prior tuple (assumes a 2-dimensional skill space — TODO confirm).
uniform_prior_edges = (np.array([uniform_prior_low, uniform_prior_low]),
                       np.array([uniform_prior_high, uniform_prior_high]))

# Load env
env = load_env()

for epoch in epochs:
    # Load the policy checkpoint for this epoch and roll it out over a grid
    # of skills, saving one plot per epoch.
    policy_net_name = "policy_net_epoch{}".format(epoch) + extension
    policy = torch.load(policy_net_name, map_location=ptu.device)
    grid_rollouter = GridRollouter(
        env=env,
        policy=policy,
        horizon_len=horizon_len,
    )
    tester = RolloutTesterPlotThes(
        test_rollouter=grid_rollouter,
        plot_height_width_inches=(args.plot_height_inches, args.plot_width_inches),
        xy_label=(args.x_label, args.y_label),
        path=args.path,
        save_name_prefix=args.filename,
    )
    tester(
        epoch=epoch,
        grid_low=uniform_prior_edges[0],
        grid_high=uniform_prior_edges[1],
        num_points=args.num_grid_points,
    )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.