gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
import os
import sys
import traceback
import zstackwoodpecker.header.checker as checker_header
import zstackwoodpecker.header.vm as vm_header
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstacklib.utils.http as http
import zstacklib.utils.jsonobject as jsonobject
import zstacktestagent.plugins.vm as vm_plugin
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
import apibinding.inventory as inventory
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
class zstack_kvm_share_volume_file_checker(checker_header.TestChecker):
    '''Check kvm share volume file existence on its primary storage backend.

    If the volume file is found on the backend, call self.judge(True);
    otherwise call self.judge(False). Share volumes are only supported on
    Ceph and SharedBlock primary storage.
    '''
    def check(self):
        super(zstack_kvm_share_volume_file_checker, self).check()
        volume = self.test_obj.volume
        volume_installPath = volume.installPath
        if not volume_installPath:
            test_util.test_logger('Check result: [installPath] is Null for [volume uuid: ] %s. Can not check volume file existence' % volume.uuid)
            return self.judge(False)
        ps_uuid = volume.primaryStorageUuid
        ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
        if ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
            # Propagate the judge() result so callers can rely on the return
            # value, consistent with the other checkers in this file.
            return self.check_ceph(volume, volume_installPath, ps)
        elif ps.type == 'SharedBlock':
            return self.check_sharedblock(volume, volume_installPath, ps)
        else:
            test_util.test_logger('Check result: [share volume] primary storage is only support ceph, other storage type is not supported.')

    def check_ceph(self, volume, volume_installPath, ps):
        '''Verify the rbd image backing the volume exists on the ceph cluster.

        Runs `rbd info` over ssh on a mon host and judges on its success.
        '''
        monHost = ps.mons[0].hostname
        # Look for an environment entry mentioning this mon host; it carries
        # the ssh credentials for the ceph cluster. Fall back to default
        # root credentials when no entry matches.
        for key in os.environ.keys():
            if monHost in os.environ.get(key):
                ceph_host, username, password = \
                        test_lib.lib_get_ceph_info(os.environ.get(key))
                break
        else:
            ceph_host = monHost
            username = 'root'
            password = 'password'
        volume_installPath = volume_installPath.split('ceph://')[1]
        command = 'rbd info %s' % volume_installPath
        if test_lib.lib_execute_ssh_cmd(ceph_host, username, password, command, 10):
            test_util.test_logger('Check result: [volume:] %s [file:] %s exist on ceph [host name:] %s .' % (volume.uuid, volume_installPath, ceph_host))
            return self.judge(True)
        else:
            test_util.test_logger('Check result: [volume:] %s [file:] %s does NOT exist on ceph [host name:] %s .' % (volume.uuid, volume_installPath, ceph_host))
            return self.judge(False)

    def check_sharedblock(self, volume, volume_installPath, ps):
        '''Verify the logical volume exists on a host of the ps cluster.

        Runs `lvscan` over ssh on one cluster host; the shared LV must be
        visible from every host attached to the primary storage.
        '''
        devPath = "/dev/" + volume_installPath.split("sharedblock://")[1]
        cmd = 'lvscan'
        conditions = res_ops.gen_query_conditions('primaryStorage.uuid', '=', ps.uuid)
        cluster = res_ops.query_resource(res_ops.CLUSTER, conditions)[0]
        conditions = res_ops.gen_query_conditions('clusterUuid', '=', cluster.uuid)
        host = res_ops.query_resource(res_ops.HOST, conditions)[0]
        result = test_lib.lib_execute_ssh_cmd(host.managementIp, 'root', 'password', cmd)
        # Guard against a falsy/None result when the ssh command fails,
        # which would otherwise raise TypeError on the `in` test. Log the
        # outcome for consistency with check_ceph.
        if result and devPath in result:
            test_util.test_logger('Check result: [volume:] %s [device:] %s exist on [host:] %s .' % (volume.uuid, devPath, host.managementIp))
            return self.judge(True)
        else:
            test_util.test_logger('Check result: [volume:] %s [device:] %s does NOT exist on [host:] %s .' % (volume.uuid, devPath, host.managementIp))
            return self.judge(False)
class zstack_kvm_share_volume_attach_checker(checker_header.TestChecker):
    '''
    Check if volume is really attached to vm in libvirt system.
    '''
    def check(self):
        super(zstack_kvm_share_volume_attach_checker, self).check()
        volume = self.test_obj.volume

        # A share volume may be attached to several vms; query the share
        # volume attachment records by volume uuid.
        cond = res_ops.gen_query_conditions("volumeUuid", '=', volume.uuid)
        attached_vm_uuids = res_ops.query_resource_fields(
                res_ops.SHARE_VOLUME, cond, None, fields=['vmInstanceUuid'])
        if not attached_vm_uuids:
            test_util.test_logger('Check result: [volume:] %s does NOT have vmInstanceUuid. It is not attached to any vm.' % volume.uuid)
            return self.judge(False)

        if not self.test_obj.target_vm:
            test_util.test_logger('Check result: test [volume:] %s does NOT have vmInstance record in test structure. Can not do furture checking.' % volume.uuid)
            return self.judge(False)

        vm = self.test_obj.target_vm.vm
        install_path = volume.installPath
        if not install_path:
            test_util.test_logger('Check result: [installPath] is Null for [volume uuid: ] %s. Can not check if volume is attached to vm.' % volume.uuid)
            return self.judge(False)

        # Ask the test agent on the vm's host for the vm's block device list.
        host = test_lib.lib_get_vm_host(vm)
        cmd = vm_plugin.VmStatusCmd()
        cmd.vm_uuids = [vm.uuid]
        rsp_str = http.json_dump_post(testagent.build_http_path(host.managementIp, vm_plugin.VM_BLK_STATUS), cmd)
        rsp = jsonobject.loads(rsp_str)
        blk_status = jsonobject.dumps(rsp.vm_status[vm.uuid])

        # Normalize the install path to the form libvirt reports.
        if install_path.startswith('ceph'):
            install_path = install_path.split('ceph://')[1]
        elif install_path.startswith('sharedblock'):
            install_path = "/dev/" + install_path.split('sharedblock://')[1]

        if install_path in blk_status:
            test_util.test_logger('Check result: [volume:] %s [file:] %s is found in [vm:] %s on [host:] %s .' % (volume.uuid, install_path, vm.uuid, host.managementIp))
            return self.judge(True)
        test_util.test_logger('Check result: [volume:] %s [file:] %s is not found in [vm:] %s on [host:] %s .' % (volume.uuid, install_path, vm.uuid, host.managementIp))
        return self.judge(False)
class zstack_kvm_virtioscsi_shareable_checker(checker_header.TestChecker):
    '''
    Check if volume has shareable label attached to vm in libvirt system.

    Dumps the libvirt domain XML of the vm the share volume is attached to
    and verifies that a matching <disk> source is present and that a
    <shareable/> element is set.
    '''
    def check(self):
        super(zstack_kvm_virtioscsi_shareable_checker, self).check()
        volume = self.test_obj.volume
        has_volume = False
        shareable = False
        check_result = False
        # Use the test logger instead of a bare py2 print statement, for
        # consistency with the rest of the file.
        test_util.test_logger("volume_uuid= %s" % volume.uuid)
        sv_cond = res_ops.gen_query_conditions("volumeUuid", '=', volume.uuid)
        volume_vmInstanceUuid = res_ops.query_resource_fields(res_ops.SHARE_VOLUME, sv_cond, None, fields=['vmInstanceUuid'])[0].vmInstanceUuid
        pv_cond = res_ops.gen_query_conditions("volume.uuid", '=', volume.uuid)
        volume_ps_type = res_ops.query_resource_fields(res_ops.PRIMARY_STORAGE, pv_cond, None, fields=['type'])[0].type
        host = test_lib.lib_get_vm_host(test_lib.lib_get_vm_by_uuid(volume_vmInstanceUuid))
        test_util.test_logger('vmInstanceUuid_host.ip is %s' % host.managementIp)
        test_util.test_logger('vmInstanceUuid is %s' % volume_vmInstanceUuid)
        # Dump the domain XML on the vm's host over ssh and parse the stream.
        xml = os.popen('sshpass -p password ssh root@%s -p %s "virsh dumpxml %s"' % (host.managementIp, host.sshPort, volume_vmInstanceUuid))
        tree = ET.parse(xml)
        root = tree.getroot()
        for domain in root:
            if domain.tag != "devices":
                continue
            for device in domain:
                if device.tag != "disk":
                    continue
                for disk in device:
                    if disk.tag == "source":
                        # disk.get() returns None when the attribute is
                        # absent; default to '' so the membership test can
                        # not raise AttributeError. `in` also matches a
                        # uuid at index 0, which the original
                        # find(...) > 0 test missed.
                        if volume_ps_type == "Ceph":
                            if volume.uuid in disk.get("name", ""):
                                has_volume = True
                        if volume_ps_type == "SharedBlock":
                            if volume.uuid in disk.get("file", ""):
                                has_volume = True
                    if disk.tag == "shareable":
                        shareable = True
            if has_volume and shareable:
                check_result = True
            break
        test_util.test_logger('Check result: The result of check VirtioSCSI shareable label is %s' % check_result)
        return self.judge(check_result)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PublicIPPrefixesOperations(object):
"""PublicIPPrefixesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    """Keep references to the pipeline client, configuration and the
    serializer/deserializer pair used by every operation."""
    self._client = client
    self._config = config
    self._serialize = serializer
    self._deserialize = deserializer
def _delete_initial(
    self,
    resource_group_name,  # type: str
    public_ip_prefix_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Send the raw DELETE request for a public IP prefix (no LRO polling)."""
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"

    # Expand the URL template with the encoded path arguments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self._delete_initial.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {}  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # DELETE may complete immediately (200/204) or asynchronously (202).
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    public_ip_prefix_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes the specified public IP prefix.
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param public_ip_prefix_name: The name of the PublicIpPrefix.
    :type public_ip_prefix_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
    Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Pop LRO-control keywords first so they are not forwarded to the
    # initial HTTP call below.
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial DELETE when not resuming from a saved token.
    if cont_token is None:
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            public_ip_prefix_name=public_ip_prefix_name,
            # Identity cls keeps the raw PipelineResponse for the poller.
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial call; drop them so the polling method does
    # not receive stale values.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # DELETE has no body to deserialize; only apply the custom cls.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # final-state-via location: the terminal resource state is read from
    # the Location header rather than the original URL.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    public_ip_prefix_name,  # type: str
    expand=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.PublicIPPrefix"
    """Gets the specified public IP prefix in a specified resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param public_ip_prefix_name: The name of the public IP prefix.
    :type public_ip_prefix_name: str
    :param expand: Expands referenced resources.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: PublicIPPrefix, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2019_06_01.models.PublicIPPrefix
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PublicIPPrefix"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    accept = "application/json"

    # Expand the URL template with the encoded path arguments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self.get.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
def _create_or_update_initial(
    self,
    resource_group_name,  # type: str
    public_ip_prefix_name,  # type: str
    parameters,  # type: "_models.PublicIPPrefix"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.PublicIPPrefix"
    """Send the raw PUT request creating/updating a public IP prefix."""
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PublicIPPrefix"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the URL template with the encoded path arguments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self._create_or_update_initial.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    body_content_kwargs = {'content': self._serialize.body(parameters, 'PublicIPPrefix')}  # type: Dict[str, Any]
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 = updated, 201 = created; both carry a PublicIPPrefix body.
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name,  # type: str
    public_ip_prefix_name,  # type: str
    parameters,  # type: "_models.PublicIPPrefix"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.PublicIPPrefix"]
    """Creates or updates a static or dynamic public IP prefix.
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param public_ip_prefix_name: The name of the public IP prefix.
    :type public_ip_prefix_name: str
    :param parameters: Parameters supplied to the create or update public IP prefix operation.
    :type parameters: ~azure.mgmt.network.v2019_06_01.models.PublicIPPrefix
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
    Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either PublicIPPrefix or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_06_01.models.PublicIPPrefix]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Pop LRO-control keywords first so they are not forwarded to the
    # initial HTTP call below.
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PublicIPPrefix"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial PUT when not resuming from a saved token.
    if cont_token is None:
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            public_ip_prefix_name=public_ip_prefix_name,
            parameters=parameters,
            # Identity cls keeps the raw PipelineResponse for the poller.
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial call; drop them so the polling method does
    # not receive stale values.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response into the resource model.
        deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # final-state-via location: the terminal resource state is read from
    # the Location header rather than the original URL.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
def _update_tags_initial(
    self,
    resource_group_name,  # type: str
    public_ip_prefix_name,  # type: str
    parameters,  # type: "_models.TagsObject"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.PublicIPPrefix"
    """Send the raw PATCH request updating public IP prefix tags."""
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PublicIPPrefix"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the URL template with the encoded path arguments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self._update_tags_initial.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    body_content_kwargs = {'content': self._serialize.body(parameters, 'TagsObject')}  # type: Dict[str, Any]
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
def begin_update_tags(
    self,
    resource_group_name,  # type: str
    public_ip_prefix_name,  # type: str
    parameters,  # type: "_models.TagsObject"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.PublicIPPrefix"]
    """Updates public IP prefix tags.
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param public_ip_prefix_name: The name of the public IP prefix.
    :type public_ip_prefix_name: str
    :param parameters: Parameters supplied to update public IP prefix tags.
    :type parameters: ~azure.mgmt.network.v2019_06_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
    Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either PublicIPPrefix or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_06_01.models.PublicIPPrefix]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Pop LRO-control keywords first so they are not forwarded to the
    # initial HTTP call below.
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PublicIPPrefix"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial PATCH when not resuming from a saved token.
    if cont_token is None:
        raw_result = self._update_tags_initial(
            resource_group_name=resource_group_name,
            public_ip_prefix_name=public_ip_prefix_name,
            parameters=parameters,
            # Identity cls keeps the raw PipelineResponse for the poller.
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial call; drop them so the polling method does
    # not receive stale values.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response into the resource model.
        deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # NOTE(review): unlike begin_delete/begin_create_or_update, this call
    # passes no lro_options (no 'final-state-via' override) — generated
    # that way; confirm against the service's LRO contract if changing.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
def list_all(
    self,
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.PublicIPPrefixListResult"]
    """Gets all the public IP prefixes in a subscription.
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_06_01.models.PublicIPPrefixListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PublicIPPrefixListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    accept = "application/json"
    def prepare_request(next_link=None):
        # Build the GET request: the first page uses the metadata URL
        # template; subsequent pages use the service-provided next_link,
        # which already embeds its query string.
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            url = self.list_all.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request
    def extract_data(pipeline_response):
        # Turn one page response into (next page link, iterator of items).
        deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)
    def get_next(next_link=None):
        # Fetch one page and fail fast on a non-200 status.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response
    # ItemPaged drives get_next/extract_data lazily as the caller iterates.
    return ItemPaged(
        get_next, extract_data
    )
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPPrefixes'}  # type: ignore
def list(
    self,
    resource_group_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> Iterable["_models.PublicIPPrefixListResult"]
    """Gets all public IP prefixes in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_06_01.models.PublicIPPrefixListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PublicIPPrefixListResult"]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    accept = "application/json"

    def build_request(next_link=None):
        # Headers are identical for the first page and for continuations.
        headers = {}  # type: Dict[str, Any]
        headers['Accept'] = self._serialize.header("accept", accept, 'str')
        if next_link:
            # The service returns a fully-formed continuation URL; no extra
            # query parameters are added to it.
            return self._client.get(next_link, {}, headers)
        # First page: expand the templated URL and add the api-version.
        url = self.list.metadata['url']  # type: ignore
        path_args = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_args)
        params = {}  # type: Dict[str, Any]
        params['api-version'] = self._serialize.query("api_version", api_version, 'str')
        return self._client.get(url, params, headers)

    def extract_data(pipeline_response):
        # Deserialize one page and hand back (continuation token, items).
        deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
        elems = deserialized.value
        if cls:
            elems = cls(elems)
        return deserialized.next_link or None, iter(elems)

    def get_next(next_link=None):
        # Fetch a single page, mapping HTTP errors to typed exceptions.
        request = build_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(get_next, extract_data)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes'}  # type: ignore
|
|
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Logical units dealing with node groups."""
import itertools
import logging
from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import objects
from ganeti import opcodes
from ganeti import utils
from ganeti.masterd import iallocator
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, ResultWithJobs
from ganeti.cmdlib.common import MergeAndVerifyHvState, \
MergeAndVerifyDiskState, GetWantedNodes, GetUpdatedParams, \
CheckNodeGroupInstances, GetUpdatedIPolicy, \
ComputeNewInstanceViolations, GetDefaultIAllocator, ShareAll, \
CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceLvsToNodes, \
CheckIpolicyVsDiskTemplates, CheckDiskAccessModeValidity, \
CheckDiskAccessModeConsistency, ConnectInstanceCommunicationNetworkOp
import ganeti.masterd.instance
class LUGroupAdd(LogicalUnit):
  """Logical unit for creating node groups.
  """
  HPATH = "group-add"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False
  def ExpandNames(self):
    """Reserve a fresh UUID for the new group and register its lock."""
    # We need the new group's UUID here so that we can create and acquire the
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
    # that it should not check whether the UUID exists in the configuration.
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
    self.needed_locks = {}
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
  def _CheckIpolicy(self):
    """Checks the group's ipolicy for consistency and validity.
    """
    if self.op.ipolicy:
      cluster = self.cfg.GetClusterInfo()
      # Fill the group-level policy with cluster defaults before validating,
      # so partial policies are checked in their effective form.
      full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
      try:
        objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
      except errors.ConfigurationError as err:
        raise errors.OpPrereqError("Invalid instance policy: %s" % err,
                                   errors.ECODE_INVAL)
      CheckIpolicyVsDiskTemplates(full_ipolicy,
                                  cluster.enabled_disk_templates)
  def CheckPrereq(self):
    """Check prerequisites.
    This checks that the given group name is not an existing node group
    already.
    """
    try:
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    except errors.OpPrereqError:
      # Lookup failing means the name is free, which is what we want here.
      pass
    else:
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
                                 " node group (UUID: %s)" %
                                 (self.op.group_name, existing_uuid),
                                 errors.ECODE_EXISTS)
    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
    if self.op.hv_state:
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state, None)
    else:
      self.new_hv_state = None
    if self.op.disk_state:
      self.new_disk_state = MergeAndVerifyDiskState(self.op.disk_state, None)
    else:
      self.new_disk_state = None
    if self.op.diskparams:
      # Type-check each per-disk-template parameter subdict individually.
      for templ in constants.DISK_TEMPLATES:
        if templ in self.op.diskparams:
          utils.ForceDictType(self.op.diskparams[templ],
                              constants.DISK_DT_TYPES)
      self.new_diskparams = self.op.diskparams
      try:
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
      except errors.OpPrereqError as err:
        raise errors.OpPrereqError("While verify diskparams options: %s" % err,
                                   errors.ECODE_INVAL)
    else:
      self.new_diskparams = {}
    self._CheckIpolicy()
  def BuildHooksEnv(self):
    """Build hooks env.
    """
    return {
      "GROUP_NAME": self.op.group_name,
      }
  def BuildHooksNodes(self):
    """Build hooks nodes.
    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])
  @staticmethod
  def _ConnectInstanceCommunicationNetwork(cfg, group_uuid, network_name):
    """Connect a node group to the instance communication network.
    The group is connected to the instance communication network via
    the Opcode 'OpNetworkConnect'.
    @type cfg: L{ganeti.config.ConfigWriter}
    @param cfg: Ganeti configuration
    @type group_uuid: string
    @param group_uuid: UUID of the group to connect
    @type network_name: string
    @param network_name: name of the network to connect to
    @rtype: L{ganeti.cmdlib.ResultWithJobs} or L{None}
    @return: L{ganeti.cmdlib.ResultWithJobs} if the group needs to be
             connected, otherwise (the group is already connected)
             L{None}
    """
    try:
      cfg.LookupNetwork(network_name)
      network_exists = True
    except errors.OpPrereqError:
      network_exists = False
    if network_exists:
      op = ConnectInstanceCommunicationNetworkOp(group_uuid, network_name)
      return ResultWithJobs([[op]])
    else:
      return None
  def Exec(self, feedback_fn):
    """Add the node group to the cluster.
    """
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
                                  uuid=self.group_uuid,
                                  alloc_policy=self.op.alloc_policy,
                                  ndparams=self.op.ndparams,
                                  diskparams=self.new_diskparams,
                                  ipolicy=self.op.ipolicy,
                                  hv_state_static=self.new_hv_state,
                                  disk_state_static=self.new_disk_state)
    # check_uuid=False: the UUID was generated in ExpandNames and is by
    # construction not yet present in the configuration.
    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
    # If the cluster uses an instance communication network, connect the new
    # group to it via a follow-up job.
    network_name = self.cfg.GetClusterInfo().instance_communication_network
    if network_name:
      return self._ConnectInstanceCommunicationNetwork(self.cfg,
                                                       self.group_uuid,
                                                       network_name)
class LUGroupAssignNodes(NoHooksLU):
  """Logical unit for assigning nodes to groups.
  """
  REQ_BGL = False
  def ExpandNames(self):
    """Resolve the destination group and node names; compute initial locks."""
    # These raise errors.OpPrereqError on their own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
    # We want to lock all the affected nodes and groups. We have readily
    # available the list of nodes, and the *destination* group. To gather the
    # list of "source" groups, we need to fetch node information later on.
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
      locking.LEVEL_NODE: self.op.node_uuids,
      }
  def DeclareLocks(self, level):
    """Add the nodes' current (source) groups to the group-level lock set."""
    if level == locking.LEVEL_NODEGROUP:
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1
      # Try to get all affected nodes' groups without having the group or node
      # lock yet. Needs verification later in the code flow.
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids)
      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
  def CheckPrereq(self):
    """Check prerequisites.
    """
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
            frozenset(self.op.node_uuids))
    # Verify the optimistic group locking from DeclareLocks: the set of groups
    # we actually hold must match the groups the nodes belong to *now*.
    expected_locks = (set([self.group_uuid]) |
                      self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids))
    actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
    if actual_locks != expected_locks:
      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
                               " current groups are '%s', used to be '%s'" %
                               (utils.CommaJoin(expected_locks),
                                utils.CommaJoin(actual_locks)))
    self.node_data = self.cfg.GetAllNodesInfo()
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    instance_data = self.cfg.GetAllInstancesInfo()
    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))
    (new_splits, previous_splits) = \
      self.CheckAssignmentForSplitInstances([(uuid, self.group_uuid)
                                             for uuid in self.op.node_uuids],
                                            self.node_data, instance_data)
    if new_splits:
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(
                         self.cfg.GetInstanceNames(new_splits)))
      if not self.op.force:
        raise errors.OpExecError("The following instances get split by this"
                                 " change and --force was not given: %s" %
                                 fmt_new_splits)
      else:
        self.LogWarning("This operation will split the following instances: %s",
                        fmt_new_splits)
        if previous_splits:
          self.LogWarning("In addition, these already-split instances continue"
                          " to be split across groups: %s",
                          utils.CommaJoin(utils.NiceSort(
                            self.cfg.GetInstanceNames(previous_splits))))
  def Exec(self, feedback_fn):
    """Assign nodes to a new group.
    """
    mods = [(node_uuid, self.group_uuid) for node_uuid in self.op.node_uuids]
    self.cfg.AssignGroupNodes(mods)
  def CheckAssignmentForSplitInstances(self, changes, node_data, instance_data):
    """Check for split instances after a node assignment.
    This method considers a series of node assignments as an atomic operation,
    and returns information about split instances after applying the set of
    changes.
    In particular, it returns information about newly split instances, and
    instances that were already split, and remain so after the change.
    Only disks whose template is listed in constants.DTS_INT_MIRROR are
    considered.
    @type changes: list of (node_uuid, new_group_uuid) pairs.
    @param changes: list of node assignments to consider.
    @param node_data: a dict with data for all nodes
    @param instance_data: a dict with all instances to consider
    @rtype: a two-tuple
    @return: a list of instances that were previously okay and result split as a
      consequence of this change, and a list of instances that were previously
      split and this change does not fix.
    """
    # Only assignments that actually move a node to a different group matter.
    changed_nodes = dict((uuid, group) for uuid, group in changes
                         if node_data[uuid].group != group)
    all_split_instances = set()
    previously_split_instances = set()
    for inst in instance_data.values():
      inst_disks = self.cfg.GetInstanceDisks(inst.uuid)
      if not utils.AnyDiskOfType(inst_disks, constants.DTS_INT_MIRROR):
        continue
      inst_nodes = self.cfg.GetInstanceNodes(inst.uuid)
      # Split before the change: instance nodes already span >1 group.
      if len(set(node_data[node_uuid].group
                 for node_uuid in inst_nodes)) > 1:
        previously_split_instances.add(inst.uuid)
      # Split after the change: evaluate groups with the assignments applied.
      if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
                 for node_uuid in inst_nodes)) > 1:
        all_split_instances.add(inst.uuid)
    return (list(all_split_instances - previously_split_instances),
            list(previously_split_instances & all_split_instances))
class LUGroupSetParams(LogicalUnit):
  """Modifies the parameters of a node group.
  """
  HPATH = "group-modify"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False
  def CheckArguments(self):
    """Reject requests that would not modify anything."""
    all_changes = [
      self.op.ndparams,
      self.op.diskparams,
      self.op.alloc_policy,
      self.op.hv_state,
      self.op.disk_state,
      self.op.ipolicy,
      ]
    if all_changes.count(None) == len(all_changes):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)
    if self.op.diskparams:
      CheckDiskAccessModeValidity(self.op.diskparams)
  def ExpandNames(self):
    """Resolve the group and declare group + (shared) instance locks."""
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }
    # Instances are only read for policy-violation warnings; share their locks.
    self.share_locks[locking.LEVEL_INSTANCE] = 1
  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]
      # Lock instances optimistically, needs verification once group lock has
      # been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))
  @staticmethod
  def _UpdateAndVerifyDiskParams(old, new):
    """Updates and verifies disk parameters.
    """
    new_params = GetUpdatedParams(old, new)
    utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
    return new_params
  def _CheckIpolicy(self, cluster, owned_instance_names):
    """Sanity checks for the ipolicy.
    @type cluster: C{objects.Cluster}
    @param cluster: the cluster's configuration
    @type owned_instance_names: list of string
    @param owned_instance_names: list of instances
    """
    if self.op.ipolicy:
      self.new_ipolicy = GetUpdatedIPolicy(self.group.ipolicy,
                                           self.op.ipolicy,
                                           group_policy=True)
      new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
      CheckIpolicyVsDiskTemplates(new_ipolicy,
                                  cluster.enabled_disk_templates)
      instances = \
        dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))
      gmi = ganeti.masterd.instance
      # Violations are only warned about, not treated as errors: the new
      # policy is still applied.
      violations = \
        ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
                                                               self.group),
                                     new_ipolicy, list(instances.values()),
                                     self.cfg)
      if violations:
        self.LogWarning("After the ipolicy change the following instances"
                        " violate them: %s",
                        utils.CommaJoin(violations))
  def CheckPrereq(self):
    """Check prerequisites.
    """
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    cluster = self.cfg.GetClusterInfo()
    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))
    if self.op.ndparams:
      new_ndparams = GetUpdatedParams(self.group.ndparams, self.op.ndparams)
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams
    if self.op.diskparams:
      diskparams = self.group.diskparams
      uavdp = self._UpdateAndVerifyDiskParams
      # For each disktemplate subdict update and verify the values
      new_diskparams = dict((dt,
                             uavdp(diskparams.get(dt, {}),
                                   self.op.diskparams[dt]))
                            for dt in constants.DISK_TEMPLATES
                            if dt in self.op.diskparams)
      # As we've all subdicts of diskparams ready, lets merge the actual
      # dict with all updated subdicts
      self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
      try:
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
        CheckDiskAccessModeConsistency(self.new_diskparams, self.cfg,
                                       group=self.group)
      except errors.OpPrereqError as err:
        raise errors.OpPrereqError("While verify diskparams options: %s" % err,
                                   errors.ECODE_INVAL)
    if self.op.hv_state:
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
                                                self.group.hv_state_static)
    if self.op.disk_state:
      self.new_disk_state = \
        MergeAndVerifyDiskState(self.op.disk_state,
                                self.group.disk_state_static)
    self._CheckIpolicy(cluster, owned_instance_names)
  def BuildHooksEnv(self):
    """Build hooks env.
    """
    return {
      "GROUP_NAME": self.op.group_name,
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
      }
  def BuildHooksNodes(self):
    """Build hooks nodes.
    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])
  def Exec(self, feedback_fn):
    """Modifies the node group.
    """
    # Collect (name, new value) pairs for the fields actually changed, to be
    # returned as the job result.
    result = []
    if self.op.ndparams:
      self.group.ndparams = self.new_ndparams
      result.append(("ndparams", str(self.group.ndparams)))
    if self.op.diskparams:
      self.group.diskparams = self.new_diskparams
      result.append(("diskparams", str(self.group.diskparams)))
    if self.op.alloc_policy:
      self.group.alloc_policy = self.op.alloc_policy
    if self.op.hv_state:
      self.group.hv_state_static = self.new_hv_state
    if self.op.disk_state:
      self.group.disk_state_static = self.new_disk_state
    if self.op.ipolicy:
      self.group.ipolicy = self.new_ipolicy
    self.cfg.Update(self.group, feedback_fn)
    return result
class LUGroupRemove(LogicalUnit):
  """Logical unit for removing an (empty) node group.

  """
  HPATH = "group-remove"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    """Resolve the group name and take only that group's lock."""
    # LookupNodeGroup raises errors.OpPrereqError itself for unknown names.
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.needed_locks = {locking.LEVEL_NODEGROUP: [self.group_uuid]}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name exists as a node group, that is
    empty (i.e., contains no nodes), and that is not the last group of the
    cluster.

    """
    # A group may only be removed once no node belongs to it any more.
    members = [node.uuid
               for node in self.cfg.GetAllNodesInfo().values()
               if node.group == self.group_uuid]
    if members:
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
                                 " nodes: %s" %
                                 (self.op.group_name,
                                  utils.CommaJoin(utils.NiceSort(members))),
                                 errors.ECODE_STATE)
    # The cluster must never be left without any node group.
    if len(self.cfg.GetNodeGroupList()) == 1:
      raise errors.OpPrereqError("Group '%s' is the only group, cannot be"
                                 " removed" % self.op.group_name,
                                 errors.ECODE_STATE)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {"GROUP_NAME": self.op.group_name}

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    master = self.cfg.GetMasterNode()
    return ([master], [master])

  def Exec(self, feedback_fn):
    """Remove the node group.

    """
    try:
      self.cfg.RemoveNodeGroup(self.group_uuid)
    except errors.ConfigurationError:
      # The group vanished between CheckPrereq and now.
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
                               (self.op.group_name, self.group_uuid))
class LUGroupRename(LogicalUnit):
  """Logical unit for renaming a node group.

  """
  HPATH = "group-rename"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    """Resolve the group name and take only that group's lock."""
    # LookupNodeGroup raises errors.OpPrereqError itself for unknown names.
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.needed_locks = {locking.LEVEL_NODEGROUP: [self.group_uuid]}

  def CheckPrereq(self):
    """Check prerequisites.

    Ensures requested new name is not yet used.

    """
    # A successful lookup of the *new* name means it is already taken.
    try:
      clashing_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
    except errors.OpPrereqError:
      return
    raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
                               " node group (UUID: %s)" %
                               (self.op.new_name, clashing_uuid),
                               errors.ECODE_EXISTS)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {"OLD_NAME": self.op.group_name, "NEW_NAME": self.op.new_name}

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    master = self.cfg.GetMasterNode()
    # Hooks run on the master plus every node of the renamed group.
    others = self.cfg.GetAllNodesInfo()
    others.pop(master, None)
    members = [node.uuid for node in others.values()
               if node.group == self.group_uuid]
    run_nodes = [master] + members
    return (run_nodes, run_nodes)

  def Exec(self, feedback_fn):
    """Rename the node group.

    """
    group = self.cfg.GetNodeGroup(self.group_uuid)
    if group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))
    group.name = self.op.new_name
    self.cfg.Update(group, feedback_fn)
    return self.op.new_name
class LUGroupEvacuate(LogicalUnit):
  """Logical unit for evacuating all instances out of a node group."""
  HPATH = "group-evacuate"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False
  def ExpandNames(self):
    """Resolve source/target groups and declare lock levels."""
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    if self.op.target_groups:
      self.req_target_uuids = [self.cfg.LookupNodeGroup(g)
                               for g in self.op.target_groups]
    else:
      self.req_target_uuids = []
    if self.group_uuid in self.req_target_uuids:
      raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
                                 " as a target group (targets are %s)" %
                                 (self.group_uuid,
                                  utils.CommaJoin(self.req_target_uuids)),
                                 errors.ECODE_INVAL)
    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
    # All locks are shared; actual moves happen in the jobs submitted later.
    self.share_locks = ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }
  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]
      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))
    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      if self.req_target_uuids:
        lock_groups = set([self.group_uuid] + self.req_target_uuids)
        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lock_groups.update(group_uuid
                           for instance_name in
                             self.owned_locks(locking.LEVEL_INSTANCE)
                           for group_uuid in
                             self.cfg.GetInstanceNodeGroups(
                               self.cfg.GetInstanceInfoByName(instance_name)
                                 .uuid))
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be evacuated which
      # contain actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()
      # Lock all nodes in group to be evacuated and target groups
      owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
      assert self.group_uuid in owned_groups
      member_node_uuids = [node_uuid
                           for group in owned_groups
                           for node_uuid in
                             self.cfg.GetNodeGroup(group).members]
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)
  def CheckPrereq(self):
    """Verify the optimistic locking and compute the list of target groups."""
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    assert owned_groups.issuperset(self.req_target_uuids)
    assert self.group_uuid in owned_groups
    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)
    # Get instance information
    self.instances = \
      dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))
    # Check if node groups for locked instances are still correct
    CheckInstancesNodeGroups(self.cfg, self.instances,
                             owned_groups, owned_node_uuids, self.group_uuid)
    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = self.req_target_uuids
    else:
      # All groups except the one to be evacuated are potential targets
      self.target_uuids = [group_uuid for group_uuid in owned_groups
                           if group_uuid != self.group_uuid]
      if not self.target_uuids:
        raise errors.OpPrereqError("There are no possible target groups",
                                   errors.ECODE_INVAL)
  def BuildHooksEnv(self):
    """Build hooks env.
    """
    return {
      "GROUP_NAME": self.op.group_name,
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }
  def BuildHooksNodes(self):
    """Build hooks nodes.
    """
    mn = self.cfg.GetMasterNode()
    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
    run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
    return (run_nodes, run_nodes)
  @staticmethod
  def _MigrateToFailover(op):
    """Return an equivalent failover opcode for a migrate one.
    If the argument is not a failover opcode, return it unchanged.
    """
    if not isinstance(op, opcodes.OpInstanceMigrate):
      return op
    else:
      return opcodes.OpInstanceFailover(
        instance_name=op.instance_name,
        instance_uuid=getattr(op, "instance_uuid", None),
        target_node=getattr(op, "target_node", None),
        target_node_uuid=getattr(op, "target_node_uuid", None),
        ignore_ipolicy=op.ignore_ipolicy,
        cleanup=op.cleanup)
  def Exec(self, feedback_fn):
    """Ask the iallocator for a move plan and submit the resulting jobs."""
    inst_names = list(self.owned_locks(locking.LEVEL_INSTANCE))
    assert self.group_uuid not in self.target_uuids
    req = iallocator.IAReqGroupChange(instances=inst_names,
                                      target_groups=self.target_uuids)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
    ial.Run(self.op.iallocator)
    if not ial.success:
      raise errors.OpPrereqError("Can't compute group evacuation using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
    self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
                 len(jobs), self.op.group_name)
    if self.op.force_failover:
      # Replace every migrate opcode with its failover equivalent.
      self.LogInfo("Will insist on failovers")
      jobs = [[self._MigrateToFailover(op) for op in job] for job in jobs]
    if self.op.sequential:
      # Chain each job onto its predecessor so they run one at a time.
      self.LogInfo("Jobs will be submitted to run sequentially")
      for job in jobs[1:]:
        for op in job:
          op.depends = [(-1, ["error", "success"])]
    return ResultWithJobs(jobs)
class LUGroupVerifyDisks(NoHooksLU):
  """Verifies the status of all disks in a node group.
  """
  REQ_BGL = False
  def ExpandNames(self):
    """Declare shared locks for the group, its instances and their nodes."""
    # Raises errors.OpPrereqError on its own if group can't be found
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.share_locks = ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }
    self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
    self.dont_collate_locks[locking.LEVEL_NODE] = True
  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]
      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))
    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        set([self.group_uuid] +
            # Lock all groups used by instances optimistically; this requires
            # going via the node before it's locked, requiring verification
            # later on
            [group_uuid
             for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
             for group_uuid in
               self.cfg.GetInstanceNodeGroups(
                 self.cfg.GetInstanceInfoByName(instance_name).uuid)])
    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be verified which contain
      # actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()
      # Lock all nodes in group to be verified
      assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
      member_node_uuids = self.cfg.GetNodeGroup(self.group_uuid).members
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)
  def CheckPrereq(self):
    """Verify the optimistically acquired instance/group locks."""
    owned_inst_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    assert self.group_uuid in owned_groups
    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_inst_names)
    # Get instance information
    self.instances = dict(self.cfg.GetMultiInstanceInfoByName(owned_inst_names))
    # Check if node groups for locked instances are still correct
    CheckInstancesNodeGroups(self.cfg, self.instances,
                             owned_groups, owned_node_uuids, self.group_uuid)
  def _VerifyInstanceLvs(self, node_errors, offline_disk_instance_names,
                         missing_disks):
    """Enumerate LVs per node and record offline/missing instance volumes.

    Results are accumulated into the three mutable arguments.
    """
    node_lv_to_inst = MapInstanceLvsToNodes(
      self.cfg,
      [inst for inst in self.instances.values() if inst.disks_active])
    if node_lv_to_inst:
      node_uuids = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
                                  set(self.cfg.GetVmCapableNodeList()))
      node_lvs = self.rpc.call_lv_list(node_uuids, [])
      for (node_uuid, node_res) in node_lvs.items():
        if node_res.offline:
          continue
        msg = node_res.fail_msg
        if msg:
          logging.warning("Error enumerating LVs on node %s: %s",
                          self.cfg.GetNodeName(node_uuid), msg)
          node_errors[node_uuid] = msg
          continue
        for lv_name, (_, _, lv_online) in node_res.payload.items():
          inst = node_lv_to_inst.pop((node_uuid, lv_name), None)
          if not lv_online and inst is not None:
            offline_disk_instance_names.add(inst.name)
      # any leftover items in node_lv_to_inst are missing LVs, let's arrange
      # the data better
      for key, inst in node_lv_to_inst.items():
        missing_disks.setdefault(inst.name, []).append(list(key))
  def _VerifyDrbdStates(self, node_errors, offline_disk_instance_names):
    """Ask each node which DRBD disks need activation; record the instances.

    Results are accumulated into the two mutable arguments.
    """
    node_to_inst = {}
    for inst in self.instances.values():
      disks = self.cfg.GetInstanceDisks(inst.uuid)
      if not (inst.disks_active and
              utils.AnyDiskOfType(disks, [constants.DT_DRBD8])):
        continue
      secondary_nodes = self.cfg.GetInstanceSecondaryNodes(inst.uuid)
      for node_uuid in itertools.chain([inst.primary_node],
                                       secondary_nodes):
        node_to_inst.setdefault(node_uuid, []).append(inst)
    for (node_uuid, insts) in node_to_inst.items():
      node_disks = [(self.cfg.GetInstanceDisks(inst.uuid), inst)
                    for inst in insts]
      node_res = self.rpc.call_drbd_needs_activation(node_uuid, node_disks)
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error getting DRBD status on node %s: %s",
                        self.cfg.GetNodeName(node_uuid), msg)
        node_errors[node_uuid] = msg
        continue
      faulty_disk_uuids = set(node_res.payload)
      for inst in self.instances.values():
        disks = self.cfg.GetInstanceDisks(inst.uuid)
        inst_disk_uuids = set([disk.uuid for disk in disks])
        if inst_disk_uuids.intersection(faulty_disk_uuids):
          offline_disk_instance_names.add(inst.name)
  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.
    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) pairs for
        missing volumes)
    """
    node_errors = {}
    offline_disk_instance_names = set()
    missing_disks = {}
    self._VerifyInstanceLvs(node_errors, offline_disk_instance_names,
                            missing_disks)
    self._VerifyDrbdStates(node_errors, offline_disk_instance_names)
    return (node_errors, list(offline_disk_instance_names), missing_disks)
|
|
import random
import string
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Categorical, date_range, read_csv
from pandas.compat import cStringIO as StringIO
from ..pandas_vb_common import BaseIO
class ToCSV(BaseIO):
    """Benchmark DataFrame.to_csv for wide, long and mixed-dtype frames."""

    fname = '__test__.csv'
    params = ['wide', 'long', 'mixed']
    param_names = ['kind']

    def setup(self, kind):
        # Build all three candidate frames in the same order as before, then
        # keep only the one requested by the parametrized `kind`.
        frames = {}
        frames['wide'] = DataFrame(np.random.randn(3000, 30))
        frames['long'] = DataFrame({'A': np.arange(50000),
                                    'B': np.arange(50000) + 1.,
                                    'C': np.arange(50000) + 2.,
                                    'D': np.arange(50000) + 3.})
        mixed = DataFrame({'float': np.random.randn(5000),
                           'int': np.random.randn(5000).astype(int),
                           'bool': (np.arange(5000) % 2) == 0,
                           'datetime': date_range('2001',
                                                  freq='s',
                                                  periods=5000),
                           'object': ['foo'] * 5000})
        # Introduce missing values so NaN formatting is part of the benchmark.
        mixed.loc[30:500, 'float'] = np.nan
        frames['mixed'] = mixed
        self.df = frames[kind]

    def time_frame(self, kind):
        self.df.to_csv(self.fname)
class ToCSVDatetime(BaseIO):
    """Benchmark ``to_csv`` date formatting on a datetime-indexed frame."""

    fname = '__test__.csv'

    def setup(self):
        idx = date_range('1/1/2000', periods=1000)
        self.data = DataFrame(idx, index=idx)

    def time_frame_date_formatting(self):
        self.data.to_csv(self.fname, date_format='%Y%m%d')
class StringIORewind(object):
    """Mixin that rewinds a StringIO so repeated timings see full input."""

    def data(self, stringio_object):
        # Reset the read cursor to the start before handing the buffer back.
        stringio_object.seek(0)
        return stringio_object
class ReadCSVDInferDatetimeFormat(StringIORewind):
    """Benchmark datetime parsing with and without format inference."""

    params = ([True, False], ['custom', 'iso8601', 'ymd'])
    param_names = ['infer_datetime_format', 'format']

    def setup(self, infer_datetime_format, format):
        idx = date_range('1/1/2000', periods=1000)
        dt_format = {'custom': '%m/%d/%Y %H:%M:%S.%f',
                     'iso8601': '%Y-%m-%d %H:%M:%S',
                     'ymd': '%Y%m%d'}[format]
        self.StringIO_input = StringIO('\n'.join(
            idx.strftime(dt_format).tolist()))

    def time_read_csv(self, infer_datetime_format, format):
        read_csv(self.data(self.StringIO_input),
                 header=None, names=['foo'], parse_dates=['foo'],
                 infer_datetime_format=infer_datetime_format)
class ReadCSVSkipRows(BaseIO):
    """Benchmark ``read_csv`` with and without ``skiprows``."""

    fname = '__test__.csv'
    params = [None, 10000]
    param_names = ['skiprows']

    def setup(self, skiprows):
        nrows = 20000
        columns = {'float1': np.random.randn(nrows),
                   'float2': np.random.randn(nrows),
                   'string1': ['foo'] * nrows,
                   'bool1': [True] * nrows,
                   'int1': np.random.randint(0, nrows, size=nrows)}
        frame = DataFrame(columns, index=tm.makeStringIndex(nrows))
        frame.to_csv(self.fname)

    def time_skipprows(self, skiprows):
        read_csv(self.fname, skiprows=skiprows)
class ReadUint64Integers(StringIORewind):
    """Benchmark parsing uint64 columns, incl. negative and NA entries."""

    def setup(self):
        self.na_values = [2 ** 63 + 500]
        values = np.arange(10000).astype('uint64') + 2 ** 63
        # Pure uint64 input.
        self.data1 = StringIO('\n'.join(values.astype(str).tolist()))
        # Same input with one negative value forcing object fallback.
        values = values.astype(object)
        values[500] = -1
        self.data2 = StringIO('\n'.join(values.astype(str).tolist()))

    def time_read_uint64(self):
        read_csv(self.data(self.data1), header=None, names=['foo'])

    def time_read_uint64_neg_values(self):
        read_csv(self.data(self.data2), header=None, names=['foo'])

    def time_read_uint64_na_values(self):
        read_csv(self.data(self.data1), header=None, names=['foo'],
                 na_values=self.na_values)
class ReadCSVThousands(BaseIO):
    """Benchmark ``read_csv`` thousands-separator handling."""

    fname = '__test__.csv'
    params = ([',', '|'], [None, ','])
    param_names = ['sep', 'thousands']

    def setup(self, sep, thousands):
        nrows, ncols = 10000, 8
        values = np.random.randn(nrows, ncols)
        values = values * np.random.randint(100, 10000, (nrows, ncols))
        frame = DataFrame(values)
        if thousands is not None:
            # Pre-format every cell with the requested grouping character.
            template = '{' + ':{}'.format(thousands) + '}'
            frame = frame.applymap(lambda value: template.format(value))
        frame.to_csv(self.fname, sep=sep)

    def time_thousands(self, sep, thousands):
        read_csv(self.fname, sep=sep, thousands=thousands)
class ReadCSVComment(StringIORewind):
    """Benchmark comment stripping in ``read_csv``."""

    def setup(self):
        lines = ['A,B,C']
        lines.extend(['1,2,3 # comment'] * 100000)
        self.StringIO_input = StringIO('\n'.join(lines))

    def time_comment(self):
        read_csv(self.data(self.StringIO_input), comment='#',
                 header=None, names=list('abc'))
class ReadCSVFloatPrecision(StringIORewind):
    """Benchmark float parsing under each available precision mode."""

    params = ([',', ';'], ['.', '_'], [None, 'high', 'round_trip'])
    param_names = ['sep', 'decimal', 'float_precision']

    def setup(self, sep, decimal, float_precision):
        # Fifteen random 28-digit fractional parts.
        floats = [''.join(random.choice(string.digits) for _ in range(28))
                  for _ in range(15)]
        row = sep.join(['0{}'.format(decimal) + '{}'] * 3) + '\n'
        block = (row * 5).format(*floats)
        self.StringIO_input = StringIO(block * 200)  # 1000 x 3 strings csv

    def time_read_csv(self, sep, decimal, float_precision):
        read_csv(self.data(self.StringIO_input), sep=sep, header=None,
                 names=list('abc'), float_precision=float_precision)

    def time_read_csv_python_engine(self, sep, decimal, float_precision):
        read_csv(self.data(self.StringIO_input), sep=sep, header=None,
                 engine='python', float_precision=None, names=list('abc'))
class ReadCSVCategorical(BaseIO):
    """Benchmark categorical conversion during vs. after CSV parsing."""

    fname = '__test__.csv'

    def setup(self):
        nrows = 100000
        choices = ['aaaaaaaa', 'bbbbbbb', 'cccccccc', 'dddddddd', 'eeeeeeee']
        frame = DataFrame(np.random.choice(choices, (nrows, 3)),
                          columns=list('abc'))
        frame.to_csv(self.fname, index=False)

    def time_convert_post(self):
        read_csv(self.fname).apply(Categorical)

    def time_convert_direct(self):
        read_csv(self.fname, dtype='category')
class ReadCSVParseDates(StringIORewind):
    """Benchmark multi-column date parsing against a single-column baseline."""

    def setup(self):
        data = """{},19:00:00,18:56:00,0.8100,2.8100,7.2000,0.0000,280.0000\n
        {},20:00:00,19:56:00,0.0100,2.2100,7.2000,0.0000,260.0000\n
        {},21:00:00,20:56:00,-0.5900,2.2100,5.7000,0.0000,280.0000\n
        {},21:00:00,21:18:00,-0.9900,2.0100,3.6000,0.0000,270.0000\n
        {},22:00:00,21:56:00,-0.5900,1.7100,5.1000,0.0000,290.0000\n
        """
        # Fill the five placeholders with identical station/date pairs.
        filled = data.format(*(['KORD,19990127'] * 5))
        self.StringIO_input = StringIO(filled)

    def time_multiple_date(self):
        read_csv(self.data(self.StringIO_input), sep=',', header=None,
                 names=list(string.digits[:9]),
                 parse_dates=[[1, 2], [1, 3]])

    def time_baseline(self):
        read_csv(self.data(self.StringIO_input), sep=',', header=None,
                 parse_dates=[1],
                 names=list(string.digits[:9]))
from ..pandas_vb_common import setup # noqa: F401
|
|
# -*- coding: utf-8 -*-
from logging import getLogger
from os.path import join
from django.conf import settings
from django.contrib.auth import get_permission_codename
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.shortcuts import get_object_or_404
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.timezone import now
from django.utils.translation import get_language, ugettext_lazy as _
from cms import constants
from cms.constants import PUBLISHER_STATE_DEFAULT, PUBLISHER_STATE_PENDING, PUBLISHER_STATE_DIRTY, TEMPLATE_INHERITANCE_MAGIC
from cms.exceptions import PublicIsUnmodifiable, LanguageError, PublicVersionNeeded
from cms.models.managers import PageManager, PagePermissionsPermissionManager
from cms.models.metaclasses import PageMetaClass
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.publisher.errors import PublisherCantPublish
from cms.utils import i18n, page as page_utils
from cms.utils.conf import get_cms_setting
from cms.utils.copy_plugins import copy_plugins_to
from cms.utils.helpers import reversion_register
from menus.menu_pool import menu_pool
from treebeard.mp_tree import MP_Node
logger = getLogger(__name__)
@python_2_unicode_compatible
class Page(six.with_metaclass(PageMetaClass, MP_Node)):
"""
A simple hierarchical page model
"""
LIMIT_VISIBILITY_IN_MENU_CHOICES = (
(constants.VISIBILITY_USERS, _('for logged in users only')),
(constants.VISIBILITY_ANONYMOUS, _('for anonymous users only')),
)
TEMPLATE_DEFAULT = TEMPLATE_INHERITANCE_MAGIC if get_cms_setting('TEMPLATE_INHERITANCE') else get_cms_setting('TEMPLATES')[0][0]
X_FRAME_OPTIONS_INHERIT = 0
X_FRAME_OPTIONS_DENY = 1
X_FRAME_OPTIONS_SAMEORIGIN = 2
X_FRAME_OPTIONS_ALLOW = 3
X_FRAME_OPTIONS_CHOICES = (
(X_FRAME_OPTIONS_INHERIT, _('Inherit from parent page')),
(X_FRAME_OPTIONS_DENY, _('Deny')),
(X_FRAME_OPTIONS_SAMEORIGIN, _('Only this website')),
(X_FRAME_OPTIONS_ALLOW, _('Allow'))
)
template_choices = [(x, _(y)) for x, y in get_cms_setting('TEMPLATES')]
created_by = models.CharField(
_("created by"), max_length=constants.PAGE_USERNAME_MAX_LENGTH,
editable=False)
changed_by = models.CharField(
_("changed by"), max_length=constants.PAGE_USERNAME_MAX_LENGTH,
editable=False)
parent = models.ForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
creation_date = models.DateTimeField(auto_now_add=True)
changed_date = models.DateTimeField(auto_now=True)
publication_date = models.DateTimeField(_("publication date"), null=True, blank=True, help_text=_(
'When the page should go live. Status must be "Published" for page to go live.'), db_index=True)
publication_end_date = models.DateTimeField(_("publication end date"), null=True, blank=True,
help_text=_('When to expire the page. Leave empty to never expire.'),
db_index=True)
#
# Please use toggle_in_navigation() instead of affecting this property
# directly so that the cms page cache can be invalidated as appropriate.
#
in_navigation = models.BooleanField(_("in navigation"), default=True, db_index=True)
soft_root = models.BooleanField(_("soft root"), db_index=True, default=False,
help_text=_("All ancestors will not be displayed in the navigation"))
reverse_id = models.CharField(_("id"), max_length=40, db_index=True, blank=True, null=True, help_text=_(
"A unique identifier that is used with the page_url templatetag for linking to this page"))
navigation_extenders = models.CharField(_("attached menu"), max_length=80, db_index=True, blank=True, null=True)
template = models.CharField(_("template"), max_length=100, choices=template_choices,
help_text=_('The template used to render the content.'),
default=TEMPLATE_DEFAULT)
site = models.ForeignKey(Site, help_text=_('The site the page is accessible at.'), verbose_name=_("site"),
related_name='djangocms_pages')
login_required = models.BooleanField(_("login required"), default=False)
limit_visibility_in_menu = models.SmallIntegerField(_("menu visibility"), default=None, null=True, blank=True,
choices=LIMIT_VISIBILITY_IN_MENU_CHOICES, db_index=True,
help_text=_("limit when this page is visible in the menu"))
is_home = models.BooleanField(editable=False, db_index=True, default=False)
application_urls = models.CharField(_('application'), max_length=200, blank=True, null=True, db_index=True)
application_namespace = models.CharField(_('application instance name'), max_length=200, blank=True, null=True)
# Placeholders (plugins)
placeholders = models.ManyToManyField(Placeholder, editable=False)
# Publisher fields
publisher_is_draft = models.BooleanField(default=True, editable=False, db_index=True)
# This is misnamed - the one-to-one relation is populated on both ends
publisher_public = models.OneToOneField('self', related_name='publisher_draft', null=True, editable=False)
languages = models.CharField(max_length=255, editable=False, blank=True, null=True)
# If the draft is loaded from a reversion version save the revision id here.
revision_id = models.PositiveIntegerField(default=0, editable=False)
# X Frame Options for clickjacking protection
xframe_options = models.IntegerField(
choices=X_FRAME_OPTIONS_CHOICES,
default=getattr(settings, 'CMS_DEFAULT_X_FRAME_OPTIONS', X_FRAME_OPTIONS_INHERIT)
)
# Managers
objects = PageManager()
permissions = PagePermissionsPermissionManager()
class Meta:
permissions = (
('view_page', 'Can view page'),
('publish_page', 'Can publish page'),
('edit_static_placeholder', 'Can edit static placeholders'),
)
unique_together = (("publisher_is_draft", "site", "application_namespace"),
("reverse_id", "site", "publisher_is_draft"))
verbose_name = _('page')
verbose_name_plural = _('pages')
ordering = ('path',)
app_label = 'cms'
def __str__(self):
try:
title = self.get_menu_title(fallback=True)
except LanguageError:
try:
title = self.title_set.all()[0]
except IndexError:
title = None
if title is None:
title = u""
return force_text(title)
    def __repr__(self):
        # Use the default object repr instead of one based on __str__:
        # building the title during page creation causes infinite
        # recursion when adding new pages.
        return object.__repr__(self)
def is_dirty(self, language):
state = self.get_publisher_state(language)
return state == PUBLISHER_STATE_DIRTY or state == PUBLISHER_STATE_PENDING
def get_absolute_url(self, language=None, fallback=True):
if not language:
language = get_language()
with i18n.force_language(language):
if self.is_home:
return reverse('pages-root')
path = self.get_path(language, fallback) or self.get_slug(language, fallback)
return reverse('pages-details-by-slug', kwargs={"slug": path})
def get_public_url(self, language=None, fallback=True):
"""
Returns the URL of the published version of the current page.
Returns empty string if the page is not published.
"""
try:
return self.get_public_object().get_absolute_url(language, fallback)
except:
return ''
def get_draft_url(self, language=None, fallback=True):
"""
Returns the URL of the draft version of the current page.
Returns empty string if the draft page is not available.
"""
try:
return self.get_draft_object().get_absolute_url(language, fallback)
except:
return ''
    def move_page(self, target, position='first-child'):
        """
        Called from admin interface when page is moved. Should be used on
        all the places which are changing page position. Used like an interface
        to mptt, but after move is done page_moved signal is fired.

        Note for issue #1166: url conflicts are handled by updated
        check_title_slugs, overwrite_url on the moved page don't need any check
        as it remains the same regardless of the page position in the tree

        :param target: page used as the anchor for the move
        :param position: treebeard position relative to ``target``
            ('first-child', 'last-child', 'left' or 'right')
        """
        assert self.publisher_is_draft
        # do not mark the page as dirty after page moves
        self._publisher_keep_state = True
        # readability counts :)
        is_inherited_template = self.template == constants.TEMPLATE_INHERITANCE_MAGIC
        # make sure move_page does not break when using INHERIT template
        # and moving to a top level position
        if position in ('left', 'right') and not target.parent and is_inherited_template:
            self.template = self.get_template()
        # NOTE(review): when moving to the right of a target that has a
        # public version, the public tree root apparently holds the real
        # sibling ordering, so the move is redirected to the public page;
        # confirm against the treebeard tree layout.
        if target.publisher_public_id and position == 'right':
            public = target.publisher_public
            if target.get_root().get_next_sibling().pk == public.get_root().pk:
                target = target.publisher_public
            else:
                logger.warning('tree may need rebuilding: run manage.py cms fix-tree')
        if position == 'first-child' or position == 'last-child':
            self.parent_id = target.pk
        else:
            # 'left'/'right' moves make this page a sibling of target.
            self.parent_id = target.parent_id
        self.save()
        moved_page = self.move(target, pos=position)
        # fire signal
        import cms.signals as cms_signals
        cms_signals.page_moved.send(sender=Page, instance=moved_page)
        # check the slugs
        page_utils.check_title_slugs(moved_page)
        ## Make sure to update the slug and path of the target page.
        page_utils.check_title_slugs(target)
        if self.publisher_public_id:
            # Ensure we have up to date mptt properties
            public_page = Page.objects.get(pk=self.publisher_public_id)
            # Ensure that the page is in the right position and save it
            moved_page._publisher_save_public(public_page)
            public_page = public_page.reload()
            cms_signals.page_moved.send(sender=Page, instance=public_page)
            page_utils.check_title_slugs(public_page)
        from cms.views import invalidate_cms_page_cache
        invalidate_cms_page_cache()
    def _copy_titles(self, target, language, published):
        """
        Copy all the titles to a new page (which must have a pk).

        :param target: The page where the new titles should be stored
        :param language: only titles in this language are copied
        :param published: publication flag set on the copied titles
        """
        from .titlemodels import Title
        # Map language -> pk of titles already on target, so existing
        # titles are overwritten instead of duplicated.
        old_titles = dict(target.title_set.filter(language=language).values_list('language', 'pk'))
        for title in self.title_set.filter(language=language):
            old_pk = title.pk
            # If an old title exists, overwrite. Otherwise create new
            title.pk = old_titles.pop(title.language, None)
            title.page = target
            title.publisher_is_draft = target.publisher_is_draft
            title.publisher_public_id = old_pk
            if published:
                title.publisher_state = PUBLISHER_STATE_DEFAULT
            else:
                title.publisher_state = PUBLISHER_STATE_PENDING
            title.published = published
            # Bookkeeping save: do not re-mark the title as dirty.
            title._publisher_keep_state = True
            title.save()
            # Link the source (draft) title back to the fresh copy.
            old_title = Title.objects.get(pk=old_pk)
            old_title.publisher_public = title
            old_title.publisher_state = title.publisher_state
            old_title.published = True
            old_title._publisher_keep_state = True
            old_title.save()
            if hasattr(self, 'title_cache'):
                self.title_cache[language] = old_title
        if old_titles:
            # Anything left in old_titles was not overwritten: stale
            # titles on target, delete them.
            Title.objects.filter(id__in=old_titles.values()).delete()
    def _copy_contents(self, target, language):
        """
        Copy all the plugins to a new page.

        :param target: The page where the new content should be stored
        :param language: only plugins in this language are affected
        """
        # TODO: Make this into a "graceful" copy instead of deleting and overwriting
        # copy the placeholders (and plugins on those placeholders!)
        from cms.plugin_pool import plugin_pool
        plugin_pool.set_plugin_meta()
        # Delete target's existing plugins first, deepest first
        # ('-depth') so children go before their parents.
        for plugin in CMSPlugin.objects.filter(placeholder__page=target, language=language).order_by('-depth'):
            inst, cls = plugin.get_plugin_instance()
            if inst and getattr(inst, 'cmsplugin_ptr_id', False):
                inst.cmsplugin_ptr = plugin
                inst.cmsplugin_ptr._no_reorder = True
                inst.delete(no_mp=True)
            else:
                # No concrete instance: delete the base plugin row itself.
                plugin._no_reorder = True
                plugin.delete(no_mp=True)
        new_phs = []
        target_phs = target.placeholders.all()
        for ph in self.get_placeholders():
            plugins = ph.get_plugins_list(language)
            found = False
            # Reuse target's placeholder with the same slot when present.
            for target_ph in target_phs:
                if target_ph.slot == ph.slot:
                    ph = target_ph
                    found = True
                    break
            if not found:
                ph.pk = None  # make a new instance
                ph.save()
                new_phs.append(ph)
                # update the page copy
            if plugins:
                copy_plugins_to(plugins, ph, no_signals=True)
        target.placeholders.add(*new_phs)
def _copy_attributes(self, target, clean=False):
"""
Copy all page data to the target. This excludes parent and other values
that are specific to an exact instance.
:param target: The Page to copy the attributes to
"""
if not clean:
target.publication_date = self.publication_date
target.publication_end_date = self.publication_end_date
target.reverse_id = self.reverse_id
target.login_required = self.login_required
target.in_navigation = self.in_navigation
target.soft_root = self.soft_root
target.limit_visibility_in_menu = self.limit_visibility_in_menu
target.navigation_extenders = self.navigation_extenders
target.application_urls = self.application_urls
target.application_namespace = self.application_namespace
target.template = self.template
target.site_id = self.site_id
target.xframe_options = self.xframe_options
    def copy_page(self, target, site, position='first-child',
                  copy_permissions=True):
        """
        Copy a page [ and all its descendants to a new location ]
        Doesn't checks for add page permissions anymore, this is done in PageAdmin.

        Note for issue #1166: when copying pages there is no need to check for
        conflicting URLs as pages are copied unpublished.

        :param target: anchor page for the copy (may be None)
        :param site: the Site the copies will belong to
        :param position: treebeard position relative to ``target``
        :param copy_permissions: also clone PagePermission rows when True
        :returns: the copy of ``self`` (root of the copied subtree)
        """
        from cms.extensions import extension_pool
        if not self.publisher_is_draft:
            raise PublicIsUnmodifiable("copy page is not allowed for public pages")
        # 'path' ordering yields a depth-first traversal, so parents are
        # always copied before their children.
        pages = list(self.get_descendants(True).order_by('path'))
        site_reverse_ids = Page.objects.filter(site=site, reverse_id__isnull=False).values_list('reverse_id', flat=True)
        # ``tree`` tracks the chain of ancestors (old_pk -> new page) used
        # to re-parent each copied descendant.
        if target:
            target.old_pk = -1
            if position == "first-child" or position == "last-child":
                tree = [target]
            elif target.parent_id:
                tree = [target.parent]
            else:
                tree = []
        else:
            tree = []
        if tree:
            tree[0].old_pk = tree[0].pk
        first = True
        first_page = None
        # loop over all affected pages (self is included in descendants)
        for page in pages:
            titles = list(page.title_set.all())
            # get all current placeholders (->plugins)
            placeholders = list(page.get_placeholders())
            origin_id = page.id
            # create a copy of this page by setting pk = None (=new instance)
            page.old_pk = old_pk = page.pk
            page.pk = None
            page.path = None
            page.depth = None
            page.numchild = 0
            page.publisher_public_id = None
            page.is_home = False
            page.site = site
            # only set reverse_id on standard copy
            if page.reverse_id in site_reverse_ids:
                page.reverse_id = None
            if first:
                # The subtree root: attach to the anchor and move into place.
                first = False
                if tree:
                    page.parent = tree[0]
                else:
                    page.parent = None
                page.save()
                first_page = page
                if target:
                    page = page.move(target, pos=position)
                page.old_pk = old_pk
            else:
                # Descendant: find its copied parent in the ancestor chain.
                count = 1
                found = False
                for prnt in tree:
                    if tree[0].pk == self.pk and page.parent_id == self.pk and count == 1:
                        count += 1
                        continue
                    elif prnt.old_pk == page.parent_id:
                        page.parent_id = prnt.pk
                        # Truncate the chain back to this ancestor level.
                        tree = tree[0:count]
                        found = True
                        break
                    count += 1
                if not found:
                    page.parent = None
                    page.parent_id = None
                page.save()
                tree.append(page)
            # copy permissions if necessary
            if get_cms_setting('PERMISSION') and copy_permissions:
                from cms.models.permissionmodels import PagePermission
                for permission in PagePermission.objects.filter(page__id=origin_id):
                    permission.pk = None
                    permission.page = page
                    permission.save()
            # copy titles of this page
            # NOTE(review): ``draft_titles`` is filled but never read in
            # this method -- looks vestigial; confirm before removing.
            draft_titles = {}
            for title in titles:
                title.pk = None  # setting pk = None creates a new instance
                title.page = page
                if title.publisher_public_id:
                    draft_titles[title.publisher_public_id] = title
                    title.publisher_public = None
                # create slug-copy for standard copy
                title.published = False
                title.slug = page_utils.get_available_slug(title)
                title.save()
            # copy the placeholders (and plugins on those placeholders!)
            for ph in placeholders:
                plugins = ph.get_plugins_list()
                try:
                    ph = page.placeholders.get(slot=ph.slot)
                except Placeholder.DoesNotExist:
                    ph.pk = None  # make a new instance
                    ph.save()
                    page.placeholders.add(ph)
                if plugins:
                    copy_plugins_to(plugins, ph)
            extension_pool.copy_extensions(Page.objects.get(pk=origin_id), page)
        # invalidate the menu for this site
        menu_pool.clear(site_id=site.pk)
        return first_page
    def save(self, no_signals=False, commit=True, **kwargs):
        """
        Args:
            commit: True if model should be really saved

        NOTE(review): ``no_signals`` is accepted but not referenced in this
        method body -- presumably kept for signature compatibility; confirm.
        """
        # delete template cache
        if hasattr(self, '_template_cache'):
            delattr(self, '_template_cache')
        created = not bool(self.pk)
        # Normalize empty strings to None so blank values don't collide
        # under the unique_together constraints.
        if self.reverse_id == "":
            self.reverse_id = None
        if self.application_namespace == "":
            self.application_namespace = None
        # Record who made this change, taken from the thread-local user.
        from cms.utils.permissions import _thread_locals
        user = getattr(_thread_locals, "user", None)
        if user:
            try:
                changed_by = force_text(user)
            except AttributeError:
                # AnonymousUser may not have USERNAME_FIELD
                changed_by = "anonymous"
            else:
                # limit changed_by and created_by to avoid problems with Custom User Model
                if len(changed_by) > constants.PAGE_USERNAME_MAX_LENGTH:
                    changed_by = u'{0}... (id={1})'.format(
                        changed_by[:constants.PAGE_USERNAME_MAX_LENGTH - 15],
                        user.pk,
                    )
            self.changed_by = changed_by
        else:
            # No request user available (management command, cron, ...).
            self.changed_by = "script"
        if created:
            self.created_by = self.changed_by
        if commit:
            if not self.depth:
                # Not yet placed in the tree: let treebeard insert it.
                if self.parent_id:
                    self.parent.add_child(instance=self)
                else:
                    self.add_root(instance=self)
                return #add_root and add_child save as well
            super(Page, self).save(**kwargs)
def save_base(self, *args, **kwargs):
"""Overridden save_base. If an instance is draft, and was changed, mark
it as dirty.
Dirty flag is used for changed nodes identification when publish method
takes place. After current changes are published, state is set back to
PUBLISHER_STATE_DEFAULT (in publish method).
"""
keep_state = getattr(self, '_publisher_keep_state', None)
if self.publisher_is_draft and not keep_state and self.is_new_dirty():
self.title_set.all().update(publisher_state=PUBLISHER_STATE_DIRTY)
if keep_state:
delattr(self, '_publisher_keep_state')
return super(Page, self).save_base(*args, **kwargs)
def is_new_dirty(self):
if self.pk:
fields = [
'publication_date', 'publication_end_date', 'in_navigation', 'soft_root', 'reverse_id',
'navigation_extenders', 'template', 'login_required', 'limit_visibility_in_menu'
]
try:
old_page = Page.objects.get(pk=self.pk)
except Page.DoesNotExist:
return True
for field in fields:
old_val = getattr(old_page, field)
new_val = getattr(self, field)
if not old_val == new_val:
return True
return False
return True
def is_published(self, language, force_reload=False):
return self.get_title_obj(language, False, force_reload=force_reload).published
def toggle_in_navigation(self, set_to=None):
'''
Toggles (or sets) in_navigation and invalidates the cms page cache
'''
old = self.in_navigation
if set_to in [True, False]:
self.in_navigation = set_to
else:
self.in_navigation = not self.in_navigation
self.save()
#
# If there was a change, invalidate the cms page cache
#
if self.in_navigation != old:
from cms.views import invalidate_cms_page_cache
invalidate_cms_page_cache()
return self.in_navigation
def get_publisher_state(self, language, force_reload=False):
try:
return self.get_title_obj(language, False, force_reload=force_reload).publisher_state
except AttributeError:
return None
def set_publisher_state(self, language, state, published=None):
title = self.title_set.get(language=language)
title.publisher_state = state
if published is not None:
title.published = published
title._publisher_keep_state = True
title.save()
if hasattr(self, 'title_cache') and language in self.title_cache:
self.title_cache[language].publisher_state = state
return title
    def publish(self, language):
        """Overrides Publisher method, because there may be some descendants, which
        are waiting for parent to publish, so publish them if possible.

        :param language: language code to publish
        :returns: True if page was successfully published.
        """
        # Publish can only be called on draft pages
        if not self.publisher_is_draft:
            raise PublicIsUnmodifiable('The public instance cannot be published. Use draft.')
        # publish, but only if all parents are published!!
        published = None
        if not self.pk:
            self.save()
        # be sure we have the newest data including mptt
        p = Page.objects.get(pk=self.pk)
        self.path = p.path
        self.depth = p.depth
        self.numchild = p.numchild
        if self._publisher_can_publish():
            if self.publisher_public_id:
                # Ensure we have up to date mptt properties
                public_page = Page.objects.get(pk=self.publisher_public_id)
            else:
                # First publication: create a fresh public counterpart.
                public_page = Page(created_by=self.created_by)
            if not self.publication_date:
                self.publication_date = now()
            self._copy_attributes(public_page)
            # we need to set relate this new public copy to its draft page (self)
            public_page.publisher_public = self
            public_page.publisher_is_draft = False
            # Ensure that the page is in the right position and save it
            self._publisher_save_public(public_page)
            public_page = public_page.reload()
            # Effectively published only if the parent chain is published.
            published = public_page.parent_id is None or public_page.parent.is_published(language)
            if not public_page.pk:
                public_page.save()
            # The target page now has a pk, so can be used as a target
            self._copy_titles(public_page, language, published)
            self._copy_contents(public_page, language)
            # trigger home update
            public_page.save()
            # invalidate the menu for this site
            menu_pool.clear(site_id=self.site_id)
            self.publisher_public = public_page
            published = True
        else:
            # Nothing left to do
            pass
        if not published:
            # Parent not published yet: park this page in PENDING state.
            self.set_publisher_state(language, PUBLISHER_STATE_PENDING, published=True)
        self._publisher_keep_state = True
        self.save()
        # If we are publishing, this page might have become a "home" which
        # would change the path
        if self.is_home:
            for title in self.title_set.all():
                if title.path != '':
                    title._publisher_keep_state = True
                    title.save()
        if not published:
            # was not published, escape
            return
        # Check if there are some children which are waiting for parents to
        # become published.
        from cms.models import Title
        publish_set = list(self.get_descendants().filter(title_set__published=True,
                                                         title_set__language=language).select_related('publisher_public', 'publisher_public__parent').order_by('depth', 'path'))
        #prefetch the titles
        publish_ids = {}
        for page in publish_set:
            publish_ids[page.pk] = None
            if page.publisher_public_id:
                publish_ids[page.publisher_public.pk] = None
        titles = Title.objects.filter(page__pk__in=publish_ids.keys(), language=language)
        for title in titles:
            publish_ids[title.page_id] = title
        # Walk descendants top-down and flip PENDING titles to published
        # where the public parent chain now allows it.
        for page in publish_set:
            if page.pk in publish_ids and publish_ids[page.pk]:
                page.title_cache = {}
                page.title_cache[language] = publish_ids[page.pk]
            if page.publisher_public_id:
                if not page.publisher_public.parent_id:
                    page._publisher_save_public(page.publisher_public)
                #query and caching optimization
                if page.publisher_public.parent_id and not page.publisher_public.parent:
                    page.publisher_public.parent = Page.objects.get(pk=page.publisher_public.parent_id)
                if page.publisher_public.parent_id in publish_ids:
                    page.publisher_public.parent.title_cache = {}
                    page.publisher_public.parent.title_cache[language] = publish_ids[page.publisher_public.parent_id]
                if page.publisher_public.parent.is_published(language):
                    if page.publisher_public_id in publish_ids:
                        public_title = publish_ids[page.publisher_public_id]
                    else:
                        public_title = None
                    draft_title = publish_ids[page.pk]
                    if public_title and not public_title.published:
                        public_title._publisher_keep_state = True
                        public_title.published = True
                        public_title.publisher_state = PUBLISHER_STATE_DEFAULT
                        public_title.save()
                    if draft_title.publisher_state == PUBLISHER_STATE_PENDING:
                        draft_title.publisher_state = PUBLISHER_STATE_DEFAULT
                        draft_title._publisher_keep_state = True
                        draft_title.save()
            elif page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
                # No public version yet: publish the descendant recursively.
                page.publish(language)
        # fire signal after publishing is done
        import cms.signals as cms_signals
        cms_signals.post_publish.send(sender=Page, instance=self, language=language)
        from cms.views import invalidate_cms_page_cache
        invalidate_cms_page_cache()
        return published
    def unpublish(self, language):
        """
        Removes this page from the public site

        :param language: language code to unpublish
        :returns: True if this page was successfully unpublished
        """
        # Publish can only be called on draft pages
        if not self.publisher_is_draft:
            raise PublicIsUnmodifiable('The public instance cannot be unpublished. Use draft.')
        # First, make sure we are in the correct state
        title = self.title_set.get(language=language)
        public_title = title.publisher_public
        title.published = False
        title.publisher_state = PUBLISHER_STATE_DIRTY
        title.save()
        if hasattr(self, 'title_cache'):
            self.title_cache[language] = title
        public_title.published = False
        public_title.save()
        public_page = self.publisher_public
        public_placeholders = public_page.get_placeholders()
        # Remove the public plugins for this language only; other
        # languages on the public page remain intact.
        for pl in public_placeholders:
            pl.cmsplugin_set.filter(language=language).delete()
        public_page.save()
        # trigger update home
        self.save()
        # Descendants that depended on this page become PENDING.
        self.mark_descendants_pending(language)
        from cms.views import invalidate_cms_page_cache
        invalidate_cms_page_cache()
        from cms.signals import post_unpublish
        post_unpublish.send(sender=Page, instance=self, language=language)
        return True
    def mark_descendants_pending(self, language):
        """Mark published descendants of the public version as PENDING.

        Used after an unpublish so children wait for the parent to be
        re-published.
        """
        assert self.publisher_is_draft
        # Go through all children of our public instance
        public_page = self.publisher_public
        from cms.models import Title
        if public_page:
            descendants = public_page.get_descendants().filter(title_set__language=language)
            for child in descendants:
                try:
                    child.set_publisher_state(language, PUBLISHER_STATE_PENDING, published=False)
                except Title.DoesNotExist:
                    continue
                # NOTE(review): ``child`` is a public page here, so its
                # ``publisher_public`` points back to the draft page
                # (the relation is populated on both ends).
                draft = child.publisher_public
                if draft and draft.is_published(language) and draft.get_publisher_state(
                        language) == PUBLISHER_STATE_DEFAULT:
                    draft.set_publisher_state(language, PUBLISHER_STATE_PENDING)
    def revert(self, language):
        """Revert the draft version to the same state as the public version

        :param language: language code whose content is reverted
        """
        # Revert can only be called on draft pages
        if not self.publisher_is_draft:
            raise PublicIsUnmodifiable('The public instance cannot be reverted. Use draft.')
        if not self.publisher_public:
            raise PublicVersionNeeded('A public version of this page is needed')
        public = self.publisher_public
        # Copy titles, plugins and attributes from public back onto self.
        public._copy_titles(self, language, public.is_published(language))
        public._copy_contents(self, language)
        public._copy_attributes(self)
        # Draft and public are in sync again for this language.
        self.title_set.filter(language=language).update(publisher_state=PUBLISHER_STATE_DEFAULT, published=True)
        # Discard any reversion bookkeeping on the draft.
        self.revision_id = 0
        self._publisher_keep_state = True
        self.save()
def get_draft_object(self):
if not self.publisher_is_draft:
return self.publisher_draft
return self
def get_public_object(self):
if not self.publisher_is_draft:
return self
return self.publisher_public
def get_languages(self):
if self.languages:
return sorted(self.languages.split(','))
else:
return []
def get_descendants(self, include_self=False):
"""
:returns: A queryset of all the node's descendants as DFS, doesn't
include the node itself
"""
if include_self:
return self.__class__.get_tree(self).filter(site_id=self.site_id)
else:
return self.__class__.get_tree(self).exclude(pk=self.pk).filter(site_id=self.site_id)
def get_cached_ancestors(self):
if not hasattr(self, "ancestors_ascending"):
self.ancestors_ascending = list(self.get_ancestors())
return self.ancestors_ascending
def get_cached_descendants(self):
if not hasattr(self, "_cached_descendants"):
self._cached_descendants = list(self.get_descendants())
return self._cached_descendants
# ## Title object access
def get_title_obj(self, language=None, fallback=True, version_id=None, force_reload=False):
"""Helper function for accessing wanted / current title.
If wanted title doesn't exists, EmptyTitle instance will be returned.
"""
language = self._get_title_cache(language, fallback, version_id, force_reload)
if language in self.title_cache:
return self.title_cache[language]
from cms.models.titlemodels import EmptyTitle
return EmptyTitle(language)
def get_title_obj_attribute(self, attrname, language=None, fallback=True, version_id=None, force_reload=False):
"""Helper function for getting attribute or None from wanted/current title.
"""
try:
attribute = getattr(self.get_title_obj(
language, fallback, version_id, force_reload), attrname)
return attribute
except AttributeError:
return None
def get_path(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the path of the page depending on the given language
"""
return self.get_title_obj_attribute("path", language, fallback, version_id, force_reload)
def get_slug(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the slug of the page depending on the given language
"""
return self.get_title_obj_attribute("slug", language, fallback, version_id, force_reload)
def get_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the title of the page depending on the given language
"""
return self.get_title_obj_attribute("title", language, fallback, version_id, force_reload)
def get_menu_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the menu title of the page depending on the given language
"""
menu_title = self.get_title_obj_attribute("menu_title", language, fallback, version_id, force_reload)
if not menu_title:
return self.get_title(language, True, version_id, force_reload)
return menu_title
def get_placeholders(self):
if not hasattr(self, '_placeholder_cache'):
self._placeholder_cache = self.placeholders.all()
return self._placeholder_cache
def _validate_title(self, title):
from cms.models.titlemodels import EmptyTitle
if isinstance(title, EmptyTitle):
return False
if not title.title or not title.slug:
return False
return True
def get_admin_tree_title(self):
from cms.models.titlemodels import EmptyTitle
language = get_language()
if not hasattr(self, 'title_cache'):
self.title_cache = {}
for title in self.title_set.all():
self.title_cache[title.language] = title
if language not in self.title_cache or not self._validate_title(self.title_cache.get(language, EmptyTitle(language))):
fallback_langs = i18n.get_fallback_languages(language)
found = False
for lang in fallback_langs:
if lang in self.title_cache and self._validate_title(self.title_cache.get(lang, EmptyTitle(lang))):
found = True
language = lang
if not found:
language = None
for lang, item in self.title_cache.items():
if not isinstance(item, EmptyTitle):
language = lang
if not language:
return _("Empty")
title = self.title_cache[language]
if title.title:
return title.title
if title.page_title:
return title.page_title
if title.menu_title:
return title.menu_title
return title.slug
def get_changed_date(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get when this page was last updated
"""
return self.changed_date
def get_changed_by(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get user who last changed this page
"""
return self.changed_by
def get_page_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the page title of the page depending on the given language
"""
page_title = self.get_title_obj_attribute("page_title", language, fallback, version_id, force_reload)
if not page_title:
return self.get_title(language, True, version_id, force_reload)
return page_title
def get_meta_description(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get content for the description meta tag for the page depending on the given language
"""
return self.get_title_obj_attribute("meta_description", language, fallback, version_id, force_reload)
def get_application_urls(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get application urls conf for application hook
"""
return self.application_urls
def get_redirect(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get redirect
"""
return self.get_title_obj_attribute("redirect", language, fallback, version_id, force_reload)
    def _get_title_cache(self, language, fallback, version_id, force_reload):
        # Resolve which language's Title should be used and ensure that
        # self.title_cache is populated for it; returns the resolved language
        # code. May return a language with no cached entry — callers treat
        # that as an EmptyTitle.
        if not language:
            # Default to the active translation.
            language = get_language()
        load = False
        if not hasattr(self, "title_cache") or force_reload:
            load = True
            self.title_cache = {}
        elif language not in self.title_cache:
            if fallback:
                # An already-cached fallback language can satisfy the request
                # without hitting the database.
                fallback_langs = i18n.get_fallback_languages(language)
                for lang in fallback_langs:
                    if lang in self.title_cache:
                        return lang
            load = True
        if load:
            from cms.models.titlemodels import Title
            if version_id:
                # Load titles from a specific reversion snapshot instead of
                # the live database rows.
                from reversion.models import Version
                version = get_object_or_404(Version, pk=version_id)
                revs = [related_version.object_version for related_version in version.revision.version_set.all()]
                for rev in revs:
                    obj = rev.object
                    if obj.__class__ == Title:
                        self.title_cache[obj.language] = obj
            else:
                # Cache every title of this page in one query.
                titles = Title.objects.filter(page=self)
                for title in titles:
                    self.title_cache[title.language] = title
            if language in self.title_cache:
                return language
            else:
                if fallback:
                    # The wanted language is still missing after loading;
                    # return the first configured fallback that is cached.
                    fallback_langs = i18n.get_fallback_languages(language)
                    for lang in fallback_langs:
                        if lang in self.title_cache:
                            return lang
        return language
    def get_template(self):
        """
        get the template of this page if defined or if closer parent if
        defined or DEFAULT_PAGE_TEMPLATE otherwise
        """
        # Memoized per instance.
        if hasattr(self, '_template_cache'):
            return self._template_cache
        template = None
        if self.template:
            if self.template != constants.TEMPLATE_INHERITANCE_MAGIC:
                template = self.template
            else:
                # Template set to "inherit": take the first ancestor (in
                # get_ancestors() order) whose template is concrete.
                # NOTE(review): whether element 0 is the nearest or the
                # topmost ancestor depends on get_ancestors() ordering —
                # confirm against the tree implementation.
                try:
                    template = self.get_ancestors().exclude(
                        template=constants.TEMPLATE_INHERITANCE_MAGIC).values_list('template', flat=True)[0]
                except IndexError:
                    pass
        if not template:
            # Fall back to the first configured template.
            template = get_cms_setting('TEMPLATES')[0][0]
        self._template_cache = template
        return template
def get_template_name(self):
"""
get the textual name (2nd parameter in get_cms_setting('TEMPLATES'))
of the template of this page or of the nearest
ancestor. failing to find that, return the name of the default template.
"""
template = self.get_template()
for t in get_cms_setting('TEMPLATES'):
if t[0] == template:
return t[1]
return _("default")
def has_view_permission(self, request, user=None):
if not user:
user = request.user
from cms.utils.permissions import get_any_page_view_permissions, has_global_page_permission
can_see_unrestricted = get_cms_setting('PUBLIC_FOR') == 'all' or (
get_cms_setting('PUBLIC_FOR') == 'staff' and user.is_staff)
# inherited and direct view permissions
is_restricted = bool(get_any_page_view_permissions(request, self))
if not is_restricted and can_see_unrestricted:
return True
elif not user.is_authenticated():
return False
if not is_restricted:
# a global permission was given to the request's user
if has_global_page_permission(request, self.site_id, user=user, can_view=True):
return True
else:
# a specific permission was granted to the request's user
if self.get_draft_object().has_generic_permission(request, "view", user=user):
return True
# The user has a normal django permission to view pages globally
opts = self._meta
codename = '%s.view_%s' % (opts.app_label, opts.object_name.lower())
return request.user.has_perm(codename)
def has_change_permission(self, request, user=None):
opts = self._meta
if not user:
user = request.user
if user.is_superuser:
return True
return (user.has_perm(opts.app_label + '.' + get_permission_codename('change', opts))
and self.has_generic_permission(request, "change"))
def has_delete_permission(self, request, user=None):
opts = self._meta
if not user:
user = request.user
if user.is_superuser:
return True
return (user.has_perm(opts.app_label + '.' + get_permission_codename('delete', opts))
and self.has_generic_permission(request, "delete"))
def has_publish_permission(self, request, user=None):
if not user:
user = request.user
if user.is_superuser:
return True
opts = self._meta
return (user.has_perm(opts.app_label + '.' + "publish_page")
and self.has_generic_permission(request, "publish"))
has_moderate_permission = has_publish_permission
def has_advanced_settings_permission(self, request, user=None):
return self.has_generic_permission(request, "advanced_settings", user)
def has_change_permissions_permission(self, request, user=None):
"""
Has user ability to change permissions for current page?
"""
return self.has_generic_permission(request, "change_permissions", user)
def has_add_permission(self, request, user=None):
"""
Has user ability to add page under current page?
"""
return self.has_generic_permission(request, "add", user)
def has_move_page_permission(self, request, user=None):
"""Has user ability to move current page?
"""
return self.has_generic_permission(request, "move_page", user)
    def has_generic_permission(self, request, perm_type, user=None):
        """
        Return true if the current user has permission on the page.
        Return the string 'All' if the user has all rights.
        """
        if not user:
            user = request.user
        # Results are memoized per (instance, user, perm_type); checking a
        # different user invalidates the cached value.
        att_name = "permission_%s_cache" % perm_type
        if (not hasattr(self, "permission_user_cache")
            or not hasattr(self, att_name)
            or user.pk != self.permission_user_cache.pk):
            from cms.utils.permissions import has_generic_permission
            self.permission_user_cache = user
            setattr(self, att_name, has_generic_permission(
                self.pk, user, perm_type, self.site_id))
        if getattr(self, att_name):
            # NOTE(review): permission_edit_cache is written here but never
            # read within this class — presumably consumed elsewhere; confirm.
            self.permission_edit_cache = True
        return getattr(self, att_name)
def get_media_path(self, filename):
"""
Returns path (relative to MEDIA_ROOT/MEDIA_URL) to directory for storing
page-scope files. This allows multiple pages to contain files with
identical names without namespace issues. Plugins such as Picture can
use this method to initialise the 'upload_to' parameter for File-based
fields. For example:
image = models.ImageField(
_("image"), upload_to=CMSPlugin.get_media_path)
where CMSPlugin.get_media_path calls self.page.get_media_path
This location can be customised using the CMS_PAGE_MEDIA_PATH setting
"""
return join(get_cms_setting('PAGE_MEDIA_PATH'), "%d" % self.pk, filename)
def reload(self):
"""
Reload a page from the database
"""
return Page.objects.get(pk=self.pk)
def get_object_queryset(self):
"""Returns smart queryset depending on object type - draft / public
"""
qs = self.__class__.objects
return (self.publisher_is_draft and qs.drafts() or qs.public().published())
def _publisher_can_publish(self):
"""Is parent of this object already published?
"""
if self.parent_id:
try:
return bool(self.parent.publisher_public_id)
except AttributeError:
raise PublisherCantPublish
return True
def get_previous_filtered_sibling(self, **filters):
filters.update({
'publisher_is_draft': self.publisher_is_draft
})
filters.update({
'site__id': self.site_id
})
try:
return self.get_siblings().filter(path__lt=self.path, **filters).reverse()[0]
except IndexError:
return None
def get_next_filtered_sibling(self, **filters):
filters.update({
'publisher_is_draft': self.publisher_is_draft
})
filters.update({
'site__id': self.site_id
})
try:
return self.get_siblings().filter(path__gt=self.path, **filters)[0]
except IndexError:
return None
    def _publisher_save_public(self, obj):
        """Mptt specific stuff before the object can be saved, overrides
        original publisher method.
        Args:
            obj - public variant of `self` to be saved.
        """
        # Resolve the public counterpart of our parent, if both exist.
        if self.parent_id and self.parent.publisher_public_id:
            assert self.parent_id == self.parent.pk
            public_parent = Page.objects.get(pk=self.parent.publisher_public_id)
        else:
            public_parent = None
        # Restrict sibling searches to nodes that have a public counterpart
        # under the matching (public) parent.
        filters = dict(publisher_public__isnull=False)
        if public_parent:
            filters['publisher_public__parent__in'] = [public_parent]
        else:
            filters['publisher_public__parent__isnull'] = True
        prev_sibling = self.get_previous_filtered_sibling(**filters)
        public_prev_sib = (prev_sibling.publisher_public if prev_sibling else None)
        if not self.publisher_public_id: # first time published
            # is there anybody on left side?
            if not self.parent_id:
                obj.parent_id = None
                self.add_sibling(pos='right', instance=obj)
            else:
                if public_prev_sib:
                    # Insert the new public node directly right of the public
                    # counterpart of the draft's previous sibling.
                    obj.parent_id = public_prev_sib.parent_id
                    public_prev_sib.add_sibling(pos='right', instance=obj)
                else:
                    if public_parent:
                        # No public sibling on the left: add as a root first,
                        # then move into place as first child of the parent.
                        obj.parent_id = public_parent.pk
                        obj.parent = public_parent
                        obj = obj.add_root(instance=obj)
                        obj = obj.move(target=public_parent, pos='first-child')
        else:
            # check if object was moved / structural tree change
            prev_public_sibling = obj.get_previous_filtered_sibling()
            if self.depth != obj.depth or \
                    public_parent != obj.parent or \
                    public_prev_sib != prev_public_sibling:
                if public_prev_sib:
                    obj.parent_id = public_prev_sib.parent_id
                    obj.save()
                    obj = obj.move(public_prev_sib, pos="right")
                elif public_parent:
                    # move as a first child to parent
                    obj.parent_id = public_parent.pk
                    obj.save()
                    obj = obj.move(target=public_parent, pos='first-child')
                else:
                    # it is a move from the right side or just save
                    next_sibling = self.get_next_filtered_sibling(**filters)
                    if next_sibling and next_sibling.publisher_public_id:
                        obj.parent_id = next_sibling.parent_id
                        obj.save()
                        obj = obj.move(next_sibling.publisher_public, pos="left")
            else:
                obj.save()
def move(self, target, pos=None):
super(Page, self).move(target, pos)
return self.reload()
def rescan_placeholders(self):
"""
Rescan and if necessary create placeholders in the current template.
"""
# inline import to prevent circular imports
from cms.utils.placeholder import get_placeholders
placeholders = get_placeholders(self.get_template())
found = {}
for placeholder in self.placeholders.all():
if placeholder.slot in placeholders:
found[placeholder.slot] = placeholder
for placeholder_name in placeholders:
if placeholder_name not in found:
placeholder = Placeholder.objects.create(slot=placeholder_name)
self.placeholders.add(placeholder)
found[placeholder_name] = placeholder
return found
    def get_xframe_options(self):
        """ Finds X_FRAME_OPTION from tree if inherited """
        # Per-page cached value; only recomputed on a cache miss.
        xframe_options = cache.get('cms:xframe_options:%s' % self.pk)
        if xframe_options is None:
            ancestors = self.get_ancestors()
            # Ignore those pages which just inherit their value
            ancestors = ancestors.exclude(xframe_options=self.X_FRAME_OPTIONS_INHERIT)
            # Now just give me the clickjacking setting (not anything else)
            xframe_options = list(ancestors.values_list('xframe_options', flat=True))
            if self.xframe_options != self.X_FRAME_OPTIONS_INHERIT:
                xframe_options.append(self.xframe_options)
            if len(xframe_options) <= 0:
                # No ancestors were found
                return None
            # NOTE(review): the page's own setting is appended last, yet the
            # first list element is returned — whether element 0 is the
            # intended (nearest) value depends on get_ancestors() ordering;
            # confirm against the tree implementation.
            xframe_options = xframe_options[0]
            cache.set('cms:xframe_options:%s' % self.pk, xframe_options)
        return xframe_options
    def undo(self):
        """
        Revert the current page to the previous revision
        """
        import reversion
        # Get current reversion version by matching the reversion_id for the page
        versions = reversion.get_for_object(self)
        if self.revision_id:
            current_revision = reversion.models.Revision.objects.get(pk=self.revision_id)
        else:
            # No stored revision id: the code assumes versions are ordered
            # newest-first and treats the first entry as current.
            try:
                current_version = versions[0]
            except IndexError as e:
                e.message = "no current revision found"
                raise
            current_revision = current_version.revision
        try:
            # First version whose revision pk is smaller than the current
            # one = the immediately preceding revision.
            previous_version = versions.filter(revision__pk__lt=current_revision.pk)[0]
        except IndexError as e:
            e.message = "no previous revision found"
            raise
        previous_revision = previous_version.revision
        clean = self._apply_revision(previous_revision)
        # Return a freshly loaded page plus whether all slugs stayed valid.
        return Page.objects.get(pk=self.pk), clean
def redo(self):
"""
Revert the current page to the next revision
"""
import reversion
# Get current reversion version by matching the reversion_id for the page
versions = reversion.get_for_object(self)
if self.revision_id:
current_revision = reversion.models.Revision.objects.get(pk=self.revision_id)
else:
try:
current_version = versions[0]
except IndexError as e:
e.message = "no current revision found"
raise
current_revision = current_version.revision
try:
previous_version = versions.filter(revision__pk__gt=current_revision.pk).order_by('pk')[0]
except IndexError as e:
e.message = "no next revision found"
raise
next_revision = previous_version.revision
clean = self._apply_revision(next_revision)
return Page.objects.get(pk=self.pk), clean
def _apply_revision(self, target_revision):
"""
Revert to a specific revision
"""
from cms.utils.page_resolver import is_valid_url
# Get current titles
old_titles = list(self.title_set.all())
# remove existing plugins / placeholders in the current page version
placeholder_ids = self.placeholders.all().values_list('pk', flat=True)
plugins = CMSPlugin.objects.filter(placeholder__in=placeholder_ids).order_by('-depth')
for plugin in plugins:
plugin._no_reorder = True
plugin.delete()
self.placeholders.all().delete()
# populate the page status data from the target version
target_revision.revert(True)
rev_page = get_object_or_404(Page, pk=self.pk)
rev_page.revision_id = target_revision.pk
rev_page.publisher_public_id = self.publisher_public_id
rev_page.save()
# cleanup placeholders
new_placeholders = rev_page.placeholders.all()
slots = {}
for new_ph in new_placeholders:
if not new_ph.slot in slots:
slots[new_ph.slot] = new_ph
else:
if new_ph in placeholder_ids:
new_ph.delete()
elif slots[new_ph.slot] in placeholder_ids:
slots[new_ph.slot].delete()
# check reverted titles for slug collisions
new_titles = rev_page.title_set.all()
clean = True
for title in new_titles:
try:
is_valid_url(title.path, rev_page)
except ValidationError:
for old_title in old_titles:
if old_title.language == title.language:
title.slug = old_title.slug
title.save()
clean = False
return clean
def _reversion():
    """Register the Page model with django-reversion, following titles,
    placeholders and page permissions while excluding publisher state fields.
    """
    reversion_register(
        Page,
        follow=["title_set", "placeholders", "pagepermission_set"],
        exclude_fields=[
            'publisher_is_draft',
            'publisher_public',
            'publisher_state',
        ]
    )
_reversion()
|
|
#!/usr/bin/env python
"""
Core Neurokernel classes.
"""
import atexit
import time
import bidict
from mpi4py import MPI
import numpy as np
import twiggy
from ctx_managers import IgnoreKeyboardInterrupt, OnKeyboardInterrupt, \
ExceptionOnSignal, TryExceptionOnSignal
from mixins import LoggerMixin
import mpi
from tools.logging import setup_logger
from tools.misc import catch_exception, dtype_to_mpi
from tools.mpi import MPIOutput
from pattern import Interface, Pattern
from plsel import SelectorMethods
from pm import BasePortMapper, PortMapper
from routing_table import RoutingTable
from uid import uid
# MPI tag reserved for control messages exchanged with worker nodes:
CTRL_TAG = 1
# MPI tags for distinguishing messages associated with different port types:
GPOT_TAG = CTRL_TAG+1
SPIKE_TAG = CTRL_TAG+2
class Module(mpi.Worker):
"""
Processing module.
This class repeatedly executes a work method until it receives a
quit message via its control network port.
Parameters
----------
sel : str, unicode, or sequence
Path-like selector describing the module's interface of
exposed ports.
sel_in, sel_out, sel_gpot, sel_spike : str, unicode, or sequence
Selectors respectively describing all input, output, graded potential,
and spiking ports in the module's interface.
    data_gpot, data_spike : numpy.ndarray
        Data arrays associated with the graded potential and spiking ports in
        the module's interface. Array length must equal the number
        of ports in a module's interface.
columns : list of str
Interface port attributes.
Network port for controlling the module instance.
ctrl_tag, gpot_tag, spike_tag : int
MPI tags that respectively identify messages containing control data,
graded potential port values, and spiking port values transmitted to
worker nodes.
id : str
Module identifier. If no identifier is specified, a unique
identifier is automatically generated.
device : int
GPU device to use. May be set to None if the module does not perform
GPU processing.
routing_table : neurokernel.routing_table.RoutingTable
Routing table describing data connections between modules. If no routing
table is specified, the module will be executed in isolation.
rank_to_id : bidict.bidict
Mapping between MPI ranks and module object IDs.
    debug : bool
        Debug flag. When True, exceptions raised during the work method
        are not suppressed.
time_sync : bool
Time synchronization flag. When True, debug messages are not emitted
during module synchronization and the time taken to receive all incoming
data is computed.
Attributes
----------
interface : Interface
Object containing information about a module's ports.
pm : dict
`pm['gpot']` and `pm['spike']` are instances of neurokernel.pm.PortMapper that
map a module's ports to the contents of the values in `data`.
data : dict
`data['gpot']` and `data['spike']` are arrays of data associated with
a module's graded potential and spiking ports.
"""
    def __init__(self, sel, sel_in, sel_out,
                 sel_gpot, sel_spike, data_gpot, data_spike,
                 columns=['interface', 'io', 'type'],
                 ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG, spike_tag=SPIKE_TAG,
                 id=None, device=None,
                 routing_table=None, rank_to_id=None,
                 debug=False, time_sync=False):
        """Set up the module's interface, port mappers, and configuration.

        See the class docstring for parameter semantics. Raises ValueError
        when required columns are missing, when port selectors are not
        subsets of `sel` or not mutually disjoint, when the data array
        lengths do not match the port counts, or when `id` is absent from a
        non-empty routing table.
        """
        super(Module, self).__init__(ctrl_tag)
        self.debug = debug
        self.time_sync = time_sync
        self.device = device
        self._gpot_tag = gpot_tag
        self._spike_tag = spike_tag
        # Require several necessary attribute columns:
        if 'interface' not in columns:
            raise ValueError('interface column required')
        if 'io' not in columns:
            raise ValueError('io column required')
        if 'type' not in columns:
            raise ValueError('type column required')
        # Manually register the file close method associated with MPIOutput
        # so that it is called by atexit before MPI.Finalize() (if the file is
        # closed after MPI.Finalize() is called, an error will occur):
        for k, v in twiggy.emitters.iteritems():
            if isinstance(v._output, MPIOutput):
                atexit.register(v._output.close)
        # Ensure that the input and output port selectors respectively
        # select mutually exclusive subsets of the set of all ports exposed by
        # the module:
        if not SelectorMethods.is_in(sel_in, sel):
            raise ValueError('input port selector not in selector of all ports')
        if not SelectorMethods.is_in(sel_out, sel):
            raise ValueError('output port selector not in selector of all ports')
        if not SelectorMethods.are_disjoint(sel_in, sel_out):
            raise ValueError('input and output port selectors not disjoint')
        # Ensure that the graded potential and spiking port selectors
        # respectively select mutually exclusive subsets of the set of all ports
        # exposed by the module:
        if not SelectorMethods.is_in(sel_gpot, sel):
            raise ValueError('gpot port selector not in selector of all ports')
        if not SelectorMethods.is_in(sel_spike, sel):
            raise ValueError('spike port selector not in selector of all ports')
        if not SelectorMethods.are_disjoint(sel_gpot, sel_spike):
            raise ValueError('gpot and spike port selectors not disjoint')
        # Save routing table and mapping between MPI ranks and module IDs:
        self.routing_table = routing_table
        self.rank_to_id = rank_to_id
        # Generate a unique ID if none is specified:
        if id is None:
            self.id = uid()
        else:
            # If a unique ID was specified and the routing table is not empty
            # (i.e., there are connections between multiple modules),
            # the id must be a node in the table:
            if routing_table is not None and len(routing_table.ids) and \
                    not routing_table.has_node(id):
                raise ValueError('routing table must contain specified module ID')
            self.id = id
        # Reformat logger name:
        LoggerMixin.__init__(self, 'mod %s' % self.id)
        # Create module interface given the specified ports:
        self.interface = Interface(sel, columns)
        # Set the interface ID to 0; we assume that a module only has one interface:
        self.interface[sel, 'interface'] = 0
        # Set the port attributes:
        self.interface[sel_in, 'io'] = 'in'
        self.interface[sel_out, 'io'] = 'out'
        self.interface[sel_gpot, 'type'] = 'gpot'
        self.interface[sel_spike, 'type'] = 'spike'
        # Find the input and output ports:
        self.in_ports = self.interface.in_ports().to_tuples()
        self.out_ports = self.interface.out_ports().to_tuples()
        # Find the graded potential and spiking ports:
        self.gpot_ports = self.interface.gpot_ports().to_tuples()
        self.spike_ports = self.interface.spike_ports().to_tuples()
        self.in_gpot_ports = self.interface.in_ports().gpot_ports().to_tuples()
        self.in_spike_ports = self.interface.in_ports().spike_ports().to_tuples()
        self.out_gpot_ports = self.interface.out_ports().gpot_ports().to_tuples()
        self.out_spike_ports = self.interface.out_ports().spike_ports().to_tuples()
        # Set up mapper between port identifiers and their associated data:
        if len(data_gpot) != len(self.gpot_ports):
            raise ValueError('incompatible gpot port data array length')
        if len(data_spike) != len(self.spike_ports):
            raise ValueError('incompatible spike port data array length')
        self.data = {}
        self.data['gpot'] = data_gpot
        self.data['spike'] = data_spike
        self.pm = {}
        self.pm['gpot'] = PortMapper(sel_gpot, self.data['gpot'])
        self.pm['spike'] = PortMapper(sel_spike, self.data['spike'])
def _init_gpu(self):
"""
Initialize GPU device.
Notes
-----
Must be called from within the `run()` method, not from within
`__init__()`.
"""
if self.device == None:
self.log_info('no GPU specified - not initializing ')
else:
# Import pycuda.driver here so as to facilitate the
# subclassing of Module to create pure Python LPUs that don't use GPUs:
import pycuda.driver as drv
drv.init()
try:
self.gpu_ctx = drv.Device(self.device).make_context()
except Exception as e:
self.log_info('_init_gpu exception: ' + e.message)
else:
atexit.register(self.gpu_ctx.pop)
self.log_info('GPU initialized')
@property
def N_gpot_ports(self):
"""
Number of exposed graded-potential ports.
"""
return len(self.interface.gpot_ports())
@property
def N_spike_ports(self):
"""
Number of exposed spiking ports.
"""
return len(self.interface.spike_ports())
def _get_in_data(self):
"""
Get input data from incoming transmission buffer.
Populate the data arrays associated with a module's ports using input
data received from other modules.
"""
if self.net in ['none', 'ctrl']:
self.log_info('not retrieving from input buffer')
else:
self.log_info('retrieving from input buffer')
# Since fan-in is not permitted, the data from all source modules
# must necessarily map to different ports; we can therefore write each
# of the received data to the array associated with the module's ports
# here without worry of overwriting the data from each source module:
for in_id in self._in_ids:
# Check for exceptions so as to not fail on the first emulation
# step when there is no input data to retrieve:
try:
# The first entry of `data` contains graded potential values,
# while the second contains spiking port values (i.e., 0 or
# 1):
data = self._in_data[in_id].popleft()
except:
self.log_info('no input data from [%s] retrieved' % in_id)
else:
self.log_info('input data from [%s] retrieved' % in_id)
# Assign transmitted values directly to port data array:
if len(self._in_port_dict_ids['gpot'][in_id]):
self.pm['gpot'].set_by_inds(self._in_port_dict_ids['gpot'][in_id], data[0])
if len(self._in_port_dict_ids['spike'][in_id]):
self.pm['spike'].set_by_inds(self._in_port_dict_ids['spike'][in_id], data[1])
    def _put_out_data(self):
        """
        Put specified output data in outgoing transmission buffer.

        Stage data from the data arrays associated with a module's ports for
        output to other modules.

        Notes
        -----
        The output spike port selection algorithm could probably be made faster.
        """
        if self.net in ['none', 'ctrl']:
            self.log_info('not populating output buffer')
        else:
            self.log_info('populating output buffer')
            # Clear output buffer before populating it:
            self._out_data = []
            # Select data that should be sent to each destination module and append
            # it to the outgoing queue:
            for out_id in self._out_ids:
                # Select port data using list of graded potential ports that can
                # transmit output:
                if len(self._out_port_dict_ids['gpot'][out_id]):
                    gpot_data = \
                        self.pm['gpot'].get_by_inds(self._out_port_dict_ids['gpot'][out_id])
                else:
                    gpot_data = np.array([], self.pm['gpot'].dtype)
                # NOTE(review): spike_data is computed below but never staged —
                # the original spike branch (including its else clause and the
                # (gpot, spike) tuple append) is disabled by the triple-quoted
                # block that follows, so spike_data is undefined when the
                # condition is false and unused when it is true. Confirm
                # whether spike output was meant to be dropped here.
                if len(self._out_port_dict_ids['spike'][out_id]):
                    spike_data = \
                        self.pm['spike'].get_by_inds(self._out_port_dict_ids['spike'][out_id])
                '''
                SAME AS BEFORE
                else:
                    spike_data = np.array([], self.pm['spike'].dtype)
                try:
                    self._out_data.append((out_id, (gpot_data, spike_data)))
                # Attempt to stage the emitted port data for transmission:
                '''
                try:
                    self._out_data.append((out_id, (gpot_data)))
                except:
                    self.log_info('no output data to [%s] sent' % out_id)
                else:
                    self.log_info('output data to [%s] sent' % out_id)
def run_step(self):
"""
Module work method.
This method should be implemented to do something interesting with new
input port data in the module's `pm` attribute and update the attribute's
output port data if necessary. It should not interact with any other
class attributes.
"""
self.log_info('running execution step')
    def _init_port_dicts(self):
        """
        Initial dictionaries of source/destination ports in current module.
        """
        # Extract identifiers of source ports in the current module's interface
        # for all modules receiving output from the current module:
        self._out_port_dict = {}
        self._out_port_dict['gpot'] = {}
        self._out_port_dict['spike'] = {}
        self._out_port_dict_ids = {}
        self._out_port_dict_ids['gpot'] = {}
        self._out_port_dict_ids['spike'] = {}
        self._out_ids = self.routing_table.dest_ids(self.id)
        # Slice syntax performs an inverse bidict lookup (module ID -> MPI
        # rank) — presumably; confirm against the bidict version in use:
        self._out_ranks = [self.rank_to_id[:i] for i in self._out_ids]
        for out_id in self._out_ids:
            self.log_info('extracting output ports for %s' % out_id)
            # Get interfaces of pattern connecting the current module to
            # destination module `out_id`; `int_0` is connected to the
            # current module, `int_1` is connected to the other module:
            pat = self.routing_table[self.id, out_id]['pattern']
            int_0 = self.routing_table[self.id, out_id]['int_0']
            int_1 = self.routing_table[self.id, out_id]['int_1']
            # Get ports in interface (`int_0`) connected to the current
            # module that are connected to the other module via the pattern:
            self._out_port_dict['gpot'][out_id] = \
                pat.src_idx(int_0, int_1, 'gpot', 'gpot')
            self._out_port_dict_ids['gpot'][out_id] = \
                self.pm['gpot'].ports_to_inds(self._out_port_dict['gpot'][out_id])
            self._out_port_dict['spike'][out_id] = \
                pat.src_idx(int_0, int_1, 'spike', 'spike')
            self._out_port_dict_ids['spike'][out_id] = \
                self.pm['spike'].ports_to_inds(self._out_port_dict['spike'][out_id])
        # Extract identifiers of destination ports in the current module's
        # interface for all modules sending input to the current module:
        self._in_port_dict = {}
        self._in_port_dict['gpot'] = {}
        self._in_port_dict['spike'] = {}
        self._in_port_dict_ids = {}
        self._in_port_dict_ids['gpot'] = {}
        self._in_port_dict_ids['spike'] = {}
        self._in_ids = self.routing_table.src_ids(self.id)
        self._in_ranks = [self.rank_to_id[:i] for i in self._in_ids]
        for in_id in self._in_ids:
            self.log_info('extracting input ports for %s' % in_id)
            # Get interfaces of pattern connecting the current module to
            # source module `in_id`; `int_1` is connected to the current
            # module, `int_0` is connected to the other module:
            pat = self.routing_table[in_id, self.id]['pattern']
            int_0 = self.routing_table[in_id, self.id]['int_0']
            int_1 = self.routing_table[in_id, self.id]['int_1']
            # Get ports in interface (`int_1`) connected to the current
            # module that are connected to the other module via the pattern:
            self._in_port_dict['gpot'][in_id] = \
                pat.dest_idx(int_0, int_1, 'gpot', 'gpot')
            self._in_port_dict_ids['gpot'][in_id] = \
                self.pm['gpot'].ports_to_inds(self._in_port_dict['gpot'][in_id])
            self._in_port_dict['spike'][in_id] = \
                pat.dest_idx(int_0, int_1, 'spike', 'spike')
            self._in_port_dict_ids['spike'][in_id] = \
                self.pm['spike'].ports_to_inds(self._in_port_dict['spike'][in_id])
def _init_data_in(self):
"""
Buffers for receiving data from other modules.
Notes
-----
Must be executed after `_init_port_dicts()`.
"""
# Allocate arrays for receiving data transmitted to the module so that
# they don't have to be reallocated during every execution step
# synchronization:
self.data_in = {}
self.data_in['gpot'] = {}
self.data_in['spike'] = {}
for in_id in self._in_ids:
self.data_in['gpot'][in_id] = \
np.empty(np.shape(self._in_port_dict['gpot'][in_id]),
self.pm['gpot'].dtype)
self.data_in['spike'][in_id] = \
np.empty(np.shape(self._in_port_dict['spike'][in_id]),
self.pm['spike'].dtype)
def _sync(self):
"""
Send output data and receive input data.
"""
if self.time_sync:
start = time.time()
req = MPI.Request()
requests = []
# For each destination module, extract elements from the current
# module's port data array, copy them to a contiguous array, and
# transmit the latter:
for dest_id, dest_rank in zip(self._out_ids, self._out_ranks):
# Get source ports in current module that are connected to the
# destination module:
data_gpot = self.pm['gpot'].get_by_inds(self._out_port_dict_ids['gpot'][dest_id])
data_spike = self.pm['spike'].get_by_inds(self._out_port_dict_ids['spike'][dest_id])
if not self.time_sync:
self.log_info('gpot data being sent to %s: %s' % \
(dest_id, str(data_gpot)))
self.log_info('spike data being sent to %s: %s' % \
(dest_id, str(data_spike)))
r = MPI.COMM_WORLD.Isend([data_gpot,
dtype_to_mpi(data_gpot.dtype)],
dest_rank, GPOT_TAG)
requests.append(r)
r = MPI.COMM_WORLD.Isend([data_spike,
dtype_to_mpi(data_spike.dtype)],
dest_rank, SPIKE_TAG)
requests.append(r)
if not self.time_sync:
self.log_info('sending to %s' % dest_id)
if not self.time_sync:
self.log_info('sent all data from %s' % self.id)
# For each source module, receive elements and copy them into the
# current module's port data array:
received_gpot = []
received_spike = []
ind_in_gpot_list = []
ind_in_spike_list = []
for src_id, src_rank in zip(self._in_ids, self._in_ranks):
r = MPI.COMM_WORLD.Irecv([self.data_in['gpot'][src_id],
dtype_to_mpi(data_gpot.dtype)],
source=src_rank, tag=GPOT_TAG)
requests.append(r)
r = MPI.COMM_WORLD.Irecv([self.data_in['spike'][src_id],
dtype_to_mpi(data_spike.dtype)],
source=src_rank, tag=SPIKE_TAG)
requests.append(r)
if not self.time_sync:
self.log_info('receiving from %s' % src_id)
req.Waitall(requests)
if not self.time_sync:
self.log_info('received all data received by %s' % self.id)
# Copy received elements into the current module's data array:
for src_id in self._in_ids:
ind_in_gpot = self._in_port_dict_ids['gpot'][src_id]
self.pm['gpot'].set_by_inds(ind_in_gpot, self.data_in['gpot'][src_id])
ind_in_spike = self._in_port_dict_ids['spike'][src_id]
self.pm['spike'].set_by_inds(ind_in_spike, self.data_in['spike'][src_id])
# Save timing data:
if self.time_sync:
stop = time.time()
n_gpot = 0
n_spike = 0
for src_id in self._in_ids:
n_gpot += len(self.data_in['gpot'][src_id])
n_spike += len(self.data_in['spike'][src_id])
self.log_info('sent timing data to master')
self.intercomm.isend(['sync_time',
(self.rank, self.steps, start, stop,
n_gpot*self.pm['gpot'].dtype.itemsize+\
n_spike*self.pm['spike'].dtype.itemsize)],
dest=0, tag=self._ctrl_tag)
else:
self.log_info('saved all data received by %s' % self.id)
def run_step(self):
"""
Module work method.
This method should be implemented to do something interesting with new
input port data in the module's `pm` attribute and update the attribute's
output port data if necessary. It should not interact with any other
class attributes.
"""
self.log_info('running execution step')
def pre_run(self):
"""
Code to run before main loop.
This method is invoked by the `run()` method before the main loop is
started.
"""
self.log_info('running code before body of worker %s' % self.rank)
# Initialize _out_port_dict and _in_port_dict attributes:
self._init_port_dicts()
# Initialize data_in attribute:
self._init_data_in()
# Start timing the main loop:
if self.time_sync:
self.intercomm.isend(['start_time', (self.rank, time.time())],
dest=0, tag=self._ctrl_tag)
self.log_info('sent start time to manager')
def post_run(self):
"""
Code to run after main loop.
This method is invoked by the `run()` method after the main loop is
started.
"""
self.log_info('running code after body of worker %s' % self.rank)
# Stop timing the main loop before shutting down the emulation:
if self.time_sync:
self.intercomm.isend(['stop_time', (self.rank, time.time())],
dest=0, tag=self._ctrl_tag)
self.log_info('sent stop time to manager')
# Send acknowledgment message:
self.intercomm.isend(['done', self.rank], 0, self._ctrl_tag)
self.log_info('done message sent to manager')
    def run(self):
        """
        Body of process.

        Wraps the inherited execution loop so the worker cannot be killed by
        a keyboard interrupt; shutdown must go through the manager's control
        messages instead.
        """
        # Don't allow keyboard interruption of process:
        with IgnoreKeyboardInterrupt():
            # Activate execution loop:
            super(Module, self).run()
def do_work(self):
"""
Work method.
This method is repeatedly executed by the Worker instance after the
instance receives a 'start' control message and until it receives a 'stop'
control message.
"""
# If the debug flag is set, don't catch exceptions so that
# errors will lead to visible failures:
if self.debug:
# Run the processing step:
self.run_step()
# Synchronize:
self._sync()
else:
# Run the processing step:
catch_exception(self.run_step, self.log_info)
# Synchronize:
catch_exception(self._sync, self.log_info)
class Manager(mpi.WorkerManager):
    """
    Module manager.

    Instantiates, connects, starts, and stops modules comprised by an
    emulation. All modules and connections must be added to a module manager
    instance before they can be run.

    Attributes
    ----------
    ctrl_tag : int
        MPI tag to identify control messages.
    modules : dict
        Module instances. Keyed by module object ID.
    routing_table : routing_table.RoutingTable
        Table of data transmission connections between modules.
    rank_to_id : bidict.bidict
        Mapping between MPI ranks and module object IDs.
    """
    def __init__(self, required_args=['sel', 'sel_in', 'sel_out',
                                      'sel_gpot', 'sel_spike'],
                 ctrl_tag=CTRL_TAG):
        """
        Parameters
        ----------
        required_args : list of str
            Constructor argument names that added module classes are
            expected to accept (see `validate_args`).
        ctrl_tag : int
            MPI tag to identify control messages.
        """
        # NOTE(review): the mutable default for `required_args` is shared
        # across instances; harmless as long as it is never mutated.
        super(Manager, self).__init__(ctrl_tag)
        # Required constructor args:
        self.required_args = required_args
        # One-to-one mapping between MPI rank and module ID:
        self.rank_to_id = bidict.bidict()
        # Unique object ID:
        self.id = uid()
        # Set up a dynamic table to contain the routing table:
        self.routing_table = RoutingTable()
        # Number of emulation steps to run:
        self.steps = np.inf
        # Variables for timing run loop:
        self.start_time = 0.0
        self.stop_time = 0.0
        # Variables for computing throughput:
        self.counter = 0
        self.total_sync_time = 0.0
        self.total_sync_nbytes = 0.0
        # Per-step timing tuples received from workers, keyed by step and
        # then by worker rank:
        self.received_data = {}
        # Average step synchronization time:
        self._average_step_sync_time = 0.0
        # Computed throughput (only updated after an emulation run):
        self._average_throughput = 0.0
        self._total_throughput = 0.0
        self.log_info('manager instantiated')
    @property
    def average_step_sync_time(self):
        """
        Average step synchronization time.
        """
        return self._average_step_sync_time
    @average_step_sync_time.setter
    def average_step_sync_time(self, t):
        self._average_step_sync_time = t
    @property
    def total_throughput(self):
        """
        Total received data throughput.
        """
        return self._total_throughput
    @total_throughput.setter
    def total_throughput(self, t):
        self._total_throughput = t
    @property
    def average_throughput(self):
        """
        Average received data throughput per step.
        """
        return self._average_throughput
    @average_throughput.setter
    def average_throughput(self, t):
        self._average_throughput = t
    def validate_args(self, target):
        """
        Check whether a class' constructor has specific arguments.

        Parameters
        ----------
        target : Module
            Module class to instantiate and run.

        Returns
        -------
        result : bool
            True if all of the required arguments are present, False otherwise.
        """
        arg_names = set(mpi.getargnames(target.__init__))
        for required_arg in self.required_args:
            if required_arg not in arg_names:
                return False
        return True
    def add(self, target, id, *args, **kwargs):
        """
        Add a module class to the emulation.

        Parameters
        ----------
        target : Module
            Module class to instantiate and run.
        id : str
            Identifier to use when connecting an instance of this class
            with an instance of some other class added to the emulation.
        args : sequence
            Sequential arguments to pass to the constructor of the class
            associated with identifier `id`.
        kwargs : dict
            Named arguments to pass to the constructor of the class
            associated with identifier `id`.
        """
        if not issubclass(target, Module):
            raise ValueError('target class is not a Module subclass')
        argnames = mpi.getargnames(target.__init__)
        # Selectors must be passed to the module upon instantiation;
        # the module manager must know about them to assess compatibility:
        # XXX: keep this commented out for the time being because it interferes
        # with instantiation of child classes (such as those in LPU.py):
        # if not self.validate_args(target):
        #     raise ValueError('class constructor missing required args')
        # Need to associate an ID and the routing table with each module class
        # to instantiate; because the routing table's can potentially occupy
        # lots of space, we don't add it to the argument dict here - it is
        # broadcast to all processes separately and then added to the argument
        # dict in mpi_backend.py:
        kwargs['id'] = id
        kwargs['rank_to_id'] = self.rank_to_id
        rank = super(Manager, self).add(target, *args, **kwargs)
        self.rank_to_id[rank] = id
    def connect(self, id_0, id_1, pat, int_0=0, int_1=1, compat_check=True):
        """
        Connect two added modules with a Pattern instance.

        Parameters
        ----------
        id_0, id_1 : str
            Identifiers of the two modules to connect.
        pat : Pattern
            Pattern instance describing the connections between the modules.
        int_0, int_1 : int
            Identifiers of the pattern interfaces to associate with `id_0`
            and `id_1`, respectively.
        compat_check : bool
            Whether to check compatibility between the modules' interfaces
            and the pattern's before connecting.

        Raises
        ------
        ValueError
            If `pat` is not a Pattern, either module ID is unknown, the
            interface identifiers are unrecognized, or (when `compat_check`
            is set) an interface is incompatible with the pattern.
        """
        if not isinstance(pat, Pattern):
            raise ValueError('pat is not a Pattern instance')
        if id_0 not in self.rank_to_id.values():
            raise ValueError('unrecognized module id %s' % id_0)
        if id_1 not in self.rank_to_id.values():
            raise ValueError('unrecognized module id %s' % id_1)
        if not (int_0 in pat.interface_ids and int_1 in pat.interface_ids):
            raise ValueError('unrecognized pattern interface identifiers')
        self.log_info('connecting modules {0} and {1}'
                      .format(id_0, id_1))
        # Check compatibility of the interfaces exposed by the modules and the
        # pattern; since the manager only contains module classes and not class
        # instances, we need to create Interface instances from the selectors
        # associated with the modules in order to test their compatibility:
        if compat_check:
            rank_0 = self.rank_to_id.inv[id_0]
            rank_1 = self.rank_to_id.inv[id_1]
            self.log_info('checking compatibility of modules {0} and {1} and'
                          ' assigned pattern'.format(id_0, id_1))
            mod_int_0 = Interface(self._kwargs[rank_0]['sel'])
            mod_int_0[self._kwargs[rank_0]['sel']] = 0
            mod_int_1 = Interface(self._kwargs[rank_1]['sel'])
            mod_int_1[self._kwargs[rank_1]['sel']] = 0
            mod_int_0[self._kwargs[rank_0]['sel_in'], 'io'] = 'in'
            mod_int_0[self._kwargs[rank_0]['sel_out'], 'io'] = 'out'
            mod_int_0[self._kwargs[rank_0]['sel_gpot'], 'type'] = 'gpot'
            mod_int_0[self._kwargs[rank_0]['sel_spike'], 'type'] = 'spike'
            mod_int_1[self._kwargs[rank_1]['sel_in'], 'io'] = 'in'
            mod_int_1[self._kwargs[rank_1]['sel_out'], 'io'] = 'out'
            mod_int_1[self._kwargs[rank_1]['sel_gpot'], 'type'] = 'gpot'
            mod_int_1[self._kwargs[rank_1]['sel_spike'], 'type'] = 'spike'
            if not mod_int_0.is_compatible(0, pat.interface, int_0, True):
                raise ValueError('module %s interface incompatible '
                                 'with pattern interface %s' % (id_0, int_0))
            if not mod_int_1.is_compatible(0, pat.interface, int_1, True):
                raise ValueError('module %s interface incompatible '
                                 'with pattern interface %s' % (id_1, int_1))
        # XXX Need to check for fan-in XXX
        # Store the pattern information in the routing table; one entry is
        # created per direction in which the pattern transmits data:
        self.log_info('updating routing table with pattern')
        if pat.is_connected(0, 1):
            self.routing_table[id_0, id_1] = {'pattern': pat,
                                              'int_0': int_0, 'int_1': int_1}
        if pat.is_connected(1, 0):
            self.routing_table[id_1, id_0] = {'pattern': pat,
                                              'int_0': int_1, 'int_1': int_0}
        self.log_info('connected modules {0} and {1}'.format(id_0, id_1))
    def process_worker_msg(self, msg):
        """
        Process a timing message received from a worker.

        Handles 'start_time', 'stop_time', and 'sync_time' messages;
        'sync_time' data is accumulated per execution step and folded into
        the running sync-time/throughput averages once every worker has
        reported for that step.
        """
        # Process timing data sent by workers:
        if msg[0] == 'start_time':
            rank, self.start_time = msg[1]
            self.log_info('start time data: %s' % str(msg[1]))
        elif msg[0] == 'stop_time':
            rank, self.stop_time = msg[1]
            self.log_info('stop time data: %s' % str(msg[1]))
        elif msg[0] == 'sync_time':
            rank, steps, start, stop, nbytes = msg[1]
            self.log_info('sync time data: %s' % str(msg[1]))
            # Collect timing data for each execution step:
            if steps not in self.received_data:
                self.received_data[steps] = {}
            self.received_data[steps][rank] = (start, stop, nbytes)
            # After adding the latest timing data for a specific step, check
            # whether data from all modules has arrived for that step:
            if set(self.received_data[steps].keys()) == set(self.rank_to_id.keys()):
                # Exclude the very first step to avoid including delays due to
                # PyCUDA kernel compilation:
                if steps != 0:
                    # The duration an execution is assumed to be the longest of
                    # the received intervals:
                    step_sync_time = max([(d[1]-d[0]) for d in self.received_data[steps].values()])
                    # Obtain the total number of bytes received by all of the
                    # modules during the execution step:
                    step_nbytes = sum([d[2] for d in self.received_data[steps].values()])
                    self.total_sync_time += step_sync_time
                    self.total_sync_nbytes += step_nbytes
                    # Incrementally update the running averages:
                    self.average_throughput = (self.average_throughput*self.counter+\
                                               step_nbytes/step_sync_time)/(self.counter+1)
                    self.average_step_sync_time = (self.average_step_sync_time*self.counter+\
                                                   step_sync_time)/(self.counter+1)
                    self.counter += 1
                else:
                    # To skip the first sync step, set the start time to the
                    # latest stop time of the first step:
                    self.start_time = max([d[1] for d in self.received_data[steps].values()])
                # Clear the data for the processed execution step so that
                # that the received_data dict doesn't consume unnecessary memory:
                del self.received_data[steps]
        # Compute throughput using accumulated timing data:
        if self.total_sync_time > 0:
            self.total_throughput = self.total_sync_nbytes/self.total_sync_time
        else:
            self.total_throughput = 0.0
    def wait(self):
        """
        Wait for the emulation to finish, then log a timing summary.
        """
        super(Manager, self).wait()
        # NOTE(review): the concatenated format string below lacks a space
        # before '%s', so the label runs directly into the values.
        self.log_info('avg step sync time/avg per-step throughput' \
                      '/total transm throughput/run loop duration:' \
                      '%s, %s, %s, %s' % \
                      (self.average_step_sync_time, self.average_throughput,
                       self.total_throughput, self.stop_time-self.start_time))
if __name__ == '__main__':
    import neurokernel.mpi_relaunch
    class MyModule(Module):
        """
        Example of derived module class.
        """
        def run_step(self):
            super(MyModule, self).run_step()
            # Do something with input graded potential data:
            self.log_info('input gpot port data: '+str(self.pm['gpot'][self.in_gpot_ports]))
            # Do something with input spike data:
            self.log_info('input spike port data: '+str(self.pm['spike'][self.in_spike_ports]))
            # Output random graded potential data:
            out_gpot_data = np.random.rand(len(self.out_gpot_ports))
            self.pm['gpot'][self.out_gpot_ports] = out_gpot_data
            self.log_info('output gpot port data: '+str(out_gpot_data))
            # Randomly select output ports to emit spikes:
            out_spike_data = np.random.randint(0, 2, len(self.out_spike_ports))
            self.pm['spike'][self.out_spike_ports] = out_spike_data
            self.log_info('output spike port data: '+str(out_spike_data))
    # Log to screen and to a file from every MPI process:
    logger = mpi.setup_logger(screen=True, file_name='neurokernel.log',
                              mpi_comm=MPI.COMM_WORLD, multiline=True)
    man = Manager()
    # Port selectors for module m1, grouped by direction and port type:
    m1_int_sel_in_gpot = '/a/in/gpot0,/a/in/gpot1'
    m1_int_sel_out_gpot = '/a/out/gpot0,/a/out/gpot1'
    m1_int_sel_in_spike = '/a/in/spike0,/a/in/spike1'
    m1_int_sel_out_spike = '/a/out/spike0,/a/out/spike1'
    m1_int_sel = ','.join([m1_int_sel_in_gpot, m1_int_sel_out_gpot,
                           m1_int_sel_in_spike, m1_int_sel_out_spike])
    m1_int_sel_in = ','.join((m1_int_sel_in_gpot, m1_int_sel_in_spike))
    m1_int_sel_out = ','.join((m1_int_sel_out_gpot, m1_int_sel_out_spike))
    m1_int_sel_gpot = ','.join((m1_int_sel_in_gpot, m1_int_sel_out_gpot))
    m1_int_sel_spike = ','.join((m1_int_sel_in_spike, m1_int_sel_out_spike))
    N1_gpot = SelectorMethods.count_ports(m1_int_sel_gpot)
    N1_spike = SelectorMethods.count_ports(m1_int_sel_spike)
    # Port selectors for module m2:
    m2_int_sel_in_gpot = '/b/in/gpot0,/b/in/gpot1'
    m2_int_sel_out_gpot = '/b/out/gpot0,/b/out/gpot1'
    m2_int_sel_in_spike = '/b/in/spike0,/b/in/spike1'
    m2_int_sel_out_spike = '/b/out/spike0,/b/out/spike1'
    m2_int_sel = ','.join([m2_int_sel_in_gpot, m2_int_sel_out_gpot,
                           m2_int_sel_in_spike, m2_int_sel_out_spike])
    m2_int_sel_in = ','.join((m2_int_sel_in_gpot, m2_int_sel_in_spike))
    m2_int_sel_out = ','.join((m2_int_sel_out_gpot, m2_int_sel_out_spike))
    m2_int_sel_gpot = ','.join((m2_int_sel_in_gpot, m2_int_sel_out_gpot))
    m2_int_sel_spike = ','.join((m2_int_sel_in_spike, m2_int_sel_out_spike))
    N2_gpot = SelectorMethods.count_ports(m2_int_sel_gpot)
    N2_spike = SelectorMethods.count_ports(m2_int_sel_spike)
    # Note that the module ID doesn't need to be listed in the specified
    # constructor arguments:
    # NOTE(review): the trailing space in the IDs below looks like log
    # padding — confirm it is intentional.
    m1_id = 'm1 '
    man.add(MyModule, m1_id, m1_int_sel, m1_int_sel_in, m1_int_sel_out,
            m1_int_sel_gpot, m1_int_sel_spike,
            np.zeros(N1_gpot, dtype=np.double),
            np.zeros(N1_spike, dtype=int),
            ['interface', 'io', 'type'],
            CTRL_TAG, GPOT_TAG, SPIKE_TAG, time_sync=True)
    m2_id = 'm2 '
    man.add(MyModule, m2_id, m2_int_sel, m2_int_sel_in, m2_int_sel_out,
            m2_int_sel_gpot, m2_int_sel_spike,
            np.zeros(N2_gpot, dtype=np.double),
            np.zeros(N2_spike, dtype=int),
            ['interface', 'io', 'type'],
            CTRL_TAG, GPOT_TAG, SPIKE_TAG, time_sync=True)
    # Make sure that all ports in the patterns' interfaces are set so
    # that they match those of the modules:
    pat12 = Pattern(m1_int_sel, m2_int_sel)
    pat12.interface[m1_int_sel_out_gpot] = [0, 'in', 'gpot']
    pat12.interface[m1_int_sel_in_gpot] = [0, 'out', 'gpot']
    pat12.interface[m1_int_sel_out_spike] = [0, 'in', 'spike']
    pat12.interface[m1_int_sel_in_spike] = [0, 'out', 'spike']
    pat12.interface[m2_int_sel_in_gpot] = [1, 'out', 'gpot']
    pat12.interface[m2_int_sel_out_gpot] = [1, 'in', 'gpot']
    pat12.interface[m2_int_sel_in_spike] = [1, 'out', 'spike']
    pat12.interface[m2_int_sel_out_spike] = [1, 'in', 'spike']
    # Wire m1's outputs to m2's inputs and vice versa, port by port:
    pat12['/a/out/gpot0', '/b/in/gpot0'] = 1
    pat12['/a/out/gpot1', '/b/in/gpot1'] = 1
    pat12['/b/out/gpot0', '/a/in/gpot0'] = 1
    pat12['/b/out/gpot1', '/a/in/gpot1'] = 1
    pat12['/a/out/spike0', '/b/in/spike0'] = 1
    pat12['/a/out/spike1', '/b/in/spike1'] = 1
    pat12['/b/out/spike0', '/a/in/spike0'] = 1
    pat12['/b/out/spike1', '/a/in/spike1'] = 1
    man.connect(m1_id, m2_id, pat12, 0, 1)
    # Start emulation and allow it to run for a little while before shutting
    # down.  To set the emulation to exit after executing a fixed number of
    # steps, start it as follows and remove the sleep statement:
    # man.start(500)
    man.spawn()
    man.start(20)
    man.wait()
|
|
''' Version 1.000
Code provided by Daniel Jiwoong Im
Permission is granted for anyone to copy, use, modify, or distribute this
program and accompanying programs and documents for any purpose, provided
this copyright notice is retained and prominently displayed, along with
a note saying that the original programs are available from our
web page.
The programs and documents are distributed without any warranty, express or
implied. As the programs were written for research purposes only, they have
not been tested to the degree that would be advisable in any important
application. All use of these programs is entirely at the user's own risk.'''
import cPickle, gzip, numpy
import theano
import theano.tensor as T
import numpy as np
import math
import matplotlib as mp
import matplotlib.pyplot as plt
def save_the_weight(x,fname):
    '''Pickle `x` to the file ``fname + '.save'`` using the highest protocol.'''
    # Use a context manager instead of file()/close() so the file handle is
    # closed even if pickling raises:
    with open(fname+'.save', 'wb') as f:
        cPickle.dump(x, f, protocol=cPickle.HIGHEST_PROTOCOL)
def separate_data_into_classes(train_set, num_classes, flag=1):
    '''Split a (data, labels) pair into one subset per class.

    Returns the list of per-class subsets (wrapped in Theano shared
    variables when `flag` is truthy) and the list of per-class case counts.
    '''
    sep_train_set = []
    num_cases_per_class = []
    for class_i in xrange(num_classes):
        # Rows of the data matrix whose label equals the current class:
        class_data = train_set[0][train_set[1]==class_i,:]
        n_cases = class_data.shape[0]
        num_cases_per_class.append(n_cases)
        labels = class_i *np.ones((n_cases,1),dtype='float32')
        pair = [class_data, labels]
        sep_train_set.append(shared_dataset(pair) if flag else pair)
    return sep_train_set, num_cases_per_class
def load_dataset(path):
    '''Load a gzipped pickle containing (train, valid, test) sets.

    Each returned set is a two-element list ``[data, target]``.
    '''
    # Use a context manager so the file is closed even if unpickling fails:
    with gzip.open(path, 'rb') as f:
        train_set, valid_set, test_set = cPickle.load(f)
    return [train_set[0], train_set[1]], \
           [valid_set[0], valid_set[1]], \
           [test_set[0], test_set[1]]
def normalize(data, vdata=None, tdata=None):
    '''Standardize `data` to zero mean and unit variance per column.

    When `tdata` (and optionally `vdata`) is given, it is standardized
    using the mean/std computed from `data` and returned alongside it.

    Note: the original used ``== None``/``!= None``, which performs an
    elementwise comparison on numpy arrays and makes the ``if`` raise
    ValueError; identity checks (`is None`) are required here.
    '''
    mu = np.mean(data, axis=0)
    std = np.std(data, axis=0)
    data = ( data - mu ) / std
    if vdata is None and tdata is not None:
        tdata = (tdata - mu ) /std
        return data, tdata
    if vdata is not None and tdata is not None:
        vdata = (vdata - mu ) /std
        tdata = (tdata - mu ) /std
        return data, vdata, tdata
    return data
def unpickle(path):
    ''' For cifar-10 data, it will return dictionary'''
    # Load the cifar 10; the context manager closes the file even if
    # unpickling raises:
    with open(path, 'rb') as f:
        return cPickle.load(f)
def share_input(x):
    '''Wrap `x` in a Theano shared variable using the configured float dtype.'''
    arr = np.asarray(x, dtype=theano.config.floatX)
    return theano.shared(arr)
def shared_dataset(data_xy):
    """Load a (data, label) pair into Theano shared variables.

    Keeping the dataset in shared variables lets Theano copy it to GPU
    memory in one transfer when code runs on a GPU; copying each minibatch
    separately would be far slower.
    """
    features, labels = data_xy
    x_shared = theano.shared(np.asarray(features, dtype=theano.config.floatX))
    y_shared = theano.shared(np.asarray(labels, dtype=theano.config.floatX))
    # GPU-resident data must be stored as floats, so the labels are stored
    # as floatX too; since they are used as indices during computation,
    # return an int32-cast view of the shared label variable instead of the
    # raw float version.
    return x_shared, T.cast(y_shared, 'int32')
'''Given tiles of raw data, this function will return training, validation, and test sets.
r_train - ratio of train set
r_valid - ratio of valid set
r_test - ratio of test set'''
def gen_train_valid_test(raw_data, raw_target, r_train, r_valid, r_test):
N = raw_data.shape[0]
perms = np.random.permutation(N)
raw_data = raw_data[perms,:]
raw_target = raw_target[perms]
tot = float(r_train + r_valid + r_test) #Denominator
p_train = r_train / tot #train data ratio
p_valid = r_valid / tot #valid data ratio
p_test = r_test / tot #test data ratio
n_raw = raw_data.shape[0] #total number of data
n_train =int( math.floor(n_raw * p_train)) # number of train
n_valid =int( math.floor(n_raw * p_valid)) # number of valid
n_test =int( math.floor(n_raw * p_test) ) # number of test
train = raw_data[0:n_train, :]
valid = raw_data[n_train:n_train+n_valid, :]
test = raw_data[n_train+n_valid: n_train+n_valid+n_test,:]
train_target = raw_target[0:n_train]
valid_target = raw_target[n_train:n_train+n_valid]
test_target = raw_target[n_train+n_valid: n_train+n_valid+n_test]
print 'Among ', n_raw, 'raw data, we generated: '
print train.shape[0], ' training data'
print valid.shape[0], ' validation data'
print test.shape[0], ' test data\n'
train_set = [train, train_target]
valid_set = [valid, valid_target]
test_set = [test, test_target]
return [train_set, valid_set, test_set]
'''decaying learning rate'''
def get_epsilon(epsilon, n, i):
    '''Decay the base learning rate `epsilon` hyperbolically: after `n`
    iterations the rate is halved, and it keeps shrinking as `i` grows.'''
    decay = 1 + i/float(n)
    return epsilon / decay
def get_thrd(epoch, tot_epoch):
    '''Linearly anneal a threshold from 0.5 (epoch 0) to 1.0 (tot_epoch).'''
    # Keep the float factor first so the division is a float division even
    # with integer arguments under Python 2:
    return 0.5 * epoch / tot_epoch + 0.5
'''Display dataset as a tiles'''
def display_dataset(data, patch_sz, tile_shape, scale_rows_to_unit_interval=False, \
        binary=False, i=1, fname='dataset'):
    '''Render the rows of `data` as a tiled grayscale image.

    Each row is reshaped to `patch_sz` and laid out on a `tile_shape` grid.
    When `fname` is given the figure is saved to ``fname + '.png'``;
    otherwise the figure is only drawn.  `i` is unused.
    '''
    x = tile_raster_images(data, img_shape=patch_sz, \
            tile_shape=tile_shape, tile_spacing=(1,1), output_pixel_vals=False, scale_rows_to_unit_interval=scale_rows_to_unit_interval)
    if binary:
        # Map binary 1-pixels to full white for display:
        x[x==1] = 255
    ## For MNIST
    if fname != None:
        plt.figure()
        plt.imshow(x,cmap='gray')
        plt.axis('off')
        plt.savefig(fname+'.png')
    else:
        # Same rendering, but without saving to disk:
        plt.figure()
        plt.imshow(x,cmap='gray')
        plt.axis('off')
        #image = PIL.Image.fromarray(numpy.uint8(x))#.convert('RGB')
        #image.show()
    # For CIFAR10 images
    #plt.imshow(x)
    #image = PIL.Image.fromarray(x).convert('RGB')
    #image.show()
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
                       scale_rows_to_unit_interval=False,
                       output_pixel_vals=True):
    """
    Transform an array with one flattened image per row, into an array in
    which images are reshaped and laid out like tiles on a floor.

    This function is useful for visualizing datasets whose rows are images,
    and also columns of matrices for transforming those rows
    (such as the first layer of a neural net).

    :type X: a 2-D ndarray or a tuple of 4 channels, elements of which can
    be 2-D ndarrays or None;
    :param X: a 2-D array in which every row is a flattened image.
    :type img_shape: tuple; (height, width)
    :param img_shape: the original shape of each image
    :type tile_shape: tuple; (rows, cols)
    :param tile_shape: the number of images to tile (rows, cols)
    :param output_pixel_vals: if output should be pixel values (i.e. int8
    values) or floats
    :param scale_rows_to_unit_interval: if the values need to be scaled before
    being plotted to [0,1] or not
    :returns: array suitable for viewing as an image.
    (See:`PIL.Image.fromarray`.)
    :rtype: a 2-d array with same dtype as X.
    """
    assert len(img_shape) == 2
    assert len(tile_shape) == 2
    assert len(tile_spacing) == 2
    # The expression below can be re-written in a more C style as
    # follows :
    #
    # out_shape = [0,0]
    # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
    #                tile_spacing[0]
    # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
    #                tile_spacing[1]
    out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
                 in zip(img_shape, tile_shape, tile_spacing)]
    if isinstance(X, tuple):
        # Tuple input: four channels (R, G, B, A), each a 2-D array or None.
        assert len(X) == 4
        # Create an output numpy ndarray to store the image
        if output_pixel_vals:
            out_array = numpy.zeros((out_shape[0], out_shape[1], 4),
                                    dtype='uint8')
        else:
            out_array = numpy.zeros((out_shape[0], out_shape[1], 4),
                                    dtype=X[0].dtype)
        #colors default to 0, alpha defaults to 1 (opaque)
        if output_pixel_vals:
            channel_defaults = [0, 0, 0, 255]
        else:
            channel_defaults = [0., 0., 0., 1.]
        for i in xrange(4):
            if X[i] is None:
                # if channel is None, fill it with zeros of the correct
                # dtype
                dt = out_array.dtype
                if output_pixel_vals:
                    dt = 'uint8'
                out_array[:, :, i] = numpy.zeros(out_shape,
                                                 dtype=dt) + channel_defaults[i]
            else:
                # use a recurrent call to compute the channel and store it
                # in the output
                out_array[:, :, i] = tile_raster_images(
                    X[i], img_shape, tile_shape, tile_spacing,
                    scale_rows_to_unit_interval, output_pixel_vals)
        return out_array
    else:
        # if we are dealing with only one channel
        H, W = img_shape
        Hs, Ws = tile_spacing
        # generate a matrix to store the output
        dt = X.dtype
        if output_pixel_vals:
            dt = 'uint8'
        out_array = numpy.zeros(out_shape, dtype=dt)
        # Copy each row of X into its grid cell; trailing cells with no
        # corresponding row are left at the zero background value.
        for tile_row in xrange(tile_shape[0]):
            for tile_col in xrange(tile_shape[1]):
                #print tile_row, tile_shape[1], tile_col, X.shape[0]
                #print tile_row * tile_shape[1] + tile_col < X.shape[0]
                if tile_row * tile_shape[1] + tile_col < X.shape[0]:
                    this_x = X[tile_row * tile_shape[1] + tile_col]
                    #print this_x
                    #print scale_rows_to_unit_interval
                    if scale_rows_to_unit_interval:
                        # if we should scale values to be between 0 and 1
                        # do this by calling the `scale_to_unit_interval`
                        # function
                        this_img = scale_to_unit_interval(
                            this_x.reshape(img_shape))
                        #print this_x.shape
                        #print this_img
                    else:
                        this_img = this_x.reshape(img_shape)
                    # add the slice to the corresponding position in the
                    # output array
                    #print this_x.shape
                    #print this_img
                    # Scale to 0-255 when emitting pixel values:
                    c = 1
                    if output_pixel_vals:
                        c = 255
                    out_array[
                        tile_row * (H + Hs): tile_row * (H + Hs) + H,
                        tile_col * (W + Ws): tile_col * (W + Ws) + W
                    ] = this_img * c
        return out_array
if __name__ == '__main__':
    # Smoke test: load MNIST and push the splits into Theano shared variables.
    train_set, valid_set, test_set = load_dataset('./mnist.pkl.gz')
    print 123, train_set[1].shape
    train_set_x, train_set_y = shared_dataset(train_set);
    test_set_x, test_set_y = shared_dataset(test_set);
    valid_set_x, valid_set_y = shared_dataset(valid_set);
    print type(train_set_x)
    print dir(train_set_x)
    # Slice one minibatch (rows 1000-1500) out of the shared variable:
    data = train_set_x[2 * 500: 3 * 500]
    print data
|
|
'''
Implements stream writing of data after pickling and compressing them.
'''
# TODO: optimization
# https://www.python.org/doc/essays/list2str/
import pickle as cP
import re
import zlib
import fileops as fops
import err
from IPython import embed
# Problem is, the delimiter is not unique: the pickled data can
# contain byte sequences that mimic DELIM. Hence, it does not look
# like there is a way around it.
DELIM = str('\n\n\n\n')
class PickleStreamReader(object):
    """Reads a file written by PickleStreamWriter

    read(): data = unpickle(uncompress(fileread))

    Provides a read function which returns a generator of stored
    pickles. Uses zlib for compression and cPickle for pickling
    operations.
    """
    def __init__(self, fname, compression=True):
        # NOTE(review): `compression` is accepted but never used; the
        # stream is always assumed to be zlib-compressed.
        self.fname = fname
        return
    def read(self):
        """Yield every pickled object stored in the file (size-header format)."""
        with fops.ReadLine(self.fname, mode='rb') as sr:
            ucp = PickleStreamUnCompressor(sr)
            for data in ucp.uncompressor():
                yield data
        return
    def read_older(self):
        """Yield pickles from the older DELIM-separated format.

        Splits the decompressed stream on DELIM; a pickle may span several
        reads, so partial pieces are accumulated until the delimiter is
        seen. Kept for reading files produced by the old writer.
        """
        zo = zlib.decompressobj()
        partial_pickle = []
        done = False
        with fops.ReadLine(self.fname, mode='rb') as sr:
            while not done:
                cdata = sr.readline()
                if cdata == str(''):
                    # EOF: flush whatever remains in the decompressor.
                    done = True
                    ucdata = zo.flush()
                else:
                    ucdata = zo.decompress(cdata)
                buf = ucdata.split(DELIM)
                assert(len(buf) > 0)
                # if split occurred
                if len(buf) > 1:
                    partial_pickle.append(buf[0])
                    # pickle is completed
                    pickle = str('').join(partial_pickle)
                    yield cP.loads(pickle)
                    partial_pickle = []
                    # Fully delimited pickles in the middle of the buffer:
                    for pickle in buf[1:-1]:
                        yield cP.loads(pickle)
                    # The tail is the start of the next (partial) pickle:
                    partial_pickle.append(buf[-1])
                else:
                    partial_pickle.append(buf[0])
        assert(partial_pickle == ['', ''])
        return
class PickleStreamWriter(object):
    """Pickles and compresses data and writes it to a file.

    Provides a function
        writer(): filewrite(compress(pickle(data)))
    which pickles and compresses the given data, before writing
    it. Uses cpickle and zlib for pickling and compression, resp.
    The pickling protocol used is HIGHEST_PROTOCOL.
    The default compression level for zlib is used.

    Must be used as a context manager: __enter__ opens the output stream
    and __exit__ flushes the compressor and closes the file.
    """
    def __init__(self, fname, compression=True):
        # NOTE(review): `compression` is accepted but never used.
        self.fname = fname
        # Both are created in __enter__; write() before then would fail.
        self.sw = None
        self.comp = None
        return
    def write(self, data):
        # NOTE(review): despite the class docstring, `data` is compressed
        # as-is — the caller appears responsible for pickling; confirm
        # against PickleStreamCompressor.
        self.sw.write(self.comp.compress(data))
        return
    def __enter__(self):
        self.sw = fops.StreamWrite(self.fname, mode='wb')
        self.comp = PickleStreamCompressor()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Flush any data still buffered in the compressor before closing:
        self.sw.write(self.comp.flush())
        self.sw.close_file()
        return
class PickleStreamWriterIter(object):
    """Like PickleStreamWriter, but the writer() takes in an iterator.

    Like a one-shot writer: write() pickles every item of the iterable,
    prefixes each pickle with a '\\n<size>\\n' header, compresses the whole
    sequence with one zlib stream, and writes it to `fname`.
    """
    def __init__(self, fname, compression=True):
        # NOTE(review): `compression` is accepted but never used.
        self.fname = fname
    def write(self, data):
        """Pickle, compress, and write every item of the iterable `data`."""
        zo = zlib.compressobj()
        with fops.StreamWrite(self.fname, mode='wb') as sw:
            for d in data:
                pickled = cP.dumps(d, protocol=cP.HIGHEST_PROTOCOL)
                # write size
                sz = len(pickled)
                # Two newlines around the size make life easy
                sw.write(zo.compress('\n{}\n'.format(sz)))
                sw.write(zo.compress(pickled))
            # Flush the compressor's remaining buffered output:
            sw.write(zo.flush())
    # def write(self, trace):
    #     zo = zlib.compressobj()
    #     with fops.StreamWrite(self.fname, mode='wb') as sw:
    #         # pickle the trace and dump it
    #         # Remove pickling from here...this should be the lightest
    #         # process as it is the bottleneck
    #         pickled_trace = cP.dumps(trace, protocol=cP.HIGHEST_PROTOCOL)
    #         # add teo newlines, as this is *never*?? happens in a
    #         # pickle?
    #         #sw.write(DELIM)
    #         # write size
    #         sz = len(pickled_trace)
    #         sw.write(zo.compress('\n{}\n'.format(sz)))
    #         sw.write(zo.compress(pickled_trace))
    #         #sw.write(zo.compress('\n\n\n\n'))
    #         # can serialize, but then will have to do book keeping
    #         # for de serializing
    #         #sw.write(trace.serialize())
    #         sw.write(zo.flush())
    # def trace_gen(self):
    #     with fops.ReadLine(self.fname, mode='rb') as sr:
    #         data = []
    #         line = sr.read()
    #         while line != str(''):
    #             if line == str('\n'):
    #                 # remove the last '\n'
    #                 data[-1] = data[-1][:-1]
    #                 yield cP.loads(str('').join(data))
    #                 data = []
    #             else:
    #                 data.append(line)
    #             line = sr.read()
    #     return
class PickleStreamUnCompressor(object):
"""un-compresses a pickled stream from PickleStreamCompressor
read(): data = return unpickle(uncompress(stream))
Provides a read function which returns a generator of stored
pickles. Uses zlib for compression and cPickle for pickling
operations.
"""
    def __init__(self, stream, compression=True):
        """__init__

        Parameters
        ----------
        stream : stream must support the readline() method:
                 Reads till newline and the returned string has
                 newline at the end. EOF is indicated by ''.
                 This follows the exact convention of python's
                 readline()
        compression : Is on, can not be turned off.
        """
        self.stream = stream
        # Single zlib decompressor shared by all reads from this stream:
        self.zo = zlib.decompressobj()
        # Uncompressed-but-unconsumed data carried over between calls:
        self.buf = ''
        return
    def get_data(self):
        """gets arbitrary amounts of data

        Older function which required you to do buffer management.
        The new ones are get_data_size() and get_data_re().

        Returns
        -------
        Returns string of data or '' when the stream is empty.
        """
        ucdata = ''
        # A compressed line can decompress to '' (it may only extend the
        # decompressor's internal state), so keep reading until some output
        # is produced or the stream ends:
        while not ucdata:
            cdata = self.stream.readline()
            if cdata == '':
                # EOF: return whatever remains buffered in the decompressor.
                return self.zo.flush()
            ucdata = self.zo.decompress(cdata)
        return ucdata
def get_data_size(self, size):
"""
Parameters
----------
size : size of data to be retuned.
It is actually the length of the string.
Returns
-------
s: string, such that len(s) == size
if more data than left in the stream is requested, an
exception is raised.
Notes
------
"""
assert(type(size) == int)
ucdata = self.buf
while len(ucdata) < size:
cdata = self.stream.readline()
if cdata == '':
ucdata += self.zo.flush()
if len(ucdata) != size:
raise err.Fatal('size is bigger than left over data!')
else:
ucdata += self.zo.decompress(cdata)
data, self.buf = ucdata[0:size], ucdata[size:]
return data
def get_data_re(self, re):
"""Gets data which matches re
A match is expected to be found, if not, throws an error.
Parameters
----------
re : compiled regex against which is matched
Returns
-------
Notes
------
"""
ucdata = self.buf
mo = re.search(ucdata)
while mo is None:
cdata = self.stream.readline()
if cdata == '':
ucdata += self.zo.flush()
# Different from get_data_size, because this function
# assumes that the end of data stream is not known.
if ucdata == '':
return ''
# if you get some data, there must be a match
mo = re.search(ucdata)
assert(mo is not None)
else:
ucdata += self.zo.decompress(cdata)
# if you get some data, there must be a match
mo = re.search(ucdata)
data = ucdata[mo.start():mo.end()]
self.buf = ucdata[mo.end():]
return data
def uncompressor(self):
"""Diagnose bug:
./scamr.py -f ../examples/vdp/vanDerPol.tst --simulate 100 --prop-check --par --seed 2"""
r = re.compile('\n[0-9]*\n')
data = self.get_data_re(r)
while data:
l = int(data[1:-1])
pickle = self.get_data_size(l)
yield cP.loads(pickle)
data = self.get_data_re(r)
return
def uncompressor2(self):
"""Older function which did buffer management itself"""
DELIM = '\n'
MT = ''
lbuf = MT
while True:
while lbuf.count(DELIM) < 2:
data = self.get_data()
lbuf += data
if data == MT:
return
_, l, buf = lbuf.split(DELIM, 2)
l = int(l)
assert(_ == MT)
while len(buf) < l:
D = self.get_data()
assert(D != MT)
buf += D
pickle, partial_pickle = buf[0:l], buf[l:]
yield cP.loads(pickle)
lbuf = partial_pickle
return
# def uncompressor(self):
# notdone = True
# empty = str('')
# DELIM = str('\n')
# while True:
# ucdata = self.get_data()
# partial_pickle = empty
# while notdone:
# try:
# _, sz, partial_pickle = ucdata.split(DELIM, 2)
# except:
# embed()
# assert(_ == empty)
# assert(partial_pickle != empty)
# sz = int(sz)
# assert(sz > 0)
# while len(partial_pickle) < sz:
# partial_pickle += self.get_data()#self.zo.decompress(self.stream.readline())
# pickle, partial_pickle = partial_pickle[0:sz], partial_pickle[sz:]
# yield cP.loads(pickle)
# notdone = bool(partial_pickle)
# ucdata = str(partial_pickle)
# return
class PickleStreamCompressor(object):
    """Serialize objects into a compressed, length-prefixed byte stream.

    compress() returns zlib-compressed chunks which decompress to
    '\n<size>\n<pickle>' so that PickleStreamUnCompressor can find the
    record boundaries.  Pickling uses cPickle at HIGHEST_PROTOCOL and
    zlib runs at its default compression level.
    """
    def __init__(self, compression=True):
        # `compression` is accepted for interface symmetry only; it can
        # not actually be turned off.
        self.zo = zlib.compressobj()
        return
    def compress(self, data):
        """Return the compressed, size-framed pickle of `data`."""
        pickled = cP.dumps(data, protocol=cP.HIGHEST_PROTOCOL)
        # Two newlines around the size make parsing easy downstream.
        framed = '\n{}\n{}'.format(len(pickled), pickled)
        return self.zo.compress(framed)
    def flush(self):
        """MUST be called at the end to flush remaining data if not
        using as a context manager.
        Follows zlib's interface"""
        return self.zo.flush()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # NOTE(review): the bytes returned by flush() are discarded
        # here; callers who need the compressed tail must call flush()
        # themselves -- TODO confirm this is intended.
        self.flush()
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import re
import time
import unittest
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_false, assert_equal
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access, add_to_group
from desktop.models import Document
from hadoop import cluster
from hadoop.conf import YARN_CLUSTERS
from hadoop.pseudo_hdfs4 import is_live_cluster
from hadoop.yarn import resource_manager_api, mapreduce_api, history_server_api
from liboozie.oozie_api_tests import OozieServerProvider
from oozie.models import Workflow
from jobbrowser import models, views
from jobbrowser.conf import SHARE_JOBS
from jobbrowser.models import can_view_job, can_modify_job, Job, LinkJobLogs
# Module-level logger for these tests.
LOG = logging.getLogger(__name__)
# Flipped to True once the Oozie example apps have been installed; guards
# the one-time, module-wide setup done in install_examples().
_INITIALIZED = False
class TestBrowser():
  """Unit tests for pure helper functions in jobbrowser models/views.

  These need no cluster: each test feeds literal inputs to a helper and
  checks the exact formatted output.
  """
  def test_dots_to_camel_case(self):
    # Dotted configuration names become camelCase identifiers.
    for dotted, camel in [("foo.bar", "fooBar"),
                          ("foo.bar.baz", "fooBarBaz"),
                          ("foo", "foo"),
                          ("foo.", "foo.")]:
      assert_equal(camel, models.dots_to_camel_case(dotted))
  def test_get_path(self):
    # Scheme and host are stripped; only the path remains.
    assert_equal("/foo/bar", models.get_path("hdfs://host/foo/bar"))
  def test_format_counter_name(self):
    # camelCase and UPPER_SNAKE counter names are prettified.
    for raw, pretty in [("fooBar", "Foo Bar"),
                        ("fooBarBaz", "Foo Bar Baz"),
                        ("foo", "Foo"),
                        ("foo.", "Foo."),
                        ("A_BBB_CCC", "A Bbb Ccc")]:
      assert_equal(pretty, views.format_counter_name(raw))
def get_hadoop_job_id(oozie_api, oozie_jobid, action_index=1, timeout=60, step=5):
  """Poll Oozie until the given workflow action has spawned a Hadoop job.

  Sleeps `step` seconds between polls, for at most `timeout` seconds.

  Parameters:
    oozie_api: client whose get_job(jobid) returns a workflow whose
      actions carry an `externalId` (the Hadoop job id, falsy until set).
    oozie_jobid: the Oozie workflow id to poll.
    action_index: which action of the workflow to inspect.

  Returns the Hadoop job id string.
  Raises Exception (with the Oozie job log attached) on timeout.
  """
  deadline = time.time() + timeout
  job_id = None
  while not job_id and time.time() < deadline:
    time.sleep(step)
    job_id = oozie_api.get_job(oozie_jobid).actions[action_index].externalId
  if not job_id:
    # Timed out: include the Oozie log so the failure is diagnosable.
    logs = OozieServerProvider.oozie.get_job_log(oozie_jobid)
    msg = "[%d] %s took more than %d to create a job: %s" % (time.time(), oozie_jobid, timeout, logs)
    LOG.info(msg)
    raise Exception(msg)
  return job_id
class TestJobBrowserWithHadoop(unittest.TestCase, OozieServerProvider):
  """Integration tests: submit a real Oozie MapReduce (sleep) workflow on
  the live test cluster, then exercise the Job Browser pages against the
  resulting Hadoop job (saved as cls.hadoop_job_id by setup_class)."""
  requires_hadoop = True
  @classmethod
  def setup_class(cls):
    # Boot/attach the shared Oozie + Hadoop fixtures first.
    OozieServerProvider.setup_class()
    cls.username = 'hue_jobbrowser_test'
    cls.home_dir = '/user/%s' % cls.username
    cls.cluster.fs.do_as_user(cls.username, cls.cluster.fs.create_home_dir, cls.home_dir)
    cls.client = make_logged_in_client(username=cls.username, is_superuser=False, groupname='test')
    cls.user = User.objects.get(username=cls.username)
    grant_access(cls.username, 'test', 'jobsub')
    grant_access(cls.username, 'test', 'jobbrowser')
    grant_access(cls.username, 'test', 'oozie')
    add_to_group(cls.username)
    # Do all FS work as the test user; restored in teardown_class.
    cls.prev_user = cls.cluster.fs.user
    cls.cluster.fs.setuser(cls.username)
    cls.install_examples()
    cls.design = cls.create_design()
    # Run the sleep example, since it doesn't require user home directory
    design_id = cls.design.id
    response = cls.client.post(reverse('oozie:submit_workflow',
                               args=[design_id]),
                               data={u'form-MAX_NUM_FORMS': [u''],
                                     u'form-INITIAL_FORMS': [u'1'],
                                     u'form-0-name': [u'REDUCER_SLEEP_TIME'],
                                     u'form-0-value': [u'1'],
                                     u'form-TOTAL_FORMS': [u'1']},
                               follow=True)
    oozie_jobid = response.context['oozie_workflow'].id
    OozieServerProvider.wait_until_completion(oozie_jobid)
    # The MR job spawned by the workflow; shared by most tests below.
    cls.hadoop_job_id = get_hadoop_job_id(cls.oozie, oozie_jobid, 1)
    cls.hadoop_job_id_short = views.get_shorter_id(cls.hadoop_job_id)
  @classmethod
  def teardown_class(cls):
    # Best-effort cleanup; never let teardown failures mask test results.
    try:
      Document.objects.filter(name__contains=cls.username).delete()
      Workflow.objects.filter(name__contains=cls.username).delete()
      # Remove user home directories.
      cls.cluster.fs.do_as_superuser(cls.cluster.fs.rmtree, cls.home_dir)
    except:
      LOG.exception('failed to teardown %s' % cls.home_dir)
    cls.cluster.fs.setuser(cls.prev_user)
  @classmethod
  def create_design(cls):
    # Create (once) and return the shared sleep-job MapReduce design.
    job_name = '%s_%s' % (cls.username, 'sleep_job')
    if not Document.objects.available_docs(Workflow, cls.user).filter(name=job_name).exists():
      response = cls.client.post(reverse('jobsub.views.new_design',
        kwargs={'node_type': 'mapreduce'}),
        data={'name': job_name,
              'description': '',
              'node_type': 'mapreduce',
              'jar_path': '/user/hue/oozie/workspaces/lib/hadoop-examples.jar',
              'prepares': '[]',
              'files': '[]',
              'archives': '[]',
              'job_properties': '[{\"name\":\"mapred.reduce.tasks\",\"value\":\"1\"},{\"name\":\"mapred.mapper.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},{\"name\":\"mapred.reducer.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},{\"name\":\"mapred.mapoutput.key.class\",\"value\":\"org.apache.hadoop.io.IntWritable\"},{\"name\":\"mapred.mapoutput.value.class\",\"value\":\"org.apache.hadoop.io.NullWritable\"},{\"name\":\"mapred.output.format.class\",\"value\":\"org.apache.hadoop.mapred.lib.NullOutputFormat\"},{\"name\":\"mapred.input.format.class\",\"value\":\"org.apache.hadoop.examples.SleepJob$SleepInputFormat\"},{\"name\":\"mapred.partitioner.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},{\"name\":\"mapred.speculative.execution\",\"value\":\"false\"},{\"name\":\"sleep.job.map.sleep.time\",\"value\":\"0\"},{\"name\":\"sleep.job.reduce.sleep.time\",\"value\":\"${REDUCER_SLEEP_TIME}\"}]'
        },
        HTTP_X_REQUESTED_WITH='XMLHttpRequest')
      assert_equal(response.status_code, 200)
    return Document.objects.available_docs(Workflow, cls.user).get(name=job_name).content_object
  @classmethod
  def install_examples(cls):
    # Install the Oozie example apps exactly once per test run.
    global _INITIALIZED
    if _INITIALIZED:
      return
    cls.client.post(reverse('oozie:install_examples'))
    cls.cluster.fs.do_as_user(cls.username, cls.cluster.fs.create_home_dir, cls.home_dir)
    cls.cluster.fs.do_as_superuser(cls.cluster.fs.chmod, cls.home_dir, 0777, True)
    _INITIALIZED = True
  def test_uncommon_views(self):
    """
    These views exist, but tend not to be ever called, because they're not in the normal UI.
    """
    raise SkipTest
    TestJobBrowserWithHadoop.client.get("/jobbrowser/clusterstatus")
    TestJobBrowserWithHadoop.client.get("/jobbrowser/queues")
    TestJobBrowserWithHadoop.client.get("/jobbrowser/jobbrowser")
  def test_failed_jobs(self):
    """
    Test jobs with genuine failure, not just killed
    """
    if is_live_cluster():
      raise SkipTest('HUE-2902: Skipping because test is not reentrant')
    # Create design that will fail because the script file isn't there
    INPUT_DIR = TestJobBrowserWithHadoop.home_dir + '/input'
    OUTPUT_DIR = TestJobBrowserWithHadoop.home_dir + '/output'
    try:
      TestJobBrowserWithHadoop.cluster.fs.mkdir(TestJobBrowserWithHadoop.home_dir + "/jt-test_failed_jobs")
      TestJobBrowserWithHadoop.cluster.fs.mkdir(INPUT_DIR)
      TestJobBrowserWithHadoop.cluster.fs.rmtree(OUTPUT_DIR)
    except:
      LOG.exception('failed to teardown tests')
    # The mapper/reducer classes below do not exist, so the job must fail.
    job_name = '%s_%s' % (TestJobBrowserWithHadoop.username, 'test_failed_jobs-1')
    response = TestJobBrowserWithHadoop.client.post(reverse('jobsub.views.new_design', kwargs={'node_type': 'mapreduce'}), {
        'name': [job_name],
        'description': ['description test_failed_jobs-1'],
        'args': '',
        'jar_path': '/user/hue/oozie/workspaces/lib/hadoop-examples.jar',
        'prepares': '[]',
        'archives': '[]',
        'files': '[]',
        'job_properties': ['[{"name":"mapred.input.dir","value":"%s"},\
            {"name":"mapred.output.dir","value":"%s"},\
            {"name":"mapred.mapper.class","value":"org.apache.hadoop.mapred.lib.dne"},\
            {"name":"mapred.combiner.class","value":"org.apache.hadoop.mapred.lib.dne"},\
            {"name":"mapred.reducer.class","value":"org.apache.hadoop.mapred.lib.dne"}]' % (INPUT_DIR, OUTPUT_DIR)]
    }, HTTP_X_REQUESTED_WITH='XMLHttpRequest', follow=True)
    # Submit the job
    design_dict = json.loads(response.content)
    design_id = int(design_dict['id'])
    response = TestJobBrowserWithHadoop.client.post(reverse('oozie:submit_workflow',
                                args=[design_id]),
                                data={u'form-MAX_NUM_FORMS': [u''],
                                      u'form-INITIAL_FORMS': [u'1'],
                                      u'form-0-name': [u'REDUCER_SLEEP_TIME'],
                                      u'form-0-value': [u'1'],
                                      u'form-TOTAL_FORMS': [u'1']},
                                follow=True)
    oozie_jobid = response.context['oozie_workflow'].id
    job = OozieServerProvider.wait_until_completion(oozie_jobid)
    hadoop_job_id = get_hadoop_job_id(TestJobBrowserWithHadoop.oozie, oozie_jobid, 1)
    hadoop_job_id_short = views.get_shorter_id(hadoop_job_id)
    # Select only killed jobs (should be absent)
    # Taking advantage of the fact new jobs are at the top of the list!
    response = TestJobBrowserWithHadoop.client.post('/jobbrowser/jobs/', {'format': 'json', 'state': 'killed'})
    assert_false(hadoop_job_id_short in response.content)
    # Select only failed jobs (should be present)
    # Map job should succeed. Reduce job should fail.
    response = TestJobBrowserWithHadoop.client.post('/jobbrowser/jobs/', {'format': 'json', 'state': 'failed'})
    assert_true(hadoop_job_id_short in response.content)
    raise SkipTest # Not compatible with MR2
    # Everything below is unreachable under MR2 (kept for reference).
    # The single job view should have the failed task table
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s' % (hadoop_job_id,))
    html = response.content.lower()
    assert_true('failed task' in html, html)
    # The map task should say success (empty input)
    map_task_id = TestJobBrowserWithHadoop.hadoop_job_id.replace('job', 'task') + '_m_000000'
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, map_task_id))
    assert_true('succeed' in response.content)
    assert_true('failed' not in response.content)
    # The reduce task should say failed
    reduce_task_id = hadoop_job_id.replace('job', 'task') + '_r_000000'
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks/%s' % (hadoop_job_id, reduce_task_id))
    assert_true('succeed' not in response.content)
    assert_true('failed' in response.content)
    # Selecting by failed state should include the failed map
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks?taskstate=failed' % (hadoop_job_id,))
    assert_true('r_000000' in response.content)
    assert_true('m_000000' not in response.content)
  def test_jobs_page(self):
    # All jobs page and fetch job ID
    # Taking advantage of the fact new jobs are at the top of the list!
    response = TestJobBrowserWithHadoop.client.post('/jobbrowser/jobs/', {'format': 'json'})
    assert_true(TestJobBrowserWithHadoop.hadoop_job_id_short in response.content, response.content)
    # Make sure job succeeded
    response = TestJobBrowserWithHadoop.client.post('/jobbrowser/jobs/', {'format': 'json', 'state': 'completed'})
    assert_true(TestJobBrowserWithHadoop.hadoop_job_id_short in response.content)
    response = TestJobBrowserWithHadoop.client.post('/jobbrowser/jobs/', {'format': 'json', 'state': 'failed'})
    assert_false(TestJobBrowserWithHadoop.hadoop_job_id_short in response.content)
    response = TestJobBrowserWithHadoop.client.post('/jobbrowser/jobs/', {'format': 'json', 'state': 'running'})
    assert_false(TestJobBrowserWithHadoop.hadoop_job_id_short in response.content)
    response = TestJobBrowserWithHadoop.client.post('/jobbrowser/jobs/', {'format': 'json', 'state': 'killed'})
    assert_false(TestJobBrowserWithHadoop.hadoop_job_id_short in response.content)
  def test_tasks_page(self):
    raise SkipTest
    # Test tracker page
    early_task_id = TestJobBrowserWithHadoop.hadoop_job_id.replace('job', 'task') + '_m_000000'
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks/%s' % (TestJobBrowserWithHadoop.hadoop_job_id, early_task_id))
    tracker_url = re.search('<a href="(/jobbrowser/trackers/.+?)"', response.content).group(1)
    response = TestJobBrowserWithHadoop.client.get(tracker_url)
    assert_true('Tracker at' in response.content)
  def test_job_permissions(self):
    # Login as ourself
    # The job owner sees their job regardless of the SHARE_JOBS setting.
    finish = SHARE_JOBS.set_for_testing(True)
    try:
      response = TestJobBrowserWithHadoop.client.post('/jobbrowser/jobs/', {'format': 'json', 'user': ''})
      assert_true(TestJobBrowserWithHadoop.hadoop_job_id_short in response.content)
    finally:
      finish()
    finish = SHARE_JOBS.set_for_testing(False)
    try:
      response = TestJobBrowserWithHadoop.client.post('/jobbrowser/jobs/', {'format': 'json', 'user': ''})
      assert_true(TestJobBrowserWithHadoop.hadoop_job_id_short in response.content)
    finally:
      finish()
    # Login as someone else
    # A different user sees the job only while job sharing is enabled.
    client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test')
    grant_access("not_me", "test", "jobbrowser")
    finish = SHARE_JOBS.set_for_testing(True)
    try:
      response = client_not_me.post('/jobbrowser/jobs/', {'format': 'json', 'user': ''})
      assert_true(TestJobBrowserWithHadoop.hadoop_job_id_short in response.content)
    finally:
      finish()
    finish = SHARE_JOBS.set_for_testing(False)
    try:
      response = client_not_me.post('/jobbrowser/jobs/', {'format': 'json', 'user': ''})
      assert_false(TestJobBrowserWithHadoop.hadoop_job_id_short in response.content)
    finally:
      finish()
  def test_job_counter(self):
    raise SkipTest
    # Single job page
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s' % TestJobBrowserWithHadoop.hadoop_job_id)
    # Check some counters for single job.
    counters = response.context['job'].counters
    counters_file_bytes_written = counters['org.apache.hadoop.mapreduce.FileSystemCounter']['counters']['FILE_BYTES_WRITTEN']
    assert_true(counters_file_bytes_written['map'] > 0)
    assert_true(counters_file_bytes_written['reduce'] > 0)
  def test_task_page(self):
    raise SkipTest
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks' % (TestJobBrowserWithHadoop.hadoop_job_id,))
    assert_true(len(response.context['page'].object_list), 4)
    # Select by tasktype
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks?tasktype=reduce' % (TestJobBrowserWithHadoop.hadoop_job_id,))
    assert_true(len(response.context['page'].object_list), 1)
    # Select by taskstate
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (TestJobBrowserWithHadoop.hadoop_job_id,))
    assert_true(len(response.context['page'].object_list), 4)
    # Select by text
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/tasks?tasktext=clean' % (TestJobBrowserWithHadoop.hadoop_job_id,))
    assert_true(len(response.context['page'].object_list), 1)
  def test_job_single_logs(self):
    if not is_live_cluster():
      raise SkipTest
    response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/%s/single_logs?format=json' % (TestJobBrowserWithHadoop.hadoop_job_id))
    json_resp = json.loads(response.content)
    assert_true('logs' in json_resp)
    # logs[1..3] are stdout, stderr and syslog in that order.
    assert_true('Log Type: stdout' in json_resp['logs'][1])
    assert_true('Log Type: stderr' in json_resp['logs'][2])
    assert_true('Log Type: syslog' in json_resp['logs'][3])
    # Verify that syslog contains log information for a completed oozie job
    match = re.search(r"^Log Type: syslog(.+)Log Length: (?P<log_length>\d+)(.+)$", json_resp['logs'][3], re.DOTALL)
    assert_true(match and match.group(2), 'Failed to parse log length from syslog')
    log_length = match.group(2)
    assert_true(log_length > 0, 'Log Length is 0, expected content in syslog.')
class TestMapReduce1NoHadoop:
  """ACL checks against a mocked MR1 job -- no cluster required."""
  def test_acls_job(self):
    mr1_job = MockMr1Job()
    # 'test' appears in both the view and modify ACLs of the mock.
    for check in (can_view_job, can_modify_job):
      assert_true(check('test', mr1_job))
    # 'test2' is in neither ACL, so both checks must be denied.
    for check in (can_view_job, can_modify_job):
      assert_false(check('test2', mr1_job))
class MockMr1Job(Job):
  """Minimal MR1 job stand-in: ACLs enabled, user 'test' may both view
  and modify.  Only the attributes the ACL checks read are populated."""
  def __init__(self):
    self.is_mr2 = False
    # Conf keys mirror what a real JobTracker job would report.
    acl_conf = {
      'mapreduce.cluster.acls.enabled': True,
      'mapreduce.job.acl-modify-job': 'test',
      'mapreduce.job.acl-view-job': 'test',
    }
    self._full_job_conf = acl_conf
class TestMapReduce2NoHadoop:
  """Job Browser tests against fully mocked YARN/MR2 REST APIs.

  setUp monkey-patches the API factory functions with the Mock* classes
  defined below; tearDown restores the real factories.  No cluster is
  required.
  """
  def setUp(self):
    # Beware: Monkey patching.  Save the real factories exactly once --
    # the guarded attribute names must match the saved attribute names,
    # otherwise a second setUp (without an interleaved tearDown) would
    # overwrite the saved originals with the mocks installed below.
    if not hasattr(resource_manager_api, 'old_get_resource_manager_api'):
      resource_manager_api.old_get_resource_manager_api = resource_manager_api.get_resource_manager
    if not hasattr(mapreduce_api, 'old_get_mapreduce_api'):
      mapreduce_api.old_get_mapreduce_api = mapreduce_api.get_mapreduce_api
    if not hasattr(history_server_api, 'old_get_history_server_api'):
      history_server_api.old_get_history_server_api = history_server_api.get_history_server_api
    # Three users: job owner, ACL-view-only user, no-access user.
    self.c = make_logged_in_client(is_superuser=False)
    grant_access("test", "test", "jobbrowser")
    self.user = User.objects.get(username='test')
    self.c2 = make_logged_in_client(is_superuser=False, username="test2")
    grant_access("test2", "test2", "jobbrowser")
    self.user2 = User.objects.get(username='test2')
    self.c3 = make_logged_in_client(is_superuser=False, username="test3")
    grant_access("test3", "test3", "jobbrowser")
    self.user3 = User.objects.get(username='test3')
    # Install the mocks.
    resource_manager_api.get_resource_manager = lambda username: MockResourceManagerApi(username)
    mapreduce_api.get_mapreduce_api = lambda username: MockMapreduceApi(username)
    history_server_api.get_history_server_api = lambda username: HistoryServerApi(username)
    self.finish = [
      YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True),
      SHARE_JOBS.set_for_testing(False)
    ]
    assert_true(cluster.is_yarn())
  def tearDown(self):
    # Restore the real API factories saved by setUp.
    resource_manager_api.get_resource_manager = getattr(resource_manager_api, 'old_get_resource_manager_api')
    mapreduce_api.get_mapreduce_api = getattr(mapreduce_api, 'old_get_mapreduce_api')
    history_server_api.get_history_server_api = getattr(history_server_api, 'old_get_history_server_api')
    for f in self.finish:
      f()
  def test_jobs(self):
    # The mock RM exposes exactly 4 applications.
    response = self.c.post('/jobbrowser/', {'format': 'json'})
    response_content = json.loads(response.content)
    assert_equal(len(response_content['jobs']), 4)
    # Text filtering narrows the list down to one match.
    response = self.c.post('/jobbrowser/jobs/', {'format': 'json', 'text': 'W=MapReduce-copy2'})
    response_content = json.loads(response.content)
    assert_equal(len(response_content['jobs']), 1)
  def test_applications_no_start_time(self):
    # App 0007 has startedTime == 0; formatted times must come back empty.
    response = self.c.post('/jobbrowser/', {'format': 'json'})
    data = json.loads(response.content)
    job = [j for j in data['jobs'] if j['id'] == 'application_1428442704693_0007']
    assert_true(job, job)
    job = job[0]
    assert_equal('', job['startTimeFormatted'], data)
    assert_equal('', job['durationFormatted'], data)
  def test_running_job(self):
    # Both the application_* and job_* id forms resolve to the same job.
    response = self.c.get('/jobbrowser/jobs/application_1356251510842_0054')
    assert_true('job_1356251510842_0054' in response.content, response.content)
    assert_true('RUNNING' in response.content)
    response = self.c.get('/jobbrowser/jobs/job_1356251510842_0054')
    assert_true('job_1356251510842_0054' in response.content)
    assert_true('RUNNING' in response.content)
  def test_application_no_start_time(self):
    response = self.c.get('/jobbrowser/jobs/application_1428442704693_0007?format=json')
    data = json.loads(response.content)
    assert_equal('', data['job']['startTimeFormatted'], data)
    assert_equal('', data['job']['durationFormatted'], data)
  def test_finished_job(self):
    response = self.c.get('/jobbrowser/jobs/application_1356251510842_0009')
    assert_equal(response.context['job'].jobId, 'job_1356251510842_0009')
    response = self.c.get('/jobbrowser/jobs/job_1356251510842_0009')
    assert_equal(response.context['job'].jobId, 'job_1356251510842_0009')
  def test_spark_job(self):
    # Spark apps keep their application_* id (no MR job behind them).
    response = self.c.get('/jobbrowser/jobs/application_1428442704693_0006')
    assert_equal(response.context['job'].jobId, 'application_1428442704693_0006')
  def test_yarn_job(self):
    response = self.c.get('/jobbrowser/jobs/application_1428442704693_0007')
    assert_equal(response.context['job'].jobId, 'job_1356251510842_0009')
  def job_not_assigned(self):
    response = self.c.get('/jobbrowser/jobs/job_1356251510842_0009/job_not_assigned//my_url')
    assert_equal(response.context['jobid'], 'job_1356251510842_0009')
    assert_equal(response.context['path'], '/my_url')
    response = self.c.get('/jobbrowser/jobs/job_1356251510842_0009/job_not_assigned//my_url?format=json')
    result = json.loads(response.content)
    assert_equal(result['status'], 0)
  def test_acls_job(self):
    # Mock conf grants view to test,test2 and modify to test only.
    response = self.c.get('/jobbrowser/jobs/job_1356251510842_0054') # Check in perm decorator
    assert_true(can_view_job('test', response.context['job']))
    assert_true(can_modify_job('test', response.context['job']))
    assert_true(can_view_job('test2', response.context['job']))
    assert_false(can_modify_job('test2', response.context['job']))
    assert_false(can_view_job('test3', response.context['job']))
    assert_false(can_modify_job('test3', response.context['job']))
    response2 = self.c3.get('/jobbrowser/jobs/job_1356251510842_0054')
    # The apostrophe is HTML-escaped ('&#39;') in the rendered response.
    assert_true('don&#39;t have permission to access job' in response2.content, response2.content)
  def test_kill_job(self):
    job_id = 'application_1356251510842_0054'
    try:
      response = self.c.post('/jobbrowser/jobs/%s/kill?format=json' % job_id)
      assert_equal(json.loads(response.content), {"status": 0})
    finally:
      # Undo the state mutation done by MockMapreduce2Api.kill().
      MockResourceManagerApi.APPS[job_id]['state'] = 'RUNNING'
    # A non-owner without modify rights may not kill the job.
    response = self.c2.post('/jobbrowser/jobs/%s/kill?format=json' % job_id)
    assert_true('Kill operation is forbidden.' in response.content, response.content)
class MockResourceManagerApi:
  """In-memory stand-in for the YARN ResourceManager REST API.

  APPS maps application id -> the dict the real RM would return for that
  application.  Covers one RUNNING MR app, one FINISHED MR app, one
  RUNNING Spark app and one FINISHED plain-YARN app (with startedTime=0
  to exercise the missing-start-time formatting path)."""
  APPS = {
    'application_1356251510842_0054': {
      u'finishedTime': 1356961070119,
      u'name': u'oozie:launcher:T=map-reduce:W=MapReduce-copy:A=Sleep:ID=0000004-121223003201296-oozie-oozi-W',
      u'amContainerLogs': u'http://localhost:8042/node/containerlogs/container_1356251510842_0054_01_000001/romain',
      u'clusterId': 1356251510842,
      u'trackingUrl': u'http://localhost:8088/proxy/application_1356251510842_0054/jobhistory/job/job_1356251510842_0054',
      u'amHostHttpAddress': u'localhost:8042',
      u'startedTime': 1356961057225,
      u'queue': u'default',
      u'state': u'RUNNING',
      u'elapsedTime': 12894,
      u'finalStatus': u'UNDEFINED',
      u'diagnostics': u'',
      u'progress': 100.0,
      u'trackingUI': u'History',
      u'id': u'application_1356251510842_0054',
      u'user': u'test',
      # For when the job is KILLED
      u'startTime': 1356961057226,
      u'finishTime': 1356961057226,
      u'applicationType': 'MAPREDUCE'
    },
    'application_1356251510842_0009': {
      u'finishedTime': 1356467118570,
      u'name': u'oozie:action:T=map-reduce:W=MapReduce-copy2:A=Sleep:ID=0000002-121223003201296-oozie-oozi-W',
      u'amContainerLogs': u'http://localhost:8042/node/containerlogs/container_1356251510842_0009_01_000001/romain',
      u'clusterId': 1356251510842,
      u'trackingUrl': u'http://localhost:8088/proxy/application_1356251510842_0009/jobhistory/job/job_1356251510842_0009',
      u'amHostHttpAddress': u'localhost:8042',
      u'startedTime': 1356467081121,
      u'queue': u'default',
      u'state': u'FINISHED',
      u'elapsedTime': 37449,
      u'finalStatus': u'SUCCEEDED',
      u'diagnostics': u'',
      u'progress': 100.0,
      u'trackingUI': u'History',
      u'id': u'application_1356251510842_0009',
      u'user': u'test',
      u'applicationType': 'MAPREDUCE'
    },
    'application_1428442704693_0006': {
      u'allocatedMB': 4096,
      u'allocatedVCores': 3,
      u'amContainerLogs': u'http://localhost:8042/node/containerlogs/container_1428442704693_0006_01_000001/erickt',
      u'amHostHttpAddress': u'localhost:8042',
      u'applicationTags': u'',
      u'applicationType': u'SPARK',
      u'clusterId': 1428442704693,
      u'diagnostics': u'',
      u'elapsedTime': 529040,
      u'finalStatus': u'UNDEFINED',
      u'finishedTime': 0,
      u'id': u'application_1428442704693_0006',
      u'memorySeconds': 2138468,
      u'name': u'Spark shell',
      u'numAMContainerPreempted': 0,
      u'numNonAMContainerPreempted': 0,
      u'preemptedResourceMB': 0,
      u'preemptedResourceVCores': 0,
      u'progress': 10.0,
      u'queue': u'root.erickt',
      u'runningContainers': 3,
      u'startedTime': 1428443335161,
      u'state': u'RUNNING',
      u'trackingUI': u'ApplicationMaster',
      u'trackingUrl': u'http://localhost:8088/proxy/application_1428442704693_0006/',
      u'user': u'test',
      u'vcoreSeconds': 1567,
    },
    'application_1428442704693_0007': {
      u'allocatedMB': -1,
      u'allocatedVCores': -1,
      u'applicationTags': u'',
      u'applicationType': u'YARN',
      u'clusterId': 1428442704693,
      u'diagnostics': u'',
      u'elapsedTime': 4056,
      u'finalStatus': u'SUCCEEDED',
      u'finishedTime': 1428454945371,
      u'id': u'application_1428442704693_0007',
      u'memorySeconds': 2290,
      u'name': u'UnmanagedAM',
      u'numAMContainerPreempted': 0,
      u'numNonAMContainerPreempted': 0,
      u'preemptedResourceMB': 0,
      u'preemptedResourceVCores': 0,
      u'progress': 100.0,
      u'queue': u'root.erickt',
      u'runningContainers': -1,
      # startedTime of 0 drives the "no start time" formatting tests.
      u'startedTime': 0,
      u'state': u'FINISHED',
      u'trackingUI': u'History',
      u'trackingUrl': u'http://N/A',
      u'user': u'test',
      u'vcoreSeconds': 1,
    }
  }
  # Signature mirrors the real API client's constructor.
  def __init__(self, user, rm_url=None): pass
  def apps(self, **kwargs):
    """Return all known applications (any filter kwargs are ignored)."""
    return {
      'apps': {
        'app': [
          # RUNNING
          MockResourceManagerApi.APPS['application_1356251510842_0054'],
          # FINISHED
          MockResourceManagerApi.APPS['application_1356251510842_0009'],
          # SPARK
          MockResourceManagerApi.APPS['application_1428442704693_0006'],
          # YARN
          MockResourceManagerApi.APPS['application_1428442704693_0007'],
        ]
      }
    }
  def app(self, job_id):
    """Return the single-application payload for `job_id`.
    Raises KeyError for unknown ids (like a 404 from the real API)."""
    return {
      u'app': MockResourceManagerApi.APPS[job_id]
    }
class MockMapreduce2Api(object):
  """
  MockMapreduceApi and HistoryServerApi are very similar and inherit from it.

  Each method returns a canned dict shaped like the corresponding MR2
  REST API response; only kill() has a side effect (it mutates
  MockResourceManagerApi.APPS).
  """
  # Signature mirrors the real API client's constructor.
  def __init__(self, mr_url=None): pass
  def tasks(self, job_id):
    """One SUCCEEDED map task and one SCHEDULED reduce task."""
    return {
      u'tasks': {
        u'task': [{
          u'finishTime': 1357153330271, u'successfulAttempt': u'attempt_1356251510842_0062_m_000000_0', u'elapsedTime': 1901, u'state': u'SUCCEEDED',
          u'startTime': 1357153328370, u'progress': 100.0, u'type': u'MAP', u'id': u'task_1356251510842_0062_m_000000'},
          {
          u'finishTime': 0, u'successfulAttempt': u'', u'elapsedTime': 0, u'state': u'SCHEDULED', u'startTime': 1357153326322, u'progress': 0.0,
          u'type': u'REDUCE', u'id': u'task_1356251510842_0062_r_000000'}
        ]
      }
    }
  def conf(self, job_id):
    """Job conf with ACLs: modify for 'test', view for 'test,test2'."""
    return {
      "conf" : {
        "path" : "hdfs://host.domain.com:9000/user/user1/.staging/job_1326232085508_0004/job.xml",
        "property" : [
          {
             "name" : "dfs.datanode.data.dir",
             "value" : "/home/hadoop/hdfs/data",
          }, {
             "name" : "mapreduce.job.acl-modify-job",
             "value" : "test",
          }, {
             "name" : "mapreduce.job.acl-view-job",
             "value" : "test,test2",
          }
        ]
      }
    }
  def job_attempts(self, job_id):
    """A single AM attempt with its container/log links."""
    return {
      "jobAttempts" : {
        "jobAttempt" : [
          {
             "nodeId" : "host.domain.com:8041",
             "nodeHttpAddress" : "host.domain.com:8042",
             "startTime" : 1326238773493,
             "id" : 1,
             "logsLink" : "http://host.domain.com:8042/node/containerlogs/container_1326232085508_0004_01_000001",
             "containerId" : "container_1326232085508_0004_01_000001"
          }
        ]
      }
    }
  def task_attempts(self, job_id, task_id):
    """A single RUNNING reduce attempt (job/task ids are ignored)."""
    return {
      "taskAttempts" : {
        "taskAttempt" : [
          {
             "elapsedMergeTime" : 47,
             "shuffleFinishTime" : 1326238780052,
             "assignedContainerId" : "container_1326232085508_0004_01_000003",
             "progress" : 100,
             "elapsedTime" : 0,
             "state" : "RUNNING",
             "elapsedShuffleTime" : 2592,
             "mergeFinishTime" : 1326238780099,
             "rack" : "/98.139.92.0",
             "elapsedReduceTime" : 0,
             "nodeHttpAddress" : "host.domain.com:8042",
             "type" : "REDUCE",
             "startTime" : 1326238777460,
             "id" : "attempt_1326232085508_4_4_r_0_0",
             "finishTime" : 0
          }
        ]
      }
    }
  def counters(self, job_id):
    """Two counter groups (file input/output format), all values zero."""
    return {
      "jobCounters" : {
        "id" : "job_1326232085508_4_4",
        "counterGroup" : [
          {
            "counterGroupName" : "org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter",
            "counter" : [
              {
                "reduceCounterValue" : 0,
                "mapCounterValue" : 0,
                "totalCounterValue" : 0,
                "name" : "BYTES_READ"
              }
            ]
          },
          {
            "counterGroupName" : "org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter",
            "counter" : [
              {
                "reduceCounterValue" : 0,
                "mapCounterValue" : 0,
                "totalCounterValue" : 0,
                "name" : "BYTES_WRITTEN"
              }
            ]
          }
        ]
      }
    }
  def kill(self, job_id):
    # Mutates the shared RM fixture: tests that call kill must restore
    # the previous 'state' themselves (see test_kill_job).
    job_id = job_id.replace('job', 'application')
    MockResourceManagerApi.APPS[job_id]['state'] = 'KILLED'
    return {}
class MockMapreduceApi(MockMapreduce2Api):
  """Mock of the MR2 "running jobs" API.

  Knows only the RUNNING job; for the finished job 1356251510842_0009 it
  returns None, matching the real API where finished jobs are served by
  the history server instead."""
  def job(self, user, job_id):
    if '1356251510842_0009' in job_id:
      # Finished job: only HistoryServerApi knows about it.
      return None
    running_job = {
      u'job': {
        u'reducesCompleted': 0, u'mapsRunning': 1, u'id': u'job_1356251510842_0054', u'successfulReduceAttempts': 0, u'successfulMapAttempts': 0,
        u'uberized': False, u'reducesTotal': 1, u'elapsedTime': 3426, u'mapsPending': 0, u'state': u'RUNNING', u'failedReduceAttempts': 0,
        u'mapsCompleted': 0, u'killedMapAttempts': 0, u'killedReduceAttempts': 0, u'runningReduceAttempts': 0, u'failedMapAttempts': 0, u'mapsTotal': 1,
        u'user': u'test', u'startTime': 1357152972886, u'reducesPending': 1, u'reduceProgress': 0.0, u'finishTime': 0,
        u'name': u'select avg(salary) from sample_07(Stage-1)', u'reducesRunning': 0, u'newMapAttempts': 0, u'diagnostics': u'', u'mapProgress': 0.0,
        u'runningMapAttempts': 1, u'newReduceAttempts': 1,
        # Does not seems to exist in API, we actually skip it in case.
        "acls" : [{
            "value" : "test",
            "name" : "mapreduce.job.acl-modify-job"
          }, {
            "value" : "test",
            "name" : "mapreduce.job.acl-view-job"
          }
        ],
      }
    }
    # Echo back whichever id the caller asked with.
    running_job['job']['id'] = job_id
    return running_job
class HistoryServerApi(MockMapreduce2Api):
  """Mock of the MR2 history-server REST API (finished jobs only)."""

  def __init__(self, hs_url=None):
    # The real client connects to hs_url; the mock keeps no state.
    pass

  def job(self, user, job_id):
    # The two known jobs share every field except state, start time and
    # id: ..._0054 was KILLED, everything else resolves to the
    # SUCCEEDED ..._0009 job.
    if job_id == '1356251510842_0054':
      state, start_time, returned_id = u'KILLED', 1357151916268, job_id
    else:
      state, start_time, returned_id = u'SUCCEEDED', 0, u'job_1356251510842_0009'

    return {
      u'job': {
        u'reducesCompleted': 1, u'avgMapTime': 1798, u'avgMergeTime': 1479, u'id': returned_id,
        u'successfulReduceAttempts': 1, u'successfulMapAttempts': 2, u'uberized': False, u'reducesTotal': 1,
        u'state': state, u'failedReduceAttempts': 0, u'mapsCompleted': 2,
        u'killedMapAttempts': 0, u'diagnostics': u'', u'mapsTotal': 2, u'user': u'test',
        u'startTime': start_time, u'avgReduceTime': 137,
        u'finishTime': 1357151923925, u'name': u'oozie:action:T=map-reduce:W=MapReduce-copy:A=Sleep:ID=0000004-121223003201296-oozie-oozi-W',
        u'avgShuffleTime': 1421, u'queue': u'default', u'killedReduceAttempts': 0, u'failedMapAttempts': 0
      }
    }
def test_make_log_links():
  """
  Unit test for models.LinkJobLogs._make_links

  Each assertion feeds a raw log fragment through _make_links and checks
  the exact HTML produced. The vectors are byte-exact on purpose: spacing
  and trailing punctuation around the linkified token are part of the
  contract being tested.
  """
  # FileBrowser: hdfs:// URIs and absolute paths become /filebrowser links.
  assert_equal(
      """<a href="/filebrowser/view=/user/romain/tmp">hdfs://localhost:8020/user/romain/tmp</a> <dir>""",
      LinkJobLogs._make_links('hdfs://localhost:8020/user/romain/tmp <dir>')
  )
  assert_equal(
      """<a href="/filebrowser/view=/user/romain/tmp">hdfs://localhost:8020/user/romain/tmp</a><dir>""",
      LinkJobLogs._make_links('hdfs://localhost:8020/user/romain/tmp<dir>')
  )
  assert_equal(
      """output: <a href="/filebrowser/view=/user/romain/tmp">/user/romain/tmp</a> <dir>""",
      LinkJobLogs._make_links('output: /user/romain/tmp <dir>')
  )
  # Paths embedded inside quoted prose are still linkified.
  assert_equal(
      'Successfully read 3760 records (112648 bytes) from: "<a href="/filebrowser/view=/user/hue/pig/examples/data/midsummer.txt">/user/hue/pig/examples/data/midsummer.txt</a>"',
      LinkJobLogs._make_links('Successfully read 3760 records (112648 bytes) from: "/user/hue/pig/examples/data/midsummer.txt"')
  )
  # Trailing comma / newline after the URI must be preserved outside the link.
  assert_equal(
      'data,upper_case  MAP_ONLY  <a href="/filebrowser/view=/user/romain/out/fffff">hdfs://localhost:8020/user/romain/out/fffff</a>,',
      LinkJobLogs._make_links('data,upper_case  MAP_ONLY  hdfs://localhost:8020/user/romain/out/fffff,')
  )
  assert_equal(
      'MAP_ONLY  <a href="/filebrowser/view=/user/romain/out/fffff">hdfs://localhost:8020/user/romain/out/fffff</a>\n2013',
      LinkJobLogs._make_links('MAP_ONLY  hdfs://localhost:8020/user/romain/out/fffff\n2013')
  )
  assert_equal(
      ' <a href="/filebrowser/view=/jobs.tsv">/jobs.tsv</a> ',
      LinkJobLogs._make_links(' /jobs.tsv ')
  )
  assert_equal(
      '<a href="/filebrowser/view=/user/romain/job_pos_2012.tsv">hdfs://localhost:8020/user/romain/job_pos_2012.tsv</a>',
      LinkJobLogs._make_links('hdfs://localhost:8020/user/romain/job_pos_2012.tsv')
  )
  # JobBrowser: job_<cluster>_<seq> ids become /jobbrowser links.
  assert_equal(
      """<a href="/jobbrowser/jobs/job_201306261521_0058">job_201306261521_0058</a>""",
      LinkJobLogs._make_links('job_201306261521_0058')
  )
  assert_equal(
      """Hadoop Job IDs executed by Pig: <a href="/jobbrowser/jobs/job_201306261521_0058">job_201306261521_0058</a>""",
      LinkJobLogs._make_links('Hadoop Job IDs executed by Pig: job_201306261521_0058')
  )
  assert_equal(
      """MapReduceLauncher  - HadoopJobId: <a href="/jobbrowser/jobs/job_201306261521_0058">job_201306261521_0058</a>""",
      LinkJobLogs._make_links('MapReduceLauncher  - HadoopJobId: job_201306261521_0058')
  )
  # A job id glued to a URL query string is still extracted and linked.
  assert_equal(
      """- More information at: http://localhost:50030/jobdetails.jsp?jobid=<a href="/jobbrowser/jobs/job_201306261521_0058">job_201306261521_0058</a>""",
      LinkJobLogs._make_links('- More information at: http://localhost:50030/jobdetails.jsp?jobid=job_201306261521_0058')
  )
  # Ids followed by a path separator or embedded in filenames keep their suffix.
  assert_equal(
      """ Logging error messages to: <a href="/jobbrowser/jobs/job_201307091553_0028">job_201307091553_0028</a>/attempt_201307091553_002""",
      LinkJobLogs._make_links(' Logging error messages to: job_201307091553_0028/attempt_201307091553_002')
  )
  assert_equal(
      """ pig-<a href="/jobbrowser/jobs/job_201307091553_0028">job_201307091553_0028</a>.log""",
      LinkJobLogs._make_links(' pig-job_201307091553_0028.log')
  )
  assert_equal(
      """MapReduceLauncher  - HadoopJobId: <a href="/jobbrowser/jobs/job_201306261521_0058">job_201306261521_0058</a>. Look at the UI""",
      LinkJobLogs._make_links('MapReduceLauncher  - HadoopJobId: job_201306261521_0058. Look at the UI')
  )
|
|
'''
The MIT License (MIT)
Copyright (c) 2015 Thami Rusdi Agus - https://github.com/janglapuk/SPB-OpenCV-Recognizer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from tkinter import messagebox, Tk
import pygubu, re
import util
import camera
from PIL import ImageTk, Image
import constants
import os, sys
# import custom widgets
sys.path.append(constants.UI_DIR)
class GUI:
    """Main pygubu/Tk window of the SPB OpenCV recognizer application.

    Loads the .ui layout, wires widget callbacks, manages the camera
    lifecycle (start/stop/mode switches) and renders camera frames onto
    the main canvas.
    """

    # Class-level defaults; instances shadow these once assigned.
    camera = None        # active camera.Camera instance, or None while off
    camera_run = False
    camera_list = []
    canvas = None        # cached handle of the 'canvas_cam' widget
    DB_toggle = False    # True while face-database capture mode is enabled

    def __init__(self):
        """Build the Tk root, load the pygubu layout and initialize widgets."""
        self.root = root = Tk()
        self.builder = builder = pygubu.Builder()
        toplevel = root.winfo_toplevel()
        toplevel.title("%s %s" % (constants.APP_NAME, constants.APP_VERSION))
        builder.add_from_file(constants.UI_MAIN_FILE)
        builder.get_object('frame_main', root)
        # Bind methods of this object as the UI callbacks declared in the .ui file.
        builder.connect_callbacks(self)
        self._make_center(toplevel)
        self._init_camera_sources()
        self._init_masks()

    def _init_camera_sources(self):
        """Populate the camera-source combobox; disable the UI if none found."""
        # Probe at most 2 camera indices via the util helper.
        cam_list = util.get_cameras(2)
        cam_source = self.builder.get_object('combo_source')
        cam_dict = {}
        for cam in cam_list:
            cam_dict['Camera #%d' % cam] = int(cam)
        cam_source.setdict(cam_dict)
        cam_source.current(0)
        cam_source.configure(state='readonly')
        if cam_source.dictlength() == 0:
            cam_toggle = self.builder.get_object('chk_cam_toggle')
            cam_toggle.configure(state='disabled')

            def show_error():
                # Error text is user-facing (Indonesian): "no usable camera
                # source; check your camera device".
                messagebox.showerror("Kesalahan",
                                     "Nampaknya tidak ada sumber kamera yang dapat digunakan pada aplikasi ini. " +
                                     "Silakan periksa perangkat kamera anda dan pastikan telah bekerja dengan baik.")
            # Defer the dialog so it appears after the mainloop has started.
            cam_toggle.after(1000, show_error)

    def _init_masks(self):
        """Fill the mask combobox with every .png found in the mask directory."""
        combox = self.builder.get_object('combo_mask')
        masks = []
        for _, _, files in os.walk(constants.RESOURCE_MASK_DIR):
            for file in files:
                if file.endswith('.png'):
                    masks.append(file)
        combox.configure(values=masks)
        # NOTE(review): current(0) assumes at least one mask file exists —
        # TODO confirm the resource directory is never empty.
        combox.current(0)

    def _make_center(self, top_level):
        """Center the toplevel window on the screen."""
        top_level.update_idletasks()
        w = top_level.winfo_screenwidth()
        h = top_level.winfo_screenheight()
        # geometry() returns "WxH+X+Y"; keep only the WxH part.
        size = tuple(int(_) for _ in top_level.geometry().split('+')[0].split('x'))
        x = w/2 - size[0]/2
        y = h/2 - size[1]/2
        top_level.geometry("%dx%d+%d+%d" % (size + (x, y)))

    def _combo_source_handler(self, event):
        """Camera-source selection callback (currently debug-print only)."""
        print(event)
        pass

    def _set_default_screen(self):
        """Show the static background image on the camera canvas."""
        canvas = self.builder.get_object('canvas_cam')
        im = Image.open(os.path.join(constants.RESOURCE_DIR, 'bg.jpg'))
        # Keep a reference on the widget so Tk doesn't garbage-collect the image.
        canvas.image = ImageTk.PhotoImage(im)
        canvas.delete('all')
        canvas.create_image(0, 0, anchor='nw', image=canvas.image)
        canvas.update()

    def _on_cam_toggle(self):
        """Start or stop the camera depending on the checkbox state."""
        var = self.builder.get_variable('var_cam_toggle')
        activated = bool(var.get())
        cam_source = self.builder.get_object('combo_source')
        if activated:
            # Lock the source selection while the camera is running.
            cam_source.configure(state='disabled')
            self.builder.get_object('btn_DB_toggle').configure(state='normal')
            self.start_camera()
            self.log("Camera: ON")
        else:
            cam_source.configure(state='readonly')
            self.builder.get_object('btn_DB_toggle').configure(state='disabled')
            self.stop_camera()
            self.log("Camera: OFF")

    def _on_face_recognizer_clicked(self):
        """Switch the running camera to face-recognition mode."""
        print('_on_face_recognize_clicked')
        if self.camera is not None:
            self.camera.setmode(camera.CAMERA_MODE_FACE_RECOG)
            self.log("Mode: Pengenalan wajah")

    def _on_default_clicked(self):
        """Switch the running camera back to plain pass-through mode."""
        print('_on_default_clicked')
        if self.camera is not None:
            self.camera.setmode(camera.CAMERA_MODE_DEFAULT)
            self.log("Mode: Default")

    def _on_face_changer_clicked(self):
        """Switch the running camera to face-changer (mask overlay) mode."""
        if self.camera is not None:
            self.camera.setmode(camera.CAMERA_MODE_FACE_CHANGER)
            self.log("Mode: Pengubah Wajah")

    def _on_object_tracking_clicked(self):
        """Switch the running camera to gesture/object-tracking mode."""
        if self.camera is not None:
            self.camera.setmode(camera.CAMERA_MODE_GEST_RECOG)
            self.log("Mode: Pelacak Objek")

    def _on_db_toggle(self):
        """Toggle database-capture mode: enable/disable the snap controls."""
        self.DB_toggle = not self.DB_toggle
        if self.DB_toggle:
            self.builder.get_object('btn_DB_toggle').configure(text='Off')
            self.builder.get_object('btn_DB_snap').configure(state='normal')
            entry = self.builder.get_object('entry_DB_name')
            entry.configure(state='normal')
            entry.focus_set()
            self.log("Database ON")
        else:
            self.builder.get_object('btn_DB_toggle').configure(text='On')
            self.builder.get_object('btn_DB_snap').configure(state='disabled')
            self.builder.get_object('entry_DB_name').configure(state='disabled')
            self.builder.get_variable("var_entry_DB_name").set('')
            self.log("Database: OFF")

    def _on_db_snap_clicked(self):
        """Capture a face snapshot under the name typed in the DB entry.

        Rejects names shorter than 3 characters with a user-facing error.
        """
        var_db_name = self.builder.get_variable("var_entry_DB_name")
        if len(var_db_name.get()) < 3:
            messagebox.showerror("Error", "Nama untuk database kurang dari 3 karakter!")
            self.builder.get_object('entry_DB_name').focus_set()
        else:
            if self.camera is not None:
                var = self.builder.get_variable('var_entry_DB_name')
                self.camera.snap_face(var.get())

    def _selected_cam(self):
        '''
        !DEPRECATED!
        Extract the numeric camera index from the selection variable.
        :return: first digit found in the selection string, or -1 if none.
        '''
        var = self.builder.get_variable('var_selected_cam')
        sel = re.findall(r'\d', var.get())
        if len(sel) > 0:
            return sel[0]
        return -1

    def log(self, text):
        """Prepend a line to the log treeview."""
        tv = self.builder.get_object('tv_log')
        tv.insert("", 0, text=text)

    def update_image(self, image):
        """Draw a new camera frame on the canvas; called by the camera thread."""
        if self.canvas is None:
            self.canvas = self.builder.get_object('canvas_cam')
        self.canvas.delete('all')
        self.canvas.create_image(0, 0, anchor='nw', image=image)
        self.canvas.update()
        return self.canvas

    def start_camera(self):
        """Create and start a Camera for the currently selected source."""
        cam_num = int(self._selected_cam())
        if cam_num >= 0:
            self.camera = camera.Camera(self, cam_num)
            self.camera.start()
        else:
            print('Error on camera')

    def stop_camera(self):
        """Stop the camera thread and restore the default background."""
        # Callers guard against self.camera being None (see _on_cam_toggle).
        self.camera.stop()
        self._set_default_screen()
        self.camera = None

    def show(self):
        """Paint the default screen and enter the Tk main loop (blocking)."""
        self._set_default_screen()
        self.root.mainloop()

    def get_mask_zoom_value(self):
        """Return the current value of the mask zoom slider."""
        scale = self.builder.get_object('scale_zoom')
        return scale.get()

    def get_mask_name(self):
        """Return the filename of the currently selected mask."""
        var = self.builder.get_variable('var_combo_mask')
        return var.get()
|
|
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Execute a volatility command on the client memory.
This module implements the volatility enabled client actions which enable
volatility to operate directly on the client.
"""
# Initialize the volatility plugins, so pylint: disable=unused-import
from volatility import addrspace
from volatility import obj
from volatility import plugins
from volatility import session
from volatility.plugins.addrspaces import standard
from volatility.ui import renderer
# pylint: enable=unused-import
from grr.client import actions
from grr.client import vfs
from grr.lib import rdfvalue
from grr.lib import utils
# pylint: disable=g-bad-name
class ProtobufRenderer(renderer.RendererBaseClass):
  """This stores all the data in a protobuf.

  Volatility drives the renderer callbacks (start/format/table_header/
  table_row/section/end); instead of printing, everything is accumulated
  into a rdfvalue.VolatilityResult for shipping back to the server.
  """

  class Modes(object):
    # Section payload kinds: tabular plugin output vs free-form strings.
    TABLE = 1
    STRING = 2

  def __init__(self, **kwargs):
    super(ProtobufRenderer, self).__init__(**kwargs)

    self.response = rdfvalue.VolatilityResult()

    # Section currently being filled; flushed into response on mode change.
    self.active_section = None
    self.mode = None

  def InitSection(self, mode=None):
    """Ensure there is an active section of `mode`, flushing any other."""
    if self.mode != mode and self.active_section:
      self.response.sections.Append(self.active_section)
      self.active_section = None
    if not self.active_section:
      self.active_section = rdfvalue.VolatilitySection()
    self.mode = mode

  def end(self):
    """Rendering finished: flush the last open section."""
    self.response.sections.Append(self.active_section)

  def start(self, plugin_name=None, kwargs=None):
    """Rendering starts: record which plugin produced this output."""
    _ = kwargs
    if plugin_name:
      self.response.plugin = plugin_name

  def write(self, data):
    self.format(data)

  def format(self, formatstring, *data):
    """Append one formatted value (formatstring + args) to a STRING section."""
    self.InitSection(self.Modes.STRING)

    active_list = self.active_section.formatted_value_list
    formatted_value = active_list.formatted_values.Append()

    formatted_value.formatstring = formatstring
    values = formatted_value.data
    for d in data:
      self.AddValue(values, d)

  def section(self):
    """Explicit section break requested by the plugin."""
    self.response.sections.Append(self.active_section)
    self.active_section = None

  def flush(self):
    pass

  def table_header(self, title_format_list=None, suppress_headers=False,
                   name=None):
    """Record the column headers of a TABLE section."""
    _ = suppress_headers, name
    self.InitSection(self.Modes.TABLE)
    for (print_name, name, format_hint) in title_format_list:
      self.active_section.table.headers.Append(print_name=print_name,
                                               name=name,
                                               format_hint=format_hint)

  def AddValue(self, row, value):
    """Serialize one python/volatility value into `row`.

    Volatility BaseObjects get type/name/offset/vm recorded plus their
    integer and (when it adds information) string representations; plain
    python values map onto the closest protobuf field.
    """
    response = row.values.Append()
    if isinstance(value, obj.BaseObject):
      response.type = value.obj_type
      response.name = value.obj_name
      response.offset = value.obj_offset
      response.vm = utils.SmartStr(value.obj_vm)

      try:
        response.value = value.__int__()
      except (AttributeError, ValueError):
        pass

      # BUG FIX: string_value was previously unbound when both __unicode__
      # and __str__ raised, making the `if string_value:` test below raise
      # NameError. Initialize it first.
      string_value = None
      try:
        string_value = value.__unicode__()
      except (AttributeError, ValueError):
        try:
          string_value = value.__str__()
        except (AttributeError, ValueError):
          pass

      if string_value:
        try:
          int_value = int(string_value)
          # If the string converts to an int but to a different one as the int
          # representation, we send it.
          if int_value != response.value:
            response.svalue = utils.SmartUnicode(string_value)
        except ValueError:
          # We also send if it doesn't convert back to an int.
          response.svalue = utils.SmartUnicode(string_value)

    # bool must be tested before int: bool is an int subclass.
    elif isinstance(value, (bool)):
      response.svalue = utils.SmartUnicode(str(value))
    elif isinstance(value, (int, long)):
      response.value = value
    elif isinstance(value, (basestring)):
      response.svalue = utils.SmartUnicode(value)
    elif isinstance(value, obj.NoneObject):
      response.type = value.__class__.__name__
      response.reason = value.reason
    else:
      # Fallback: ship the repr so nothing is silently dropped.
      response.svalue = utils.SmartUnicode(repr(value))

  def table_row(self, *args):
    """Outputs a single row of a table."""
    self.InitSection(self.Modes.TABLE)
    row = self.active_section.table.rows.Append()
    for value in args:
      self.AddValue(row, value)

  def GetResponse(self):
    """Return the accumulated VolatilityResult protobuf."""
    return self.response

  def RenderProgress(self, *args):
    # Forward progress reports to the session's heartbeat hook.
    self.session.progress(*args)
class UnicodeStringIO(object):
  """In-memory text sink mirroring the StringIO interface, unicode only."""

  def __init__(self):
    # Accumulated output; always a unicode string.
    self.data = u""

  # Interface is dictated by the file-like API, so pylint: disable=g-bad-name
  def write(self, data):
    self.data = self.data + utils.SmartUnicode(data)

  def getvalue(self):
    return self.data
class CachingFDAddressSpace(addrspace.CachingAddressSpaceMixIn,
                            standard.FDAddressSpace):
  """A Caching version of the address space.

  Combines volatility's caching mixin with the file-descriptor-backed
  address space so repeated reads of the same regions are served from
  cache rather than re-reading through the VFS handle.
  """
class VolatilityAction(actions.ActionPlugin):
  """Runs a volatility command on live memory."""
  in_rdfvalue = rdfvalue.VolatilityRequest
  out_rdfvalue = rdfvalue.VolatilityResult

  def Run(self, request):
    """Run a volatility plugin and return the result.

    Opens the memory device named in the request, builds a volatility
    session around it, then runs every requested plugin and sends one
    VolatilityResult reply per plugin.
    """

    def Progress(message=None, **_):
      """Allow volatility to heartbeat us so we do not die."""
      _ = message
      self.Progress()

    # Create a session and run all the plugins with it.
    with vfs.VFSOpen(request.device) as fhandle:
      session_args = request.session.ToDict()
      vol_session = session.Session(**session_args)

      # Make the physical address space by wrapping our VFS handler.
      vol_session.physical_address_space = CachingFDAddressSpace(
          fhandle=fhandle)

      # Set the progress method so the nanny is heartbeat.
      vol_session.progress = Progress
      vol_session.renderer = "ProtobufRenderer"

      # Get the dtb from the driver if possible,
      # it significantly speeds up detection.
      try:
        vol_session.dtb = fhandle.cr3
      except AttributeError:
        pass

      # Get the kdbg from the driver if possible,
      # it significantly speeds up detection.
      try:
        vol_session.kdbg = fhandle.kdbg
      except AttributeError:
        pass

      # Which profile should be used?
      if request.profile:
        vol_session.profile = request.profile
      else:
        # Fall back to volatility's profile auto-detection.
        vol_session.plugins.guess_profile().update_session()
        if not vol_session.profile:
          raise RuntimeError("Unable to autodetect profile")

      # Try to load the kernel address space now.
      if not vol_session.kernel_address_space:
        vol_session.plugins.load_as().GetVirtualAddressSpace()

      # Get the keyword args to this plugin.
      vol_args = request.args.ToDict()

      # Each requested plugin runs with its own renderer and produces its
      # own reply, so one failing plugin does not lose the others' output.
      for plugin, plugin_args in vol_args.items():
        error = ""

        # Heartbeat the client to ensure we keep our nanny happy.
        vol_session.progress(message="Running plugin %s" % plugin)

        ui_renderer = ProtobufRenderer(session=vol_session)
        if plugin_args is None:
          plugin_args = {}
        else:
          plugin_args = plugin_args.ToDict()

        try:
          vol_session.vol(plugin, renderer=ui_renderer, **plugin_args)
        except Exception as e:  # pylint: disable=broad-except
          # Report the failure in the reply instead of aborting the action.
          error = str(e)

        response = ui_renderer.GetResponse()
        if error:
          response.error = error

        # Send it back to the server.
        self.SendReply(response)
|
|
# sqlalchemy/events.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core event interfaces."""
from . import event, exc
from .pool import Pool
from .engine import Connectable, Engine
from .sql.base import SchemaEventTarget
class DDLEvents(event.Events):
    """
    Define event listeners for schema objects,
    that is, :class:`.SchemaItem` and other :class:`.SchemaEventTarget`
    subclasses, including :class:`.MetaData`, :class:`.Table`,
    :class:`.Column`.
    :class:`.MetaData` and :class:`.Table` support events
    specifically regarding when CREATE and DROP
    DDL is emitted to the database.
    Attachment events are also provided to customize
    behavior whenever a child schema element is associated
    with a parent, such as, when a :class:`.Column` is associated
    with its :class:`.Table`, when a :class:`.ForeignKeyConstraint`
    is associated with a :class:`.Table`, etc.
    Example using the ``after_create`` event::
        from sqlalchemy import event
        from sqlalchemy import Table, Column, MetaData, Integer
        m = MetaData()
        some_table = Table('some_table', m, Column('data', Integer))
        def after_create(target, connection, **kw):
            connection.execute("ALTER TABLE %s SET name=foo_%s" %
                                    (target.name, target.name))
        event.listen(some_table, "after_create", after_create)
    DDL events integrate closely with the
    :class:`.DDL` class and the :class:`.DDLElement` hierarchy
    of DDL clause constructs, which are themselves appropriate
    as listener callables::
        from sqlalchemy import DDL
        event.listen(
            some_table,
            "after_create",
            DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
        )
    The methods here define the name of an event as well
    as the names of members that are passed to listener
    functions.
    See also:
        :ref:`event_toplevel`
        :class:`.DDLElement`
        :class:`.DDL`
        :ref:`schema_ddl_sequences`
    """

    _target_class_doc = "SomeSchemaClassOrObject"
    _dispatch_target = SchemaEventTarget

    def before_create(self, target, connection, **kw):
        """Called before CREATE statements are emitted.
        :param target: the :class:`.MetaData` or :class:`.Table`
         object which is the target of the event.
        :param connection: the :class:`.Connection` where the
         CREATE statement or statements will be emitted.
        :param \**kw: additional keyword arguments relevant
         to the event.  The contents of this dictionary
         may vary across releases, and include the
         list of tables being generated for a metadata-level
         event, the checkfirst flag, and other
         elements used by internal events.
        """

    def after_create(self, target, connection, **kw):
        """Called after CREATE statements are emitted.
        :param target: the :class:`.MetaData` or :class:`.Table`
         object which is the target of the event.
        :param connection: the :class:`.Connection` where the
         CREATE statement or statements have been emitted.
        :param \**kw: additional keyword arguments relevant
         to the event.  The contents of this dictionary
         may vary across releases, and include the
         list of tables being generated for a metadata-level
         event, the checkfirst flag, and other
         elements used by internal events.
        """

    def before_drop(self, target, connection, **kw):
        """Called before DROP statements are emitted.
        :param target: the :class:`.MetaData` or :class:`.Table`
         object which is the target of the event.
        :param connection: the :class:`.Connection` where the
         DROP statement or statements will be emitted.
        :param \**kw: additional keyword arguments relevant
         to the event.  The contents of this dictionary
         may vary across releases, and include the
         list of tables being generated for a metadata-level
         event, the checkfirst flag, and other
         elements used by internal events.
        """

    def after_drop(self, target, connection, **kw):
        """Called after DROP statements are emitted.
        :param target: the :class:`.MetaData` or :class:`.Table`
         object which is the target of the event.
        :param connection: the :class:`.Connection` where the
         DROP statement or statements have been emitted.
        :param \**kw: additional keyword arguments relevant
         to the event.  The contents of this dictionary
         may vary across releases, and include the
         list of tables being generated for a metadata-level
         event, the checkfirst flag, and other
         elements used by internal events.
        """

    def before_parent_attach(self, target, parent):
        """Called before a :class:`.SchemaItem` is associated with
        a parent :class:`.SchemaItem`.
        :param target: the target object
        :param parent: the parent to which the target is being attached.
        :func:`.event.listen` also accepts a modifier for this event:
        :param propagate=False: When True, the listener function will
         be established for any copies made of the target object,
         i.e. those copies that are generated when
         :meth:`.Table.tometadata` is used.
        """

    def after_parent_attach(self, target, parent):
        """Called after a :class:`.SchemaItem` is associated with
        a parent :class:`.SchemaItem`.
        :param target: the target object
        :param parent: the parent to which the target is being attached.
        :func:`.event.listen` also accepts a modifier for this event:
        :param propagate=False: When True, the listener function will
         be established for any copies made of the target object,
         i.e. those copies that are generated when
         :meth:`.Table.tometadata` is used.
        """

    def column_reflect(self, inspector, table, column_info):
        """Called for each unit of 'column info' retrieved when
        a :class:`.Table` is being reflected.
        The dictionary of column information as returned by the
        dialect is passed, and can be modified.  The dictionary
        is that returned in each element of the list returned
        by :meth:`.reflection.Inspector.get_columns`.
        The event is called before any action is taken against
        this dictionary, and the contents can be modified.
        The :class:`.Column` specific arguments ``info``, ``key``,
        and ``quote`` can also be added to the dictionary and
        will be passed to the constructor of :class:`.Column`.
        Note that this event is only meaningful if either
        associated with the :class:`.Table` class across the
        board, e.g.::
            from sqlalchemy.schema import Table
            from sqlalchemy import event
            def listen_for_reflect(inspector, table, column_info):
                "receive a column_reflect event"
                # ...
            event.listen(
                    Table,
                    'column_reflect',
                    listen_for_reflect)
        ...or with a specific :class:`.Table` instance using
        the ``listeners`` argument::
            def listen_for_reflect(inspector, table, column_info):
                "receive a column_reflect event"
                # ...
            t = Table(
                'sometable',
                autoload=True,
                listeners=[
                    ('column_reflect', listen_for_reflect)
                ])
        This because the reflection process initiated by ``autoload=True``
        completes within the scope of the constructor for :class:`.Table`.
        """
class PoolEvents(event.Events):
    """Available events for :class:`.Pool`.
    The methods here define the name of an event as well
    as the names of members that are passed to listener
    functions.
    e.g.::
        from sqlalchemy import event
        def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
            "handle an on checkout event"
        event.listen(Pool, 'checkout', my_on_checkout)
    In addition to accepting the :class:`.Pool` class and
    :class:`.Pool` instances, :class:`.PoolEvents` also accepts
    :class:`.Engine` objects and the :class:`.Engine` class as
    targets, which will be resolved to the ``.pool`` attribute of the
    given engine or the :class:`.Pool` class::
        engine = create_engine("postgresql://scott:tiger@localhost/test")
        # will associate with engine.pool
        event.listen(engine, 'checkout', my_on_checkout)
    """

    _target_class_doc = "SomeEngineOrPool"
    _dispatch_target = Pool

    @classmethod
    def _accept_with(cls, target):
        """Resolve an Engine class/instance target to its Pool equivalent."""
        if isinstance(target, type):
            if issubclass(target, Engine):
                return Pool
            elif issubclass(target, Pool):
                return target
        elif isinstance(target, Engine):
            return target.pool
        else:
            return target

    def connect(self, dbapi_connection, connection_record):
        """Called at the moment a particular DBAPI connection is first
        created for a given :class:`.Pool`.
        This event allows one to capture the point directly after which
        the DBAPI module-level ``.connect()`` method has been used in order
        to produce a new DBAPI connection.
        :param dbapi_connection: a DBAPI connection.
        :param connection_record: the :class:`._ConnectionRecord` managing the
         DBAPI connection.
        """

    def first_connect(self, dbapi_connection, connection_record):
        """Called exactly once for the first time a DBAPI connection is
        checked out from a particular :class:`.Pool`.
        The rationale for :meth:`.PoolEvents.first_connect` is to determine
        information about a particular series of database connections based
        on the settings used for all connections.  Since a particular
        :class:`.Pool` refers to a single "creator" function (which in terms
        of a :class:`.Engine` refers to the URL and connection options used),
        it is typically valid to make observations about a single connection
        that can be safely assumed to be valid about all subsequent connections,
        such as the database version, the server and client encoding settings,
        collation settings, and many others.
        :param dbapi_connection: a DBAPI connection.
        :param connection_record: the :class:`._ConnectionRecord` managing the
         DBAPI connection.
        """

    def checkout(self, dbapi_connection, connection_record, connection_proxy):
        """Called when a connection is retrieved from the Pool.
        :param dbapi_connection: a DBAPI connection.
        :param connection_record: the :class:`._ConnectionRecord` managing the
         DBAPI connection.
        :param connection_proxy: the :class:`._ConnectionFairy` object which
         will proxy the public interface of the DBAPI connection for the lifespan
         of the checkout.
        If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the current
        connection will be disposed and a fresh connection retrieved.
        Processing of all checkout listeners will abort and restart
        using the new connection.
        .. seealso:: :meth:`.ConnectionEvents.engine_connect` - a similar event
           which occurs upon creation of a new :class:`.Connection`.
        """

    def checkin(self, dbapi_connection, connection_record):
        """Called when a connection returns to the pool.
        Note that the connection may be closed, and may be None if the
        connection has been invalidated.  ``checkin`` will not be called
        for detached connections.  (They do not return to the pool.)
        :param dbapi_connection: a DBAPI connection.
        :param connection_record: the :class:`._ConnectionRecord` managing the
         DBAPI connection.
        """

    def reset(self, dbapi_connection, connection_record):
        """Called before the "reset" action occurs for a pooled connection.
        This event represents
        when the ``rollback()`` method is called on the DBAPI connection
        before it is returned to the pool.  The behavior of "reset" can
        be controlled, including disabled, using the ``reset_on_return``
        pool argument.
        The :meth:`.PoolEvents.reset` event is usually followed by the
        :meth:`.PoolEvents.checkin` event, except in those
        cases where the connection is discarded immediately after reset.
        :param dbapi_connection: a DBAPI connection.
        :param connection_record: the :class:`._ConnectionRecord` managing the
         DBAPI connection.
        .. versionadded:: 0.8
        .. seealso::
            :meth:`.ConnectionEvents.rollback`
            :meth:`.ConnectionEvents.commit`
        """

    def invalidate(self, dbapi_connection, connection_record, exception):
        """Called when a DBAPI connection is to be "invalidated".
        This event is called any time the :meth:`._ConnectionRecord.invalidate`
        method is invoked, either from API usage or via "auto-invalidation".
        The event occurs before a final attempt to call ``.close()`` on the connection
        occurs.
        :param dbapi_connection: a DBAPI connection.
        :param connection_record: the :class:`._ConnectionRecord` managing the
         DBAPI connection.
        :param exception: the exception object corresponding to the reason
         for this invalidation, if any.  May be ``None``.
        .. versionadded:: 0.9.2 Added support for connection invalidation
           listening.
        .. seealso::
            :ref:`pool_connection_invalidation`
        """
class ConnectionEvents(event.Events):
    """Available events for :class:`.Connectable`, which includes
    :class:`.Connection` and :class:`.Engine`.
    The methods here define the name of an event as well as the names of
    members that are passed to listener functions.
    An event listener can be associated with any :class:`.Connectable`
    class or instance, such as an :class:`.Engine`, e.g.::
        from sqlalchemy import event, create_engine
        def before_cursor_execute(conn, cursor, statement, parameters, context,
                                        executemany):
            log.info("Received statement: %s" % statement)
        engine = create_engine('postgresql://scott:tiger@localhost/test')
        event.listen(engine, "before_cursor_execute", before_cursor_execute)
    or with a specific :class:`.Connection`::
        with engine.begin() as conn:
            @event.listens_for(conn, 'before_cursor_execute')
            def before_cursor_execute(conn, cursor, statement, parameters,
                                            context, executemany):
                log.info("Received statement: %s" % statement)
    The :meth:`.before_execute` and :meth:`.before_cursor_execute`
    events can also be established with the ``retval=True`` flag, which
    allows modification of the statement and parameters to be sent
    to the database. The :meth:`.before_cursor_execute` event is
    particularly useful here to add ad-hoc string transformations, such
    as comments, to all executions::
        from sqlalchemy.engine import Engine
        from sqlalchemy import event
        @event.listens_for(Engine, "before_cursor_execute", retval=True)
        def comment_sql_calls(conn, cursor, statement, parameters,
                                        context, executemany):
            statement = statement + " -- some comment"
            return statement, parameters
    .. note:: :class:`.ConnectionEvents` can be established on any
       combination of :class:`.Engine`, :class:`.Connection`, as well
       as instances of each of those classes. Events across all
       four scopes will fire off for a given instance of
       :class:`.Connection`. However, for performance reasons, the
       :class:`.Connection` object determines at instantiation time
       whether or not its parent :class:`.Engine` has event listeners
       established. Event listeners added to the :class:`.Engine`
       class or to an instance of :class:`.Engine` *after* the instantiation
       of a dependent :class:`.Connection` instance will usually
       *not* be available on that :class:`.Connection` instance. The newly
       added listeners will instead take effect for :class:`.Connection`
       instances created subsequent to those event listeners being
       established on the parent :class:`.Engine` class or instance.
    :param retval=False: Applies to the :meth:`.before_execute` and
     :meth:`.before_cursor_execute` events only. When True, the
     user-defined event function must have a return value, which
     is a tuple of parameters that replace the given statement
     and parameters. See those methods for a description of
     specific return arguments.
    .. versionchanged:: 0.8 :class:`.ConnectionEvents` can now be associated
       with any :class:`.Connectable` including :class:`.Connection`,
       in addition to the existing support for :class:`.Engine`.
    """
    _target_class_doc = "SomeEngine"
    _dispatch_target = Connectable
    @classmethod
    def _listen(cls, event_key, retval=False):
        # Unpack the registration target, the event name, and the raw
        # user listener from the event key.
        target, identifier, fn = \
            event_key.dispatch_target, event_key.identifier, event_key.fn
        target._has_events = True
        if not retval:
            # When the listener was registered without retval=True, wrap it
            # so that dispatch always receives the *unmodified* statement
            # and parameters back, regardless of the listener's own return
            # value.
            if identifier == 'before_execute':
                orig_fn = fn
                def wrap_before_execute(conn, clauseelement,
                                        multiparams, params):
                    orig_fn(conn, clauseelement, multiparams, params)
                    return clauseelement, multiparams, params
                fn = wrap_before_execute
            elif identifier == 'before_cursor_execute':
                orig_fn = fn
                def wrap_before_cursor_execute(conn, cursor, statement,
                                               parameters, context, executemany):
                    orig_fn(conn, cursor, statement,
                            parameters, context, executemany)
                    return statement, parameters
                fn = wrap_before_cursor_execute
        elif retval and \
                identifier not in ('before_execute', 'before_cursor_execute'):
            # retval=True only makes sense for the two events whose return
            # value is consumed by the execution machinery.
            raise exc.ArgumentError(
                "Only the 'before_execute' and "
                "'before_cursor_execute' engine "
                "event listeners accept the 'retval=True' "
                "argument.")
        event_key.with_wrapper(fn).base_listen()
    def before_execute(self, conn, clauseelement, multiparams, params):
        """Intercept high level execute() events, receiving uncompiled
        SQL constructs and other objects prior to rendering into SQL.
        This event is good for debugging SQL compilation issues as well
        as early manipulation of the parameters being sent to the database,
        as the parameter lists will be in a consistent format here.
        This event can be optionally established with the ``retval=True``
        flag.  The ``clauseelement``, ``multiparams``, and ``params``
        arguments should be returned as a three-tuple in this case::
            @event.listens_for(Engine, "before_execute", retval=True)
            def before_execute(conn, clauseelement, multiparams, params):
                # do something with clauseelement, multiparams, params
                return clauseelement, multiparams, params
        :param conn: :class:`.Connection` object
        :param clauseelement: SQL expression construct, :class:`.Compiled`
         instance, or string statement passed to :meth:`.Connection.execute`.
        :param multiparams: Multiple parameter sets, a list of dictionaries.
        :param params: Single parameter set, a single dictionary.
        See also:
        :meth:`.before_cursor_execute`
        """
    def after_execute(self, conn, clauseelement, multiparams, params, result):
        """Intercept high level execute() events after execute.
        :param conn: :class:`.Connection` object
        :param clauseelement: SQL expression construct, :class:`.Compiled`
         instance, or string statement passed to :meth:`.Connection.execute`.
        :param multiparams: Multiple parameter sets, a list of dictionaries.
        :param params: Single parameter set, a single dictionary.
        :param result: :class:`.ResultProxy` generated by the execution.
        """
    def before_cursor_execute(self, conn, cursor, statement,
                              parameters, context, executemany):
        """Intercept low-level cursor execute() events before execution,
        receiving the string
        SQL statement and DBAPI-specific parameter list to be invoked
        against a cursor.
        This event is a good choice for logging as well as late modifications
        to the SQL string.  It's less ideal for parameter modifications except
        for those which are specific to a target backend.
        This event can be optionally established with the ``retval=True``
        flag.  The ``statement`` and ``parameters`` arguments should be
        returned as a two-tuple in this case::
            @event.listens_for(Engine, "before_cursor_execute", retval=True)
            def before_cursor_execute(conn, cursor, statement,
                            parameters, context, executemany):
                # do something with statement, parameters
                return statement, parameters
        See the example at :class:`.ConnectionEvents`.
        :param conn: :class:`.Connection` object
        :param cursor: DBAPI cursor object
        :param statement: string SQL statement
        :param parameters: Dictionary, tuple, or list of parameters being
         passed to the ``execute()`` or ``executemany()`` method of the
         DBAPI ``cursor``.  In some cases may be ``None``.
        :param context: :class:`.ExecutionContext` object in use.  May
         be ``None``.
        :param executemany: boolean, if ``True``, this is an ``executemany()``
         call, if ``False``, this is an ``execute()`` call.
        See also:
        :meth:`.before_execute`
        :meth:`.after_cursor_execute`
        """
    def after_cursor_execute(self, conn, cursor, statement,
                             parameters, context, executemany):
        """Intercept low-level cursor execute() events after execution.
        :param conn: :class:`.Connection` object
        :param cursor: DBAPI cursor object.  Will have results pending
         if the statement was a SELECT, but these should not be consumed
         as they will be needed by the :class:`.ResultProxy`.
        :param statement: string SQL statement
        :param parameters: Dictionary, tuple, or list of parameters being
         passed to the ``execute()`` or ``executemany()`` method of the
         DBAPI ``cursor``.  In some cases may be ``None``.
        :param context: :class:`.ExecutionContext` object in use.  May
         be ``None``.
        :param executemany: boolean, if ``True``, this is an ``executemany()``
         call, if ``False``, this is an ``execute()`` call.
        """
    def dbapi_error(self, conn, cursor, statement, parameters,
                    context, exception):
        """Intercept a raw DBAPI error.
        This event is called with the DBAPI exception instance
        received from the DBAPI itself, *before* SQLAlchemy wraps the
        exception with its own exception wrappers, and before any
        other operations are performed on the DBAPI cursor; the
        existing transaction remains in effect as well as any state
        on the cursor.
        The use case here is to inject low-level exception handling
        into an :class:`.Engine`, typically for logging and
        debugging purposes.   In general, user code should **not** modify
        any state or throw any exceptions here as this will
        interfere with SQLAlchemy's cleanup and error handling
        routines.
        Subsequent to this hook, SQLAlchemy may attempt any
        number of operations on the connection/cursor, including
        closing the cursor, rolling back of the transaction in the
        case of connectionless execution, and disposing of the entire
        connection pool if a "disconnect" was detected.   The
        exception is then wrapped in a SQLAlchemy DBAPI exception
        wrapper and re-thrown.
        :param conn: :class:`.Connection` object
        :param cursor: DBAPI cursor object
        :param statement: string SQL statement
        :param parameters: Dictionary, tuple, or list of parameters being
         passed to the ``execute()`` or ``executemany()`` method of the
         DBAPI ``cursor``.  In some cases may be ``None``.
        :param context: :class:`.ExecutionContext` object in use.  May
         be ``None``.
        :param exception: The **unwrapped** exception emitted directly from the
         DBAPI.  The class here is specific to the DBAPI module in use.
        .. versionadded:: 0.7.7
        """
    def engine_connect(self, conn, branch):
        """Intercept the creation of a new :class:`.Connection`.
        This event is called typically as the direct result of calling
        the :meth:`.Engine.connect` method.
        It differs from the :meth:`.PoolEvents.connect` method, which
        refers to the actual connection to a database at the DBAPI level;
        a DBAPI connection may be pooled and reused for many operations.
        In contrast, this event refers only to the production of a higher level
        :class:`.Connection` wrapper around such a DBAPI connection.
        It also differs from the :meth:`.PoolEvents.checkout` event
        in that it is specific to the :class:`.Connection` object, not the
        DBAPI connection that :meth:`.PoolEvents.checkout` deals with, although
        this DBAPI connection is available here via the :attr:`.Connection.connection`
        attribute.  But note there can in fact
        be multiple :meth:`.PoolEvents.checkout` events within the lifespan
        of a single :class:`.Connection` object, if that :class:`.Connection`
        is invalidated and re-established.  There can also be multiple
        :class:`.Connection` objects generated for the same already-checked-out
        DBAPI connection, in the case that a "branch" of a :class:`.Connection`
        is produced.
        :param conn: :class:`.Connection` object.
        :param branch: if True, this is a "branch" of an existing
         :class:`.Connection`.  A branch is generated within the course
         of a statement execution to invoke supplemental statements, most
         typically to pre-execute a SELECT of a default value for the purposes
         of an INSERT statement.
        .. versionadded:: 0.9.0
        .. seealso::
            :meth:`.PoolEvents.checkout` the lower-level pool checkout event
            for an individual DBAPI connection
            :meth:`.ConnectionEvents.set_connection_execution_options` - a copy of a
            :class:`.Connection` is also made when the
            :meth:`.Connection.execution_options` method is called.
        """
    def set_connection_execution_options(self, conn, opts):
        """Intercept when the :meth:`.Connection.execution_options`
        method is called.
        This method is called after the new :class:`.Connection` has been
        produced, with the newly updated execution options collection, but
        before the :class:`.Dialect` has acted upon any of those new options.
        Note that this method is not called when a new :class:`.Connection`
        is produced which is inheriting execution options from its parent
        :class:`.Engine`; to intercept this condition, use the
        :meth:`.ConnectionEvents.engine_connect` event.
        :param conn: The newly copied :class:`.Connection` object
        :param opts: dictionary of options that were passed to the
         :meth:`.Connection.execution_options` method.
        .. versionadded:: 0.9.0
        .. seealso::
            :meth:`.ConnectionEvents.set_engine_execution_options` - event
            which is called when :meth:`.Engine.execution_options` is called.
        """
    def set_engine_execution_options(self, engine, opts):
        """Intercept when the :meth:`.Engine.execution_options`
        method is called.
        The :meth:`.Engine.execution_options` method produces a shallow
        copy of the :class:`.Engine` which stores the new options.  That new
        :class:`.Engine` is passed here.   A particular application of this
        method is to add a :meth:`.ConnectionEvents.engine_connect` event
        handler to the given :class:`.Engine` which will perform some per-
        :class:`.Connection` task specific to these execution options.
        :param engine: The newly copied :class:`.Engine` object
        :param opts: dictionary of options that were passed to the
         :meth:`.Engine.execution_options` method.
        .. versionadded:: 0.9.0
        .. seealso::
            :meth:`.ConnectionEvents.set_connection_execution_options` - event
            which is called when :meth:`.Connection.execution_options` is called.
        """
    def begin(self, conn):
        """Intercept begin() events.
        :param conn: :class:`.Connection` object
        """
    def rollback(self, conn):
        """Intercept rollback() events, as initiated by a
        :class:`.Transaction`.
        Note that the :class:`.Pool` also "auto-rolls back"
        a DBAPI connection upon checkin, if the ``reset_on_return``
        flag is set to its default value of ``'rollback'``.
        To intercept this
        rollback, use the :meth:`.PoolEvents.reset` hook.
        :param conn: :class:`.Connection` object
        .. seealso::
            :meth:`.PoolEvents.reset`
        """
    def commit(self, conn):
        """Intercept commit() events, as initiated by a
        :class:`.Transaction`.
        Note that the :class:`.Pool` may also "auto-commit"
        a DBAPI connection upon checkin, if the ``reset_on_return``
        flag is set to the value ``'commit'``.  To intercept this
        commit, use the :meth:`.PoolEvents.reset` hook.
        :param conn: :class:`.Connection` object
        """
    def savepoint(self, conn, name):
        """Intercept savepoint() events.
        :param conn: :class:`.Connection` object
        :param name: specified name used for the savepoint.
        """
    def rollback_savepoint(self, conn, name, context):
        """Intercept rollback_savepoint() events.
        :param conn: :class:`.Connection` object
        :param name: specified name used for the savepoint.
        :param context: :class:`.ExecutionContext` in use.  May be ``None``.
        """
    def release_savepoint(self, conn, name, context):
        """Intercept release_savepoint() events.
        :param conn: :class:`.Connection` object
        :param name: specified name used for the savepoint.
        :param context: :class:`.ExecutionContext` in use.  May be ``None``.
        """
    def begin_twophase(self, conn, xid):
        """Intercept begin_twophase() events.
        :param conn: :class:`.Connection` object
        :param xid: two-phase XID identifier
        """
    def prepare_twophase(self, conn, xid):
        """Intercept prepare_twophase() events.
        :param conn: :class:`.Connection` object
        :param xid: two-phase XID identifier
        """
    def rollback_twophase(self, conn, xid, is_prepared):
        """Intercept rollback_twophase() events.
        :param conn: :class:`.Connection` object
        :param xid: two-phase XID identifier
        :param is_prepared: boolean, indicates if
         :meth:`.TwoPhaseTransaction.prepare` was called.
        """
    def commit_twophase(self, conn, xid, is_prepared):
        """Intercept commit_twophase() events.
        :param conn: :class:`.Connection` object
        :param xid: two-phase XID identifier
        :param is_prepared: boolean, indicates if
         :meth:`.TwoPhaseTransaction.prepare` was called.
        """
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple example demonstrating basic Spark SQL features.
Run with:
./bin/spark-submit examples/src/main/python/sql/basic.py
"""
from __future__ import print_function
# $example on:init_session$
from pyspark.sql import SparkSession
# $example off:init_session$
# $example on:schema_inferring$
from pyspark.sql import Row
# $example off:schema_inferring$
# $example on:programmatic_schema$
# Import data types
from pyspark.sql.types import *
# $example off:programmatic_schema$
def basic_df_example(spark):
    """Demonstrate DataFrame creation, untyped (DSL) operations, SQL over
    temporary views, and global temporary views.

    NOTE: the ``$example on/off$`` markers delimit snippets that are
    extracted verbatim into the Spark SQL documentation -- keep the code
    between them unchanged when editing.

    :param spark: an active :class:`pyspark.sql.SparkSession`
    """
    # $example on:create_df$
    # spark is an existing SparkSession
    df = spark.read.json("examples/src/main/resources/people.json")
    # Displays the content of the DataFrame to stdout
    df.show()
    # +----+-------+
    # | age|   name|
    # +----+-------+
    # |null|Michael|
    # |  30|   Andy|
    # |  19| Justin|
    # +----+-------+
    # $example off:create_df$
    # $example on:untyped_ops$
    # spark, df are from the previous example
    # Print the schema in a tree format
    df.printSchema()
    # root
    # |-- age: long (nullable = true)
    # |-- name: string (nullable = true)
    # Select only the "name" column
    df.select("name").show()
    # +-------+
    # |   name|
    # +-------+
    # |Michael|
    # |   Andy|
    # | Justin|
    # +-------+
    # Select everybody, but increment the age by 1
    df.select(df['name'], df['age'] + 1).show()
    # +-------+---------+
    # |   name|(age + 1)|
    # +-------+---------+
    # |Michael|     null|
    # |   Andy|       31|
    # | Justin|       20|
    # +-------+---------+
    # Select people older than 21
    df.filter(df['age'] > 21).show()
    # +---+----+
    # |age|name|
    # +---+----+
    # | 30|Andy|
    # +---+----+
    # Count people by age
    df.groupBy("age").count().show()
    # +----+-----+
    # | age|count|
    # +----+-----+
    # |  19|    1|
    # |null|    1|
    # |  30|    1|
    # +----+-----+
    # $example off:untyped_ops$
    # $example on:run_sql$
    # Register the DataFrame as a SQL temporary view
    df.createOrReplaceTempView("people")
    sqlDF = spark.sql("SELECT * FROM people")
    sqlDF.show()
    # +----+-------+
    # | age|   name|
    # +----+-------+
    # |null|Michael|
    # |  30|   Andy|
    # |  19| Justin|
    # +----+-------+
    # $example off:run_sql$
    # $example on:global_temp_view$
    # Register the DataFrame as a global temporary view
    df.createGlobalTempView("people")
    # Global temporary view is tied to a system preserved database `global_temp`
    spark.sql("SELECT * FROM global_temp.people").show()
    # +----+-------+
    # | age|   name|
    # +----+-------+
    # |null|Michael|
    # |  30|   Andy|
    # |  19| Justin|
    # +----+-------+
    # Global temporary view is cross-session
    spark.newSession().sql("SELECT * FROM global_temp.people").show()
    # +----+-------+
    # | age|   name|
    # +----+-------+
    # |null|Michael|
    # |  30|   Andy|
    # |  19| Justin|
    # +----+-------+
    # $example off:global_temp_view$
def schema_inference_example(spark):
    """Infer a DataFrame schema by reflection over an RDD of Row objects,
    then query the result with SQL.

    :param spark: an active :class:`pyspark.sql.SparkSession`
    """
    # $example on:schema_inferring$
    sc = spark.sparkContext
    # Load a text file and convert each line to a Row.
    lines = sc.textFile("examples/src/main/resources/people.txt")
    parts = lines.map(lambda l: l.split(","))
    people = parts.map(lambda p: Row(name=p[0], age=int(p[1])))
    # Infer the schema, and register the DataFrame as a table.
    schemaPeople = spark.createDataFrame(people)
    schemaPeople.createOrReplaceTempView("people")
    # SQL can be run over DataFrames that have been registered as a table.
    teenagers = spark.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")
    # The results of SQL queries are Dataframe objects.
    # rdd returns the content as an :class:`pyspark.RDD` of :class:`Row`.
    teenNames = teenagers.rdd.map(lambda p: "Name: " + p.name).collect()
    for name in teenNames:
        print(name)
    # Name: Justin
    # $example off:schema_inferring$
def programmatic_schema_example(spark):
    """Build a DataFrame schema programmatically (StructType of string
    fields parsed from a schema string) and apply it to an RDD of tuples.

    :param spark: an active :class:`pyspark.sql.SparkSession`
    """
    # $example on:programmatic_schema$
    sc = spark.sparkContext
    # Load a text file and convert each line to a Row.
    lines = sc.textFile("examples/src/main/resources/people.txt")
    parts = lines.map(lambda l: l.split(","))
    # Each line is converted to a tuple.
    people = parts.map(lambda p: (p[0], p[1].strip()))
    # The schema is encoded in a string.
    schemaString = "name age"
    fields = [StructField(field_name, StringType(), True) for field_name in schemaString.split()]
    schema = StructType(fields)
    # Apply the schema to the RDD.
    schemaPeople = spark.createDataFrame(people, schema)
    # Creates a temporary view using the DataFrame
    schemaPeople.createOrReplaceTempView("people")
    # SQL can be run over DataFrames that have been registered as a table.
    results = spark.sql("SELECT name FROM people")
    results.show()
    # +-------+
    # |   name|
    # +-------+
    # |Michael|
    # |   Andy|
    # | Justin|
    # +-------+
    # $example off:programmatic_schema$
# Entry point: build a SparkSession, run each demonstration, shut down.
if __name__ == "__main__":
    # $example on:init_session$
    spark = SparkSession \
        .builder \
        .appName("Python Spark SQL basic example") \
        .config("spark.some.config.option", "some-value") \
        .getOrCreate()
    # $example off:init_session$
    basic_df_example(spark)
    schema_inference_example(spark)
    programmatic_schema_example(spark)
    spark.stop()
|
|
""" Vanilla RNN
@author Graham Taylor
"""
import numpy as np
import theano
import theano.tensor as T
from sklearn.base import BaseEstimator
import logging
import time
import os
import datetime
import pickle as pickle
import math
import matplotlib.pyplot as plt
# Interactive plotting: figures update without blocking the training loop.
plt.ion()
# Compilation mode for all theano.function calls below; the 'cvm' linker
# runs the graph with the C virtual machine.  Switch to DEBUG_MODE when
# hunting graph/numeric problems.
mode = theano.Mode(linker='cvm')
#mode = 'DEBUG_MODE'
class RNN(object):
    """ Recurrent neural network class
    Builds the Theano symbolic graph for a simple recurrent network:
    hidden state h_t = activation(x_t.W_in + h_{t-1}.W + bh), output
    y_t = h_t.W_out + by, iterated over time with theano.scan.
    Supported output types:
    real : linear output units, use mean-squared error
    binary : binary output units, use cross-entropy error
    softmax : single softmax out, use cross-entropy error
    """
    def __init__(self, input, n_in, n_hidden, n_out, activation=T.tanh,
                 output_type='real', use_symbolic_softmax=False):
        """Construct shared parameters and the symbolic recurrence.

        :param input: symbolic matrix of inputs; the first dimension is
            time, the second has size ``n_in``
        :param n_in: number of input units per time step
        :param n_hidden: number of hidden units
        :param n_out: number of output units
        :param activation: elementwise hidden-layer nonlinearity
        :param output_type: 'real', 'binary' or 'softmax'; selects the
            output transformation and which loss :attr:`loss` computes
        :param use_symbolic_softmax: substitute a hand-written softmax
            for ``T.nnet.softmax`` (workaround for HF training, below)
        """
        self.input = input
        self.activation = activation
        self.output_type = output_type
        # when using HF, SoftmaxGrad.grad is not implemented
        # use a symbolic softmax which is slightly slower than T.nnet.softmax
        # See: http://groups.google.com/group/theano-dev/browse_thread/
        # thread/3930bd5a6a67d27a
        if use_symbolic_softmax:
            def symbolic_softmax(x):
                e = T.exp(x)
                return e / T.sum(e, axis=1).dimshuffle(0, 'x')
            self.softmax = symbolic_softmax
        else:
            self.softmax = T.nnet.softmax
        # recurrent weights as a shared variable
        W_init = np.asarray(np.random.uniform(size=(n_hidden, n_hidden),
                                              low=-.01, high=.01),
                            dtype=theano.config.floatX)
        self.W = theano.shared(value=W_init, name='W')
        # input to hidden layer weights
        W_in_init = np.asarray(np.random.uniform(size=(n_in, n_hidden),
                                                 low=-.01, high=.01),
                               dtype=theano.config.floatX)
        self.W_in = theano.shared(value=W_in_init, name='W_in')
        # hidden to output layer weights
        W_out_init = np.asarray(np.random.uniform(size=(n_hidden, n_out),
                                                  low=-.01, high=.01),
                                dtype=theano.config.floatX)
        self.W_out = theano.shared(value=W_out_init, name='W_out')
        # initial hidden state and biases, all initialized to zero
        h0_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
        self.h0 = theano.shared(value=h0_init, name='h0')
        bh_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
        self.bh = theano.shared(value=bh_init, name='bh')
        by_init = np.zeros((n_out,), dtype=theano.config.floatX)
        self.by = theano.shared(value=by_init, name='by')
        self.params = [self.W, self.W_in, self.W_out, self.h0,
                       self.bh, self.by]
        # for every parameter, we maintain its last update
        # the idea here is to use "momentum"
        # keep moving mostly in the same direction
        self.updates = {}
        for param in self.params:
            init = np.zeros(param.get_value(borrow=True).shape,
                            dtype=theano.config.floatX)
            self.updates[param] = theano.shared(init)
        # recurrent function (using tanh activation function) and linear output
        # activation function
        def step(x_t, h_tm1):
            h_t = self.activation(T.dot(x_t, self.W_in) + \
                                  T.dot(h_tm1, self.W) + self.bh)
            y_t = T.dot(h_t, self.W_out) + self.by
            return h_t, y_t
        # the hidden state `h` for the entire sequence, and the output for the
        # entire sequence `y` (first dimension is always time)
        [self.h, self.y_pred], _ = theano.scan(step,
                                               sequences=self.input,
                                               outputs_info=[self.h0, None])
        # L1 norm ; one regularization option is to enforce L1 norm to
        # be small
        self.L1 = 0
        self.L1 += abs(self.W.sum())
        self.L1 += abs(self.W_in.sum())
        self.L1 += abs(self.W_out.sum())
        # square of L2 norm ; one regularization option is to enforce
        # square of L2 norm to be small
        self.L2_sqr = 0
        self.L2_sqr += (self.W ** 2).sum()
        self.L2_sqr += (self.W_in ** 2).sum()
        self.L2_sqr += (self.W_out ** 2).sum()
        if self.output_type == 'real':
            self.loss = lambda y: self.mse(y)
        elif self.output_type == 'binary':
            # push through sigmoid
            self.p_y_given_x = T.nnet.sigmoid(self.y_pred)  # apply sigmoid
            self.y_out = T.round(self.p_y_given_x)  # round to {0,1}
            self.loss = lambda y: self.nll_binary(y)
        elif self.output_type == 'softmax':
            # push through softmax, computing vector of class-membership
            # probabilities in symbolic form
            self.p_y_given_x = self.softmax(self.y_pred)
            # compute prediction as class whose probability is maximal
            self.y_out = T.argmax(self.p_y_given_x, axis=-1)
            self.loss = lambda y: self.nll_multiclass(y)
        else:
            raise NotImplementedError
    def mse(self, y):
        """Mean-squared error between prediction and target (symbolic)."""
        # error between output and target
        return T.mean((self.y_pred - y) ** 2)
    def nll_binary(self, y):
        """Binary cross-entropy loss (symbolic)."""
        # negative log likelihood based on binary cross entropy error
        return T.mean(T.nnet.binary_crossentropy(self.p_y_given_x, y))
    def nll_multiclass(self, y):
        """Multiclass negative log-likelihood loss (symbolic)."""
        # negative log likelihood based on multiclass cross entropy error
        # y.shape[0] is (symbolically) the number of rows in y, i.e.,
        # number of time steps (call it T) in the sequence
        # T.arange(y.shape[0]) is a symbolic vector which will contain
        # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
        # Log-Probabilities (call it LP) with one row per example and
        # one column per class LP[T.arange(y.shape[0]),y] is a vector
        # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
        # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
        # the mean (across minibatch examples) of the elements in v,
        # i.e., the mean log-likelihood across the minibatch.
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
    def errors(self, y):
        """Return a float representing the number of errors in the sequence
        over the total number of examples in the sequence ; zero one
        loss over the size of the sequence
        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        """
        # check if y has same dimension of y_pred
        if y.ndim != self.y_out.ndim:
            raise TypeError('y should have the same shape as self.y_out',
                            ('y', y.type, 'y_out', self.y_out.type))
        # NOTE(review): for output_type 'real' neither branch below runs,
        # so this method implicitly returns None -- confirm callers only
        # use it for 'binary'/'softmax' networks.
        if self.output_type in ('binary', 'softmax'):
            # check if y is of the correct datatype
            if y.dtype.startswith('int'):
                # the T.neq operator returns a vector of 0s and 1s, where 1
                # represents a mistake in prediction
                return T.mean(T.neq(self.y_out, y))
            else:
                raise NotImplementedError()
class MetaRNN(BaseEstimator):
    """Scikit-learn style wrapper around :class:`RNN`.

    Compiles the Theano prediction/training functions, trains with
    momentum SGD over one sequence at a time, and supports pickling of
    hyper-parameters plus learned weights.
    """
    def __init__(self, n_in=5, n_hidden=50, n_out=5, learning_rate=0.01,
                 n_epochs=100, L1_reg=0.00, L2_reg=0.00, learning_rate_decay=1,
                 activation='tanh', output_type='real',
                 final_momentum=0.9, initial_momentum=0.5,
                 momentum_switchover=5,
                 use_symbolic_softmax=False):
        """Store hyper-parameters and build the symbolic model.

        :param n_in/n_hidden/n_out: layer sizes of the underlying RNN
        :param learning_rate: initial SGD step size
        :param learning_rate_decay: multiplicative decay applied per epoch
        :param L1_reg/L2_reg: regularization strengths
        :param activation: 'tanh', 'sigmoid', 'relu' or 'cappedrelu'
        :param output_type: 'real', 'binary' or 'softmax'
        :param initial_momentum/final_momentum: momentum before/after
            ``momentum_switchover`` epochs
        :param use_symbolic_softmax: forwarded to :class:`RNN`
        """
        self.n_in = int(n_in)
        self.n_hidden = int(n_hidden)
        self.n_out = int(n_out)
        self.learning_rate = float(learning_rate)
        self.learning_rate_decay = float(learning_rate_decay)
        self.n_epochs = int(n_epochs)
        self.L1_reg = float(L1_reg)
        self.L2_reg = float(L2_reg)
        self.activation = activation
        self.output_type = output_type
        self.initial_momentum = float(initial_momentum)
        self.final_momentum = float(final_momentum)
        self.momentum_switchover = int(momentum_switchover)
        self.use_symbolic_softmax = use_symbolic_softmax
        self.ready()
    def ready(self):
        """(Re)create symbolic inputs, the RNN graph and the prediction
        functions.  Called from __init__ and after unpickling."""
        # input (where first dimension is time)
        self.x = T.matrix()
        # target (where first dimension is time)
        if self.output_type == 'real':
            self.y = T.matrix(name='y', dtype=theano.config.floatX)
        elif self.output_type == 'binary':
            self.y = T.matrix(name='y', dtype='int32')
        elif self.output_type == 'softmax':  # only vector labels supported
            self.y = T.vector(name='y', dtype='int32')
        else:
            raise NotImplementedError
        # initial hidden state of the RNN
        self.h0 = T.vector()
        # learning rate
        self.lr = T.scalar()
        if self.activation == 'tanh':
            activation = T.tanh
        elif self.activation == 'sigmoid':
            activation = T.nnet.sigmoid
        elif self.activation == 'relu':
            activation = lambda x: x * (x > 0)
        elif self.activation == 'cappedrelu':
            activation = lambda x: T.minimum(x * (x > 0), 6)
        else:
            raise NotImplementedError
        self.rnn = RNN(input=self.x, n_in=self.n_in,
                       n_hidden=self.n_hidden, n_out=self.n_out,
                       activation=activation, output_type=self.output_type,
                       use_symbolic_softmax=self.use_symbolic_softmax)
        if self.output_type == 'real':
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_pred,
                                           mode=mode)
        elif self.output_type == 'binary':
            self.predict_proba = theano.function(inputs=[self.x, ],
                                                 outputs=self.rnn.p_y_given_x,
                                                 mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=T.round(self.rnn.p_y_given_x),
                                           mode=mode)
        elif self.output_type == 'softmax':
            self.predict_proba = theano.function(inputs=[self.x, ],
                                                 outputs=self.rnn.p_y_given_x,
                                                 mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_out, mode=mode)
        else:
            raise NotImplementedError
    def shared_dataset(self, data_xy):
        """ Load the dataset into shared variables """
        data_x, data_y = data_xy
        shared_x = theano.shared(np.asarray(data_x,
                                            dtype=theano.config.floatX))
        shared_y = theano.shared(np.asarray(data_y,
                                            dtype=theano.config.floatX))
        if self.output_type in ('binary', 'softmax'):
            # labels are used as indices/classes and must be ints
            return shared_x, T.cast(shared_y, 'int32')
        else:
            return shared_x, shared_y
    def __getstate__(self):
        """ Return state sequence (hyper-parameters, weight values)."""
        # sklearn renamed BaseEstimator._get_params() to get_params();
        # support both old and new releases.
        if hasattr(self, 'get_params'):
            params = self.get_params()
        else:
            params = self._get_params()
        weights = [p.get_value() for p in self.rnn.params]
        state = (params, weights)
        return state
    def _set_weights(self, weights):
        """ Set fittable parameters from weights sequence.
        Parameters must be in the order defined by self.params:
            W, W_in, W_out, h0, bh, by
        """
        i = iter(weights)
        for param in self.rnn.params:
            # next() builtin works on both Python 2 and 3;
            # the removed .next() method was Python 2 only.
            param.set_value(next(i))
    def __setstate__(self, state):
        """ Set parameters from state sequence.
        Parameters must be in the order defined by self.params:
            W, W_in, W_out, h0, bh, by
        """
        params, weights = state
        self.set_params(**params)
        self.ready()
        self._set_weights(weights)
    def save(self, fpath='.', fname=None):
        """ Save a pickled representation of Model state. """
        fpathstart, fpathext = os.path.splitext(fpath)
        if fpathext == '.pkl':
            # User supplied an absolute path to a pickle file
            fpath, fname = os.path.split(fpath)
        elif fname is None:
            # Generate filename based on date
            date_obj = datetime.datetime.now()
            date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
            class_name = self.__class__.__name__
            fname = '%s.%s.pkl' % (class_name, date_str)
        fabspath = os.path.join(fpath, fname)
        logging.info("Saving to %s ..." % fabspath)
        # context manager guarantees the handle is closed on error and
        # avoids shadowing the (Python 2) builtin `file`
        with open(fabspath, 'wb') as fh:
            state = self.__getstate__()
            pickle.dump(state, fh, protocol=pickle.HIGHEST_PROTOCOL)
    def load(self, path):
        """ Load model parameters from path. """
        logging.info("Loading from %s ..." % path)
        with open(path, 'rb') as fh:
            state = pickle.load(fh)
        self.__setstate__(state)
    def fit(self, X_train, Y_train, X_test=None, Y_test=None,
            validation_frequency=100):
        """ Fit model
        Pass in X_test, Y_test to compute test error and report during
        training.
        X_train : ndarray (n_seq x n_steps x n_in)
        Y_train : ndarray (n_seq x n_steps x n_out)
        validation_frequency : int
            in terms of number of sequences (or number of weight updates)
        """
        if X_test is not None:
            assert(Y_test is not None)
            self.interactive = True
            test_set_x, test_set_y = self.shared_dataset((X_test, Y_test))
        else:
            self.interactive = False
        train_set_x, train_set_y = self.shared_dataset((X_train, Y_train))
        n_train = train_set_x.get_value(borrow=True).shape[0]
        if self.interactive:
            n_test = test_set_x.get_value(borrow=True).shape[0]
        ######################
        # BUILD ACTUAL MODEL #
        ######################
        logging.info('... building the model')
        index = T.lscalar('index')  # index to a case
        # learning rate (may change)
        l_r = T.scalar('l_r', dtype=theano.config.floatX)
        mom = T.scalar('mom', dtype=theano.config.floatX)  # momentum
        # regularized training cost
        cost = self.rnn.loss(self.y) \
            + self.L1_reg * self.rnn.L1 \
            + self.L2_reg * self.rnn.L2_sqr
        compute_train_error = theano.function(inputs=[index, ],
                                              outputs=self.rnn.loss(self.y),
                                              givens={
                                                  self.x: train_set_x[index],
                                                  self.y: train_set_y[index]},
                                              mode=mode)
        if self.interactive:
            compute_test_error = theano.function(inputs=[index, ],
                                                 outputs=self.rnn.loss(self.y),
                                                 givens={
                                                     self.x: test_set_x[index],
                                                     self.y: test_set_y[index]},
                                                 mode=mode)
        # compute the gradient of cost with respect to theta = (W, W_in, W_out)
        # gradients on the weights using BPTT
        gparams = [T.grad(cost, param) for param in self.rnn.params]
        # momentum update: each parameter moves by a blend of its previous
        # update and the scaled negative gradient.
        # NOTE(review): recent Theano expects `updates` as a list of pairs
        # or an OrderedDict; a plain dict worked on the versions this code
        # targets -- confirm against the installed Theano.
        updates = {}
        for param, gparam in zip(self.rnn.params, gparams):
            weight_update = self.rnn.updates[param]
            upd = mom * weight_update - l_r * gparam
            updates[weight_update] = upd
            updates[param] = param + upd
        # compiling a Theano function `train_model` that returns the
        # cost, but in the same time updates the parameter of the
        # model based on the rules defined in `updates`
        train_model = theano.function(inputs=[index, l_r, mom],
                                      outputs=cost,
                                      updates=updates,
                                      givens={
                                          self.x: train_set_x[index],
                                          self.y: train_set_y[index]},
                                      mode=mode)
        ###############
        # TRAIN MODEL #
        ###############
        logging.info('... training')
        # open() replaces the Python-2-only file() builtin; try/finally
        # guarantees the training log is closed even if training raises.
        f = open('trainOutput.txt', 'a+')
        try:
            epoch = 0
            while (epoch < self.n_epochs):
                epoch = epoch + 1
                for idx in range(n_train):
                    effective_momentum = self.final_momentum \
                        if epoch > self.momentum_switchover \
                        else self.initial_momentum
                    train_model(idx, self.learning_rate,
                                effective_momentum)
                    # number of weight updates made so far
                    # (epoch is 1-based, idx is 0-based)
                    n_updates = (epoch - 1) * n_train + idx + 1
                    if n_updates % validation_frequency == 0:
                        # compute loss on training set
                        train_losses = [compute_train_error(i)
                                        for i in range(n_train)]
                        this_train_loss = np.mean(train_losses)
                        if self.interactive:
                            test_losses = [compute_test_error(i)
                                           for i in range(n_test)]
                            this_test_loss = np.mean(test_losses)
                            f.write('epoch %i, seq %i/%i, tr loss %f '
                                    'te loss %f lr: %f \n' % \
                                    (epoch, idx + 1, n_train,
                                     this_train_loss, this_test_loss, self.learning_rate))
                            print('epoch %i, seq %i/%i, tr loss %f '
                                  'te loss %f lr: %f' % \
                                  (epoch, idx + 1, n_train,
                                   this_train_loss, this_test_loss, self.learning_rate))
                        else:
                            f.write('epoch %i, seq %i/%i, train loss %f '
                                    'lr: %f \n' % \
                                    (epoch, idx + 1, n_train, this_train_loss,
                                     self.learning_rate))
                            print('epoch %i, seq %i/%i, train loss %f '
                                  'lr: %f' % \
                                  (epoch, idx + 1, n_train, this_train_loss,
                                   self.learning_rate))
                # per-epoch learning-rate decay
                self.learning_rate *= self.learning_rate_decay
        finally:
            f.close()
def test_real():
    """ Test RNN with real-valued outputs.

    Trains a MetaRNN on a synthetic lag task (each target column is a
    delayed copy of one input column plus a little noise), plots true vs.
    predicted targets for the first sequence, and prints the per-column
    RMSE of the prediction on that sequence.
    """
    n_hidden = 200
    n_in = 20
    n_out = 5
    n_steps = 10
    n_seq = 100
    np.random.seed(0)
    # simple lag test: targets are delayed copies of selected input columns
    seq = np.random.randn(n_seq, n_steps, n_in)
    targets = np.zeros((n_seq, n_steps, n_out))
    targets[:, 1:, 0] = seq[:, :-1, 3]  # delayed 1
    targets[:, 1:, 1] = seq[:, :-1, 2]  # delayed 1
    targets[:, 2:, 2] = seq[:, :-2, 0]  # delayed 2
    targets += 0.01 * np.random.standard_normal(targets.shape)
    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.001, learning_rate_decay=0.999,
                    n_epochs=400, activation='tanh')
    model.fit(seq, targets, validation_frequency=1000)
    [seqNum, lineNum, colNum] = targets.shape
    print(seqNum, lineNum, colNum)
    error = [0 for i in range(colNum)]
    plt.close('all')
    fig = plt.figure()
    ax1 = plt.subplot(211)
    plt.plot(seq[0])
    ax1.set_title('input')
    ax2 = plt.subplot(212)
    true_targets = plt.plot(targets[0])
    guess = model.predict(seq[0])
    guessed_targets = plt.plot(guess, linestyle='--')
    # color-match each dashed (predicted) line with its solid (true) line
    for i, x in enumerate(guessed_targets):
        x.set_color(true_targets[i].get_color())
    ax2.set_title('solid: true output, dashed: model output')
    dif = abs(guess - targets[0])
    [linedif, coldif] = dif.shape
    print(linedif, coldif)
    errorsum = 0
    # Per-column RMSE over the first sequence.  The accumulator is named
    # `acc` instead of `sum`, which shadowed the builtin in the original.
    for i in range(colNum):
        acc = 0
        for j in range(lineNum):
            acc += dif[j][i] ** 2
        error[i] = math.sqrt(acc / lineNum)
        errorsum += error[i]
        print(error[i])
    print("average error = ", errorsum / colNum)
def _read_step_sequences(path, n_steps):
    """Read whitespace-separated integer rows from `path`.

    Every `n_steps` consecutive lines form one sequence (blank lines are
    counted toward the group boundary but their rows are not stored, exactly
    like the original inline loops).  Returns a 3-D numpy array of shape
    (n_seq, n_steps, row_width).
    """
    seqlist = []
    count = 0
    data = []
    # `with` guarantees the file is closed (the original leaked the handle).
    with open(path) as infile:
        for line in infile:
            count += 1
            row = [int(x) for x in line.split()]
            if len(row) > 0:
                data.append(row)
            if count == n_steps:
                count = 0
                if len(data) > 0:
                    seqlist.append(data)
                data = []
    return np.asarray(seqlist)
def test_binary(multiple_out=False, n_epochs=250):
    """ Test RNN with binary outputs.

    Trains a MetaRNN on sequences from inputdata-b03-300-20.txt and
    evaluates on inputdata-b03-100-20.txt.  Appends per-step error counts
    and the overall error rate to errorRate.txt, and prints the number of
    erroneous steps.

    :param multiple_out: unused; kept for backward compatibility.
    :param n_epochs: number of training epochs passed to MetaRNN.
    """
    n_hidden = 40
    n_in = 4
    n_out = 30
    n_steps = 20
    n_seq = 300  # informational; the actual count comes from the input file
    np.random.seed(0)
    # simple lag test: first n_in columns are inputs, the rest are targets.
    # The duplicated read loops of the original are now one helper.
    seqarray = _read_step_sequences("inputdata-b03-300-20.txt", n_steps)
    seq = seqarray[:, :, :n_in]
    targets = seqarray[:, :, n_in:]
    seqarrayTest = _read_step_sequences("inputdata-b03-100-20.txt", n_steps)
    seqTest = seqarrayTest[:, :, :n_in]
    targetsTest = seqarrayTest[:, :, n_in:]
    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.1, learning_rate_decay=0.999,
                    n_epochs=n_epochs, activation='tanh', output_type='binary')
    model.fit(seq, targets, seqTest, targetsTest, validation_frequency=1000)
    # `open` replaces the Python-2-only `file` builtin; the context manager
    # flushes and closes the error log (the original never closed it).
    with open('errorRate.txt', 'a+') as ferror:
        [seqNum, lineNum, colNum] = targetsTest.shape
        error = [0 for i in range(lineNum * seqNum)]
        errorsum = 0
        for k in range(seqNum):
            guess = model.predict_proba(seqTest[k])
            dif = abs(guess - targetsTest[k])
            [lineDif, colDif] = dif.shape
            for i in range(lineDif):
                ki = k * lineDif + i
                # a step counts as wrong if any output unit is off by > 0.5
                for j in range(colDif):
                    if dif[i][j] > 0.5:
                        error[ki] += 1
                ferror.write('error %d = %d \n' % (ki, error[ki]))
                if error[ki] > 0:
                    errorsum += 1
        print(errorsum)
        # float division also under Python 2 (the /1.0 forces float)
        errorRate = errorsum / 1.0 / seqNum / lineNum
        ferror.write("average error = %f \n" % (errorRate))
def test_softmax(n_epochs=250):
    """ Test RNN with softmax outputs. """
    n_hidden = 10
    n_in = 5
    n_steps = 10
    n_seq = 100
    n_classes = 3
    n_out = n_classes # restricted to single softmax per time step
    np.random.seed(0)
    # simple lag test
    seq = np.random.randn(n_seq, n_steps, n_in)
    # NOTE(review): np.int is deprecated/removed in modern NumPy; plain int
    # (or np.int64) is the replacement — confirm target NumPy version.
    targets = np.zeros((n_seq, n_steps), dtype=np.int)
    thresh = 0.5
    # if lag 1 (dim 3) is greater than lag 2 (dim 0) + thresh
    # class 1
    # if lag 1 (dim 3) is less than lag 2 (dim 0) - thresh
    # class 2
    # if lag 2(dim0) - thresh <= lag 1 (dim 3) <= lag2(dim0) + thresh
    # class 0
    targets[:, 2:][seq[:, 1:-1, 3] > seq[:, :-2, 0] + thresh] = 1
    targets[:, 2:][seq[:, 1:-1, 3] < seq[:, :-2, 0] - thresh] = 2
    #targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])
    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.001, learning_rate_decay=0.999,
                    n_epochs=n_epochs, activation='tanh',
                    output_type='softmax', use_symbolic_softmax=False)
    model.fit(seq, targets, validation_frequency=1000)
    seqs = xrange(10)
    [seqNum,lineNum,colNum] = seq.shape
    print(seqNum,lineNum,colNum)
    error = [0 for i in range(colNum)]
    plt.close('all')
    # Plot, for each of the first 10 sequences, the inputs (top) and the
    # class probabilities the model assigns at each step (bottom).
    for seq_num in seqs:
        fig = plt.figure()
        ax1 = plt.subplot(211)
        plt.plot(seq[seq_num])
        ax1.set_title('input??')
        ax2 = plt.subplot(212)
        # blue line will represent true classes
        true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')
        # show probabilities (in b/w) output by model
        guess = model.predict_proba(seq[seq_num])
        guessed_probs = plt.imshow(guess.T, interpolation='nearest',
                                   cmap='gray')
        ax2.set_title('blue: true class, grayscale: probs assigned by model')
        # NOTE(review): seq[seq_num] is (n_steps, n_in) = (10, 5) while
        # targets[seq_num] is (n_steps,) = (10,); this subtraction looks like
        # a broadcasting shape mismatch — `guess` was presumably intended.
        # Confirm before relying on the printed "error" values.
        dif = abs(seq[seq_num] - targets[seq_num])
        # NOTE(review): `sum` shadows the builtin; also indices i (over n_in)
        # and j (over n_steps) are used as dif[i, j] — verify orientation.
        for i in range (colNum):
            sum = 0
            for j in range (lineNum):
                sum += dif[i,j] ** 2
            error[i] = math.sqrt(sum/lineNum)
            print(error[i])
if __name__ == "__main__":
    # Configure file logging.  NOTE: the original spelled the keyword
    # `datafmt`, which logging.basicConfig silently ignores — the correct
    # name is `datefmt`, so the date format now actually takes effect.
    logging.basicConfig(
        level=logging.INFO,
        format='LINE %(lineno)-4d %(levelname)-8s %(message)s',
        datefmt='%m-%d %H:%M',
        filename="D:/logresult20160123/one.log",
        filemode='w')
    t0 = time.time()
    #test_real()
    # problem takes more epochs to solve
    test_binary(multiple_out=True, n_epochs=150)
    #test_softmax(n_epochs=250)
    print("Elapsed time: %f" % (time.time() - t0))
|
|
"""Class to manage the entities for a single platform."""
from __future__ import annotations
import asyncio
from contextvars import ContextVar
from datetime import datetime, timedelta
from logging import Logger
from types import ModuleType
from typing import TYPE_CHECKING, Callable, Coroutine, Dict, Iterable, List, Optional
from homeassistant import config_entries
from homeassistant.const import ATTR_RESTORED, DEVICE_DEFAULT_NAME
from homeassistant.core import (
CALLBACK_TYPE,
ServiceCall,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.exceptions import HomeAssistantError, PlatformNotReady
from homeassistant.helpers import config_validation as cv, service
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.async_ import run_callback_threadsafe
from .entity_registry import DISABLED_INTEGRATION
from .event import async_call_later, async_track_time_interval
if TYPE_CHECKING:
from .entity import Entity
# Seconds before logging a warning that platform setup is slow.
SLOW_SETUP_WARNING = 10
# Seconds before giving up on a platform setup task entirely.
SLOW_SETUP_MAX_WAIT = 60
SLOW_ADD_ENTITY_MAX_WAIT = 15  # Per Entity
# Floor for the add-entities timeout regardless of entity count.
SLOW_ADD_MIN_TIMEOUT = 500
PLATFORM_NOT_READY_RETRIES = 10
# Key under hass.data where EntityPlatform instances register themselves.
DATA_ENTITY_PLATFORM = "entity_platform"
# Base of the linear retry backoff used when a platform raises PlatformNotReady.
PLATFORM_NOT_READY_BASE_WAIT_TIME = 30  # seconds
class EntityPlatform:
    """Manage the entities for a single platform."""
    def __init__(
        self,
        *,
        hass: HomeAssistantType,
        logger: Logger,
        domain: str,
        platform_name: str,
        platform: Optional[ModuleType],
        scan_interval: timedelta,
        entity_namespace: Optional[str],
    ):
        """Initialize the entity platform.

        domain is the entity domain (e.g. "light"); platform_name is the
        integration providing the entities; platform is the integration's
        platform module (None for the EntityComponent catch-all platform).
        """
        self.hass = hass
        self.logger = logger
        self.domain = domain
        self.platform_name = platform_name
        self.platform = platform
        self.scan_interval = scan_interval
        self.entity_namespace = entity_namespace
        self.config_entry: Optional[config_entries.ConfigEntry] = None
        self.entities: Dict[str, Entity] = {}
        self._tasks: List[asyncio.Future] = []
        # Stop tracking tasks after setup is completed
        self._setup_complete = False
        # Method to cancel the state change listener
        self._async_unsub_polling: Optional[CALLBACK_TYPE] = None
        # Method to cancel the retry of setup
        self._async_cancel_retry_setup: Optional[CALLBACK_TYPE] = None
        self._process_updates: Optional[asyncio.Lock] = None
        self.parallel_updates: Optional[asyncio.Semaphore] = None
        # Platform is None for the EntityComponent "catch-all" EntityPlatform
        # which powers entity_component.add_entities
        self.parallel_updates_created = platform is None
        # Register this instance under hass.data[DATA_ENTITY_PLATFORM][name]
        # so async_get_platforms() can find it later.
        hass.data.setdefault(DATA_ENTITY_PLATFORM, {}).setdefault(
            self.platform_name, []
        ).append(self)
    def __repr__(self) -> str:
        """Represent an EntityPlatform."""
        return f"<EntityPlatform domain={self.domain} platform_name={self.platform_name} config_entry={self.config_entry}>"
    @callback
    def _get_parallel_updates_semaphore(
        self, entity_has_async_update: bool
    ) -> Optional[asyncio.Semaphore]:
        """Get or create a semaphore for parallel updates.
        Semaphore will be created on demand because we base it off if update method is async or not.
        If parallel updates is set to 0, we skip the semaphore.
        If parallel updates is set to a number, we initialize the semaphore to that number.
        The default value for parallel requests is decided based on the first entity that is added to Home Assistant.
        It's 0 if the entity defines the async_update method, else it's 1.
        """
        if self.parallel_updates_created:
            return self.parallel_updates
        self.parallel_updates_created = True
        # A platform may cap concurrency via its PARALLEL_UPDATES constant.
        parallel_updates = getattr(self.platform, "PARALLEL_UPDATES", None)
        if parallel_updates is None and not entity_has_async_update:
            parallel_updates = 1
        if parallel_updates == 0:
            parallel_updates = None
        if parallel_updates is not None:
            self.parallel_updates = asyncio.Semaphore(parallel_updates)
        return self.parallel_updates
    async def async_setup(self, platform_config, discovery_info=None): # type: ignore[no-untyped-def]
        """Set up the platform from a config file."""
        platform = self.platform
        hass = self.hass
        if not hasattr(platform, "async_setup_platform") and not hasattr(
            platform, "setup_platform"
        ):
            self.logger.error(
                "The %s platform for the %s integration does not support platform setup. Please remove it from your config.",
                self.platform_name,
                self.domain,
            )
            return
        @callback
        def async_create_setup_task() -> Coroutine:
            """Get task to set up platform."""
            if getattr(platform, "async_setup_platform", None):
                return platform.async_setup_platform( # type: ignore
                    hass,
                    platform_config,
                    self._async_schedule_add_entities,
                    discovery_info,
                )
            # This should not be replaced with hass.async_add_job because
            # we don't want to track this task in case it blocks startup.
            return hass.loop.run_in_executor( # type: ignore[return-value]
                None,
                platform.setup_platform, # type: ignore
                hass,
                platform_config,
                self._schedule_add_entities,
                discovery_info,
            )
        await self._async_setup_platform(async_create_setup_task)
    async def async_setup_entry(self, config_entry: config_entries.ConfigEntry) -> bool:
        """Set up the platform from a config entry."""
        # Store it so that we can save config entry ID in entity registry
        self.config_entry = config_entry
        platform = self.platform
        @callback
        def async_create_setup_task(): # type: ignore[no-untyped-def]
            """Get task to set up platform."""
            return platform.async_setup_entry( # type: ignore
                self.hass, config_entry, self._async_schedule_add_entities
            )
        return await self._async_setup_platform(async_create_setup_task)
    async def _async_setup_platform(
        self, async_create_setup_task: Callable[[], Coroutine], tries: int = 0
    ) -> bool:
        """Set up a platform via config file or config entry.
        async_create_setup_task creates a coroutine that sets up platform.
        """
        current_platform.set(self)
        logger = self.logger
        hass = self.hass
        full_name = f"{self.domain}.{self.platform_name}"
        logger.info("Setting up %s", full_name)
        warn_task = hass.loop.call_later(
            SLOW_SETUP_WARNING,
            logger.warning,
            "Setup of %s platform %s is taking over %s seconds.",
            self.domain,
            self.platform_name,
            SLOW_SETUP_WARNING,
        )
        try:
            task = async_create_setup_task()
            async with hass.timeout.async_timeout(SLOW_SETUP_MAX_WAIT, self.domain):
                await asyncio.shield(task)
            # Block till all entities are done
            while self._tasks:
                pending = [task for task in self._tasks if not task.done()]
                self._tasks.clear()
                if pending:
                    await asyncio.gather(*pending)
            hass.config.components.add(full_name)
            self._setup_complete = True
            return True
        except PlatformNotReady:
            tries += 1
            # Linear backoff capped at 6 * PLATFORM_NOT_READY_BASE_WAIT_TIME.
            wait_time = min(tries, 6) * PLATFORM_NOT_READY_BASE_WAIT_TIME
            logger.warning(
                "Platform %s not ready yet. Retrying in %d seconds.",
                self.platform_name,
                wait_time,
            )
            async def setup_again(now): # type: ignore[no-untyped-def]
                """Run setup again."""
                self._async_cancel_retry_setup = None
                await self._async_setup_platform(async_create_setup_task, tries)
            self._async_cancel_retry_setup = async_call_later(
                hass, wait_time, setup_again
            )
            return False
        except asyncio.TimeoutError:
            logger.error(
                "Setup of platform %s is taking longer than %s seconds."
                " Startup will proceed without waiting any longer.",
                self.platform_name,
                SLOW_SETUP_MAX_WAIT,
            )
            return False
        except Exception: # pylint: disable=broad-except
            logger.exception(
                "Error while setting up %s platform for %s",
                self.platform_name,
                self.domain,
            )
            return False
        finally:
            # Always cancel the pending slow-setup warning.
            warn_task.cancel()
    def _schedule_add_entities(
        self, new_entities: Iterable[Entity], update_before_add: bool = False
    ) -> None:
        """Schedule adding entities for a single platform, synchronously."""
        run_callback_threadsafe(
            self.hass.loop,
            self._async_schedule_add_entities,
            list(new_entities),
            update_before_add,
        ).result()
    @callback
    def _async_schedule_add_entities(
        self, new_entities: Iterable[Entity], update_before_add: bool = False
    ) -> None:
        """Schedule adding entities for a single platform async."""
        task = self.hass.async_create_task(
            self.async_add_entities(new_entities, update_before_add=update_before_add),
        )
        # Tasks are only tracked while setup is running so that
        # _async_setup_platform can wait for them to finish.
        if not self._setup_complete:
            self._tasks.append(task)
    def add_entities(
        self, new_entities: Iterable[Entity], update_before_add: bool = False
    ) -> None:
        """Add entities for a single platform."""
        # update_before_add=True from a worker thread can deadlock the
        # event loop; warn so it is only used from tests.
        if update_before_add:
            self.logger.warning(
                "Call 'add_entities' with update_before_add=True "
                "only inside tests or you can run into a deadlock!"
            )
        asyncio.run_coroutine_threadsafe(
            self.async_add_entities(list(new_entities), update_before_add),
            self.hass.loop,
        ).result()
    async def async_add_entities(
        self, new_entities: Iterable[Entity], update_before_add: bool = False
    ) -> None:
        """Add entities for a single platform async.
        This method must be run in the event loop.
        """
        # handle empty list from component/platform
        if not new_entities:
            return
        hass = self.hass
        device_registry = await hass.helpers.device_registry.async_get_registry()
        entity_registry = await hass.helpers.entity_registry.async_get_registry()
        tasks = [
            self._async_add_entity( # type: ignore
                entity, update_before_add, entity_registry, device_registry
            )
            for entity in new_entities
        ]
        # No entities for processing
        if not tasks:
            return
        # Scale the timeout with the number of entities, with a high floor.
        timeout = max(SLOW_ADD_ENTITY_MAX_WAIT * len(tasks), SLOW_ADD_MIN_TIMEOUT)
        try:
            async with self.hass.timeout.async_timeout(timeout, self.domain):
                await asyncio.gather(*tasks)
        except asyncio.TimeoutError:
            self.logger.warning(
                "Timed out adding entities for domain %s with platform %s after %ds",
                self.domain,
                self.platform_name,
                timeout,
            )
        except Exception:
            self.logger.exception(
                "Error adding entities for domain %s with platform %s",
                self.domain,
                self.platform_name,
            )
            raise
        # Start the polling timer once, the first time any added entity
        # reports should_poll.
        if self._async_unsub_polling is not None or not any(
            entity.should_poll for entity in self.entities.values()
        ):
            return
        self._async_unsub_polling = async_track_time_interval(
            self.hass,
            self._update_entity_states,
            self.scan_interval,
        )
    async def _async_add_entity( # type: ignore[no-untyped-def]
        self, entity, update_before_add, entity_registry, device_registry
    ):
        """Add an entity to the platform."""
        if entity is None:
            raise ValueError("Entity cannot be None")
        entity.add_to_platform_start(
            self.hass,
            self,
            self._get_parallel_updates_semaphore(hasattr(entity, "async_update")),
        )
        # Update properties before we generate the entity_id
        if update_before_add:
            try:
                await entity.async_device_update(warning=False)
            except Exception: # pylint: disable=broad-except
                self.logger.exception("%s: Error on device update!", self.platform_name)
                entity.add_to_platform_abort()
                return
        requested_entity_id = None
        suggested_object_id: Optional[str] = None
        # Get entity_id from unique ID registration
        if entity.unique_id is not None:
            if entity.entity_id is not None:
                requested_entity_id = entity.entity_id
                suggested_object_id = split_entity_id(entity.entity_id)[1]
            else:
                suggested_object_id = entity.name
            if self.entity_namespace is not None:
                suggested_object_id = f"{self.entity_namespace} {suggested_object_id}"
            if self.config_entry is not None:
                config_entry_id: Optional[str] = self.config_entry.entry_id
            else:
                config_entry_id = None
            device_info = entity.device_info
            device_id = None
            # Register/merge the device in the device registry first so the
            # entity registry entry can reference its device_id.
            if config_entry_id is not None and device_info is not None:
                processed_dev_info = {"config_entry_id": config_entry_id}
                for key in (
                    "connections",
                    "identifiers",
                    "manufacturer",
                    "model",
                    "name",
                    "default_manufacturer",
                    "default_model",
                    "default_name",
                    "sw_version",
                    "entry_type",
                    "via_device",
                    "suggested_area",
                ):
                    if key in device_info:
                        processed_dev_info[key] = device_info[key]
                device = device_registry.async_get_or_create(**processed_dev_info)
                if device:
                    device_id = device.id
            disabled_by: Optional[str] = None
            if not entity.entity_registry_enabled_default:
                disabled_by = DISABLED_INTEGRATION
            entry = entity_registry.async_get_or_create(
                self.domain,
                self.platform_name,
                entity.unique_id,
                suggested_object_id=suggested_object_id,
                config_entry=self.config_entry,
                device_id=device_id,
                known_object_ids=self.entities.keys(),
                disabled_by=disabled_by,
                capabilities=entity.capability_attributes,
                supported_features=entity.supported_features,
                device_class=entity.device_class,
                unit_of_measurement=entity.unit_of_measurement,
                original_name=entity.name,
                original_icon=entity.icon,
            )
            entity.registry_entry = entry
            entity.entity_id = entry.entity_id
            if entry.disabled:
                self.logger.info(
                    "Not adding entity %s because it's disabled",
                    entry.name
                    or entity.name
                    or f'"{self.platform_name} {entity.unique_id}"',
                )
                entity.add_to_platform_abort()
                return
        # We won't generate an entity ID if the platform has already set one
        # We will however make sure that platform cannot pick a registered ID
        elif entity.entity_id is not None and entity_registry.async_is_registered(
            entity.entity_id
        ):
            # If entity already registered, convert entity id to suggestion
            suggested_object_id = split_entity_id(entity.entity_id)[1]
            entity.entity_id = None
        # Generate entity ID
        if entity.entity_id is None:
            suggested_object_id = (
                suggested_object_id or entity.name or DEVICE_DEFAULT_NAME
            )
            if self.entity_namespace is not None:
                suggested_object_id = f"{self.entity_namespace} {suggested_object_id}"
            entity.entity_id = entity_registry.async_generate_entity_id(
                self.domain, suggested_object_id, self.entities.keys()
            )
        # Make sure it is valid in case an entity set the value themselves
        if not valid_entity_id(entity.entity_id):
            entity.add_to_platform_abort()
            raise HomeAssistantError(f"Invalid entity ID: {entity.entity_id}")
        already_exists = entity.entity_id in self.entities
        restored = False
        # An unavailable-but-present state carrying ATTR_RESTORED belongs to
        # a restored entity; this entity may take over that slot.
        if not already_exists and not self.hass.states.async_available(
            entity.entity_id
        ):
            existing = self.hass.states.get(entity.entity_id)
            if existing is not None and ATTR_RESTORED in existing.attributes:
                restored = True
            else:
                already_exists = True
        if already_exists:
            if entity.unique_id is not None:
                msg = f"Platform {self.platform_name} does not generate unique IDs. "
                if requested_entity_id:
                    msg += f"ID {entity.unique_id} is already used by {entity.entity_id} - ignoring {requested_entity_id}"
                else:
                    msg += f"ID {entity.unique_id} already exists - ignoring {entity.entity_id}"
            else:
                msg = f"Entity id already exists - ignoring: {entity.entity_id}"
            self.logger.error(msg)
            entity.add_to_platform_abort()
            return
        entity_id = entity.entity_id
        self.entities[entity_id] = entity
        if not restored:
            # Reserve the state in the state machine
            # because as soon as we return control to the event
            # loop below, another entity could be added
            # with the same id before `entity.add_to_platform_finish()`
            # has a chance to finish.
            self.hass.states.async_reserve(entity.entity_id)
        entity.async_on_remove(lambda: self.entities.pop(entity_id))
        await entity.add_to_platform_finish()
    async def async_reset(self) -> None:
        """Remove all entities and reset data.
        This method must be run in the event loop.
        """
        if self._async_cancel_retry_setup is not None:
            self._async_cancel_retry_setup()
            self._async_cancel_retry_setup = None
        if not self.entities:
            return
        tasks = [entity.async_remove() for entity in self.entities.values()]
        await asyncio.gather(*tasks)
        if self._async_unsub_polling is not None:
            self._async_unsub_polling()
            self._async_unsub_polling = None
        self._setup_complete = False
    async def async_destroy(self) -> None:
        """Destroy an entity platform.
        Call before discarding the object.
        """
        await self.async_reset()
        self.hass.data[DATA_ENTITY_PLATFORM][self.platform_name].remove(self)
    async def async_remove_entity(self, entity_id: str) -> None:
        """Remove entity id from platform."""
        await self.entities[entity_id].async_remove()
        # Clean up polling job if no longer needed
        if self._async_unsub_polling is not None and not any(
            entity.should_poll for entity in self.entities.values()
        ):
            self._async_unsub_polling()
            self._async_unsub_polling = None
    async def async_extract_from_service(
        self, service_call: ServiceCall, expand_group: bool = True
    ) -> List[Entity]:
        """Extract all known and available entities from a service call.
        Will return an empty list if entities specified but unknown.
        This method must be run in the event loop.
        """
        return await service.async_extract_entities(
            self.hass, self.entities.values(), service_call, expand_group
        )
    @callback
    def async_register_entity_service(self, name, schema, func, required_features=None): # type: ignore[no-untyped-def]
        """Register an entity service.
        Services will automatically be shared by all platforms of the same domain.
        """
        # First platform of the domain to register wins; later calls no-op.
        if self.hass.services.has_service(self.platform_name, name):
            return
        if isinstance(schema, dict):
            schema = cv.make_entity_service_schema(schema)
        async def handle_service(call: ServiceCall) -> None:
            """Handle the service."""
            await service.entity_service_call(
                self.hass,
                [
                    plf
                    for plf in self.hass.data[DATA_ENTITY_PLATFORM][self.platform_name]
                    if plf.domain == self.domain
                ],
                func,
                call,
                required_features,
            )
        self.hass.services.async_register(
            self.platform_name, name, handle_service, schema
        )
    async def _update_entity_states(self, now: datetime) -> None:
        """Update the states of all the polling entities.
        To protect from flooding the executor, we will update async entities
        in parallel and other entities sequential.
        This method must be run in the event loop.
        """
        if self._process_updates is None:
            self._process_updates = asyncio.Lock()
        # Skip this cycle if the previous poll is still running.
        if self._process_updates.locked():
            self.logger.warning(
                "Updating %s %s took longer than the scheduled update interval %s",
                self.platform_name,
                self.domain,
                self.scan_interval,
            )
            return
        async with self._process_updates:
            tasks = []
            for entity in self.entities.values():
                if not entity.should_poll:
                    continue
                tasks.append(entity.async_update_ha_state(True))
            if tasks:
                await asyncio.gather(*tasks)
# Context variable holding the EntityPlatform currently being set up; it is
# set in EntityPlatform._async_setup_platform so platform code can look up
# the platform object it is running under.
current_platform: ContextVar[Optional[EntityPlatform]] = ContextVar(
    "current_platform", default=None
)
@callback
def async_get_platforms(
    hass: HomeAssistantType, integration_name: str
) -> List[EntityPlatform]:
    """Find existing platforms.

    Returns the list of EntityPlatform instances registered for the given
    integration, or an empty list when none have registered yet.
    """
    registered = hass.data.get(DATA_ENTITY_PLATFORM)
    if registered is None or integration_name not in registered:
        return []
    platforms: List[EntityPlatform] = registered[integration_name]
    return platforms
|
|
from PyObjCTools.TestSupport import *
import objc
import array
import sys
from Foundation import *
from PyObjCTest.testhelper import PyObjC_TestClass3
# Python 2/3 compatibility shims: Python 3 dropped the `buffer` builtin
# (memoryview is the closest equivalent) and renamed the array methods
# fromstring/tostring to frombytes/tobytes.
if sys.version_info[0] == 3:
    buffer = memoryview
    def array_frombytes(a, b):
        # Python 3 name for array.fromstring
        return a.frombytes(b)
    def array_tobytes(a):
        # Python 3 name for array.tostring
        return a.tobytes()
else:
    def array_frombytes(a, b):
        return a.fromstring(b)
    def array_tobytes(a):
        return a.tostring()
# On interpreters without memoryview, define it as None so the name exists.
try:
    memoryview
except NameError:
    memoryview = None
# Shared fixtures: a raw binary payload with embedded NUL/high bytes, and a
# 100-byte unsigned-char array of repeated ASCII digits.
rawBytes = b"a\x13b\x00cd\xFFef\xEFgh"
otherBytes = array.array('B')
array_frombytes(otherBytes, b'12345678901234567890' * 5)
class TestNSData(TestCase):
    """Tests for the NSData / NSMutableData PyObjC bridge: method signature
    metadata, framework constants, and byte-level data access."""
    def testMethods(self):
        # Verify bridge metadata: BOOL results/args and by-reference
        # (out) error arguments on the file/URL I/O selectors.
        self.assertResultIsBOOL(NSData.isEqualToData_)
        self.assertResultIsBOOL(NSData.writeToFile_atomically_)
        self.assertArgIsBOOL(NSData.writeToFile_atomically_, 1)
        self.assertResultIsBOOL(NSData.writeToURL_atomically_)
        self.assertArgIsBOOL(NSData.writeToURL_atomically_, 1)
        self.assertResultIsBOOL(NSData.writeToFile_options_error_)
        self.assertArgIsOut(NSData.writeToFile_options_error_, 2)
        self.assertResultIsBOOL(NSData.writeToURL_options_error_)
        self.assertArgIsOut(NSData.writeToURL_options_error_, 2)
        self.assertArgIsOut(NSData.dataWithContentsOfFile_options_error_, 2)
        self.assertArgIsOut(NSData.dataWithContentsOfURL_options_error_, 2)
        self.assertArgIsOut(NSData.initWithContentsOfFile_options_error_, 2)
        self.assertArgIsOut(NSData.initWithContentsOfURL_options_error_, 2)
    def testConstants(self):
        self.assertEqual(NSMappedRead, 1)
        self.assertEqual(NSUncachedRead, 2)
        self.assertEqual(NSAtomicWrite, 1)
    @min_os_level('10.6')
    def testConstants10_6(self):
        self.assertEqual(NSDataReadingMapped, 1<<0)
        self.assertEqual(NSDataReadingUncached, 1<<1)
        self.assertEqual(NSDataWritingAtomic, 1<<0)
        self.assertEqual(NSDataSearchBackwards, 1<<0)
        self.assertEqual(NSDataSearchAnchored, 1<<1)
    @min_os_level('10.7')
    def testConstants10_7(self):
        self.assertEqual(NSDataReadingMappedAlways, 1<<3)
    @min_os_level('10.8')
    def testConstants10_8(self):
        self.assertEqual(NSDataWritingWithoutOverwriting, 1<<1)
    @min_os_level('10.6')
    def testMethods10_6(self):
        self.assertResultHasType(NSData.rangeOfData_options_range_, NSRange.__typestr__)
        self.assertArgHasType(NSData.rangeOfData_options_range_, 2, NSRange.__typestr__)
    def assertDataContents(self, d1, d2, rawData):
        # Helper: both data objects must agree with len(), -length and the
        # length of the original raw input.
        self.assertEqual(len(d1), d1.length(), "d1: len() and -length didn't match.")
        self.assertEqual(len(d1), len(rawData), "d1: len(<data>) and len(<input>) didn't match. %d vs %d"%(len(d1), len(rawData)))
        self.assertEqual(len(d2), d2.length(), "d2: len() and -length didn't match.")
        self.assertEqual(len(d2), len(rawData), "d2: len(<data>) and len(<input>) didn't match. %d vs %d"%(len(d2), len(rawData)))
    def testDataWithBytes_length_(self):
        # Test +dataWithBytes:length
        data = NSData.dataWithBytes_length_(rawBytes, len(rawBytes))
        mutableData = NSMutableData.dataWithBytes_length_(rawBytes, len(rawBytes))
        self.assertDataContents(data, mutableData, rawBytes)
    def testAppendBytes_length_(self):
        self.assertArgIsIn(NSMutableData.appendBytes_length_, 0)
        self.assertArgSizeInArg(NSMutableData.appendBytes_length_, 0, 1)
    def testreplaceBytesInRange_withBytes_(self):
        self.assertArgIsIn(NSMutableData.replaceBytesInRange_withBytes_, 1)
        self.assertArgSizeInArg(NSMutableData.replaceBytesInRange_withBytes_, 1, 0)
    def testreplaceBytesInRange_withBytes_length_(self):
        self.assertArgIsIn(NSMutableData.replaceBytesInRange_withBytes_length_, 1)
        self.assertArgSizeInArg(NSMutableData.replaceBytesInRange_withBytes_length_, 1, 2)
    def testDataWithBytesNoCopy_length_freeWhenDone_(self):
        data = NSData.dataWithBytesNoCopy_length_freeWhenDone_(rawBytes, len(rawBytes), False)
        mutableData = NSMutableData.dataWithBytesNoCopy_length_freeWhenDone_(rawBytes, len(rawBytes), False)
        self.assertDataContents(data, mutableData, rawBytes)
    def testInitWithBytes_length_(self):
        # Test -initWithBytes:length:
        data = NSData.alloc().initWithBytes_length_(rawBytes, len(rawBytes))
        mutableData = NSMutableData.alloc().initWithBytes_length_(rawBytes, len(rawBytes))
        self.assertDataContents(data, mutableData, rawBytes)
    def testInitWithBytesNoCopy_length_freeWhenDone_(self):
        # Test -initWithBytesNoCopy:length:
        data = NSData.alloc().initWithBytesNoCopy_length_freeWhenDone_(rawBytes, len(rawBytes), False)
        mutableData = NSMutableData.alloc().initWithBytesNoCopy_length_freeWhenDone_(rawBytes, len(rawBytes), False)
        self.assertDataContents(data, mutableData, rawBytes)
    def testBytes(self):
        # Test -bytes
        data = NSData.alloc().initWithBytes_length_(rawBytes, len(rawBytes))
        bytesValue = data.bytes()
        self.assertEqual(len(bytesValue), len(rawBytes), "bytes() and rawBytes not equal length.")
        if sys.version_info[:2] <= (2,6):
            self.assertEqual(buffer(rawBytes), bytesValue)
        else:
            self.assertEqual(rawBytes, bytesValue)
        # The buffer returned by -bytes must be read-only; writing should
        # raise (exact message differs across Python versions).
        try:
            bytesValue[3] = b'\xAE'
        except TypeError as r:
            if str(r).find('buffer is read-only') == 0:
                pass
            elif str(r).find('cannot modify read-only memory') == 0:
                pass
            else:
                raise
    def testMutableBytes(self):
        # Test -mutableBytes
        mutableData = NSMutableData.dataWithBytes_length_(rawBytes, len(rawBytes))
        mutableBytes = mutableData.mutableBytes()
        # Writing through the mutable buffer, item-wise and via slices,
        # must succeed; mismatched-size slice assignment must raise.
        for i in range(0, len(mutableBytes)):
            if sys.version_info[:2] >= (3,3):
                mutableBytes[i] = array_tobytes(otherBytes[i:i+1])[0]
            else:
                mutableBytes[i] = array_tobytes(otherBytes[i:i+1])
        mutableBytes[1:8] = array_tobytes(otherBytes[1:8])
        try:
            mutableBytes[2:10] = array_tobytes(otherBytes[1:5])
        except (TypeError, ValueError) as r:
            if str(r).find('right operand length must match slice length') == 0:
                pass
            elif 'cannot modify size of memoryview object' in str(r):
                pass
            elif 'ndarray assignment: lvalue and rvalue have different structures' in str(r):
                pass
            else:
                raise
    def testVariousDataLengths(self):
        # Test data of different lengths.
        #
        # Data of different lengths may be stored in different subclasses within the class cluster.
        testFactor = list(range(1, 64)) + [ 1000, 10000, 1000000]
        for aFactor in testFactor:
            bigRawBytes = b"1234567890" * aFactor
            mutableData = NSMutableData.dataWithBytes_length_(bigRawBytes, len(bigRawBytes))
            data = NSData.dataWithBytes_length_(bigRawBytes, len(bigRawBytes))
            self.assertDataContents(data, mutableData, bigRawBytes)
            mutableBytes = mutableData.mutableBytes()
            bytes = data.bytes()
            self.assertEqual(len(bytes), data.length())
            self.assertEqual(len(mutableBytes), mutableData.length())
            self.assertEqual(bytes, mutableBytes)
            mutableBytes[0:len(mutableBytes)] = bytes[0:len(bytes)]
    def testInitWithContents(self):
        # Reading an existing file must return (NSData, None); a missing
        # file must return (None, NSError) via the by-reference error arg.
        b, err = NSData.alloc().initWithContentsOfFile_options_error_(
                "/etc/hosts", 0, None)
        self.assertIsInstance(b, NSData)
        self.assertIs(err, None)
        b2, err = NSData.alloc().initWithContentsOfFile_options_error_(
                "/etc/hosts.nosuchfile", 0, None)
        self.assertIs(b2, None)
        self.assertIsInstance(err, NSError)
        url = NSURL.fileURLWithPath_isDirectory_('/etc/hosts', False)
        b, err = NSData.alloc().initWithContentsOfURL_options_error_(
                url, 0, None)
        self.assertIsInstance(b, NSData)
        self.assertIs(err, None)
        url = NSURL.fileURLWithPath_isDirectory_('/etc/hosts.nosuchfile', False)
        b2, err = NSData.alloc().initWithContentsOfURL_options_error_(
                url, 0, None)
        self.assertIs(b2, None)
        self.assertIsInstance(err, NSError)
class MyData (NSData):
    """NSData subclass whose dataWithBytes:length: constructor returns a
    plain tuple echoing its arguments, so tests can verify that the custom
    IMP forwards bytes and length unchanged."""
    def dataWithBytes_length_(self, bytes, length):
        result = ("data", bytes, length)
        return result
# Sentinel value returned by MyData2.bytes().
BYTES = "dummy bytes"
class MyData2 (NSData):
    """NSData subclass whose initWithBytes:length: returns a plain tuple
    echoing its arguments, with fixed length/bytes accessors."""
    def initWithBytes_length_(self, bytes, length):
        result = ("init", bytes, length)
        return result
    def length(self):
        return 42
    def bytes(self):
        return BYTES
class MyData3 (NSData):
    """NSData subclass that stores the bytes/length passed to its
    initializer and serves them back through the accessors."""
    def initWithBytes_length_(self, bytes, length):
        self._bytes = bytes
        self._length = length
        return self
    def bytes(self):
        return self._bytes
    def length(self):
        # -1 signals "never initialized" (no _length attribute yet)
        return getattr(self, '_length', -1)
class MyData4 (NSData):
    """NSData subclass that reports no bytes (None) and a negative length,
    used to exercise the bridge's handling of a nil byte pointer."""
    def initWithBytes_length_(self, bytes, length):
        # Ignore the arguments entirely; the accessors are fixed.
        return self
    def bytes(self):
        return None
    def length(self):
        return -1
class MyData5(NSData):
    """NSData subclass whose bytes() accessor raises, used to check that
    Python exceptions propagate through the bridge."""
    def initWithBytes_length_(self, bytes, length):
        # Arguments are intentionally discarded.
        return self
    def bytes(self):
        raise ValueError("No bytes available")
    def length(self):
        return -1
class TestMyData (TestCase):
    # 'initWithBytes:length:' and 'dataWithBytes:length:' have custom IMP's
    def testData(self):
        # the tuple returned by the overridden factory comes back unchanged
        r = PyObjC_TestClass3.makeDataWithBytes_method_(MyData, 0)
        self.assertEqual(r, ('data', b'hello world', 11))

    def testInit(self):
        # the tuple returned by the overridden initializer comes back unchanged
        r = PyObjC_TestClass3.makeDataWithBytes_method_(MyData2, 1)
        self.assertEqual(r, ('init', b'hello world', 11))

    def testBytes(self):
        r = PyObjC_TestClass3.makeDataWithBytes_method_(MyData3, 1)
        b = PyObjC_TestClass3.getBytes_(r)
        # Check for memoryview: bytes() may be bridged either as a
        # memoryview or a buffer-like object depending on the runtime
        if isinstance(b.bytes(), memoryview):
            self.assertEqual(b.bytes().tobytes(), b'hello world')
        else:
            self.assertEqual(bytes(b.bytes()), b'hello world')
        self.assertEqual(b.getBytes_length_(None, 4), b'hell')
        self.assertEqual(b.getBytes_range_(None, NSRange(2, 4)), b'llo ')

    def testBytesNone(self):
        # MyData4.bytes() returns None; the bridge should surface it as None
        b = PyObjC_TestClass3.makeDataWithBytes_method_(MyData4, 1)
        self.assertEqual(b.bytes(), None)

    def testBytesRaises(self):
        # MyData5.bytes() raises; the exception should cross the bridge intact
        b = PyObjC_TestClass3.makeDataWithBytes_method_(MyData5, 1)
        self.assertRaises(ValueError, b.bytes)
import array
class TestBuffer(TestCase):
    def testArray(self):
        # array.array supports the buffer protocol, so it can seed and
        # extend an NSMutableData directly
        a = array.array('b', b'foo')
        m = NSMutableData.dataWithData_(a)
        # array_tobytes/array_frombytes are presumably py2/py3 compat
        # helpers defined earlier in this module -- TODO confirm
        self.assertEqual(array_tobytes(a), m[:])
        self.assertTrue(objc.repythonify(a) is a)
        array_frombytes(a, m)
        self.assertEqual(array_tobytes(a), b'foofoo')
        m.appendData_(a)
        self.assertEqual(m[:], b'foofoofoo')
        # NSMutableData supports slice assignment like a bytearray
        m[3:6] = b'bar'
        self.assertEqual(m[:], b'foobarfoo')

    def testBuffer(self):
        if sys.version_info[0] == 3:
            b = b'foo'
        else:
            b = buffer('foo')
        m = NSMutableData.dataWithData_(b)
        self.assertEqual(b[:], m[:])
        self.assertTrue(objc.repythonify(b) is b)
        # NOTE(review): `buffer` only exists on Python 2; on the py3 path
        # this line looks like it would raise NameError -- confirm upstream
        self.assertEqual(buffer(m)[:], m[:])
class TestRegressions (TestCase):
    def testDataStr(self):
        # str() of an NSData must match str() of the value it was built
        # from, on both Python 2 (buffer) and Python 3 (bytes)
        if sys.version_info[0] == 2:
            input = buffer("hello")
            input_str = "hello"
        else:
            input = b"hello"
            input_str = str(input)
        buf = NSData.dataWithData_(input)
        self.assertEqual(str(buf), input_str)
if __name__ == '__main__':
main( )
|
|
'''
Created on Jan 4, 2011
@author: Mark V Systems Limited
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
'''
from arelle import PythonUtil # define 2.x or 3.x string types
import copy, datetime, isodate
from decimal import Decimal
try:
import regex as re
except ImportError:
import re
XmlUtil = None
def qname(value, name=None, noPrefixIsNoNamespace=False, castException=None, prefixException=None):
    """Build a QName from one of several input forms.

    Accepted forms (per the branches below):
      - value is a ModelObject, no name: the element's own tag qname
      - value is a ModelObject with name: element supplies xmlns context and
        name is the prefixed name
      - value is Clark notation '{namespace}prefix:localname' (name may be a
        prefix dict used for reverse lookup)
      - value is a namespaceURI with name as localname, or 'prefix:localname'
    noPrefixIsNoNamespace selects XPath semantics (unprefixed name is in no
    namespace).  castException/prefixException, when given, are raised in
    place of returning None on type or unresolved-prefix errors.
    """
    # either value can be an etree ModelObject element: if no name then qname is element tag qname
    # if name provided qname uses element as xmlns reference and name as prefixed name
    # value can be namespaceURI and name is localname or prefix:localname
    # value can be prefix:localname (and localname omitted)
    # for xpath qnames which do not take default namespace if no prefix, specify noPrefixIsNoNamespace
    if isinstance(value, ModelObject):
        if name: # name is prefixed name
            element = value # may be an attribute
            value = name
            name = None
        else:
            return QName(value.prefix, value.namespaceURI, value.localName)
    elif isinstance(name, ModelObject):
        element = name
        name = None
    else:
        element = None
    if isinstance(value,QName):
        return value
    elif not isinstance(value,_STR_BASE):
        if castException: raise castException
        return None
    if value and value[0] == '{': # clark notation (with optional prefix)
        namespaceURI,sep,prefixedLocalName = value[1:].rpartition('}')
        prefix,sep,localName = prefixedLocalName.rpartition(':')
        if not sep:
            prefix = None
            if isinstance(name, dict):
                # name maps namespaceURI -> prefix (with reverse fallback)
                if namespaceURI in name:
                    prefix = name[namespaceURI]
                else: # reverse lookup
                    for _prefix, _namespaceURI in name.items():
                        if _namespaceURI == namespaceURI:
                            prefix = _prefix
                            break
        namespaceDict = None
    else:
        if isinstance(name, dict):
            namespaceURI = None
            namespaceDict = name # note that default prefix must be None, not '', in dict
        elif name is not None:
            if name: # len > 0
                namespaceURI = value
            else:
                namespaceURI = None
            namespaceDict = None
            value = name
        else:
            namespaceURI = None
            namespaceDict = None
        prefix,sep,localName = value.strip().partition(":") # must be whitespace collapsed
        if not sep:
            localName = prefix
            prefix = None # don't want '' but instead None if no prefix
            if noPrefixIsNoNamespace:
                return QName(None, None, localName)
    if namespaceURI:
        return QName(prefix, namespaceURI, localName)
    elif namespaceDict and prefix in namespaceDict:
        return QName(prefix, namespaceDict[prefix], localName)
    elif element is not None:
        # same as XmlUtil.xmlns but local for efficiency
        namespaceURI = element.nsmap.get(prefix)
        if not namespaceURI and prefix == 'xml':
            namespaceURI = "http://www.w3.org/XML/1998/namespace"
    if not namespaceURI:
        if prefix:
            if prefixException: raise prefixException
            return None # error, prefix not found
        namespaceURI = None # cancel namespace if it is a zero length string
    return QName(prefix, namespaceURI, localName)
def qnameHref(href): # namespaceUri#localname
    """Build a QName from an href of the form namespaceURI#localName."""
    namespaceURI, _sep, localName = href.rpartition("#")
    return QName(None, namespaceURI if namespaceURI else None, localName)
def qnameNsLocalName(namespaceURI, localName): # does not handle localNames with prefix
    """Build a QName from a namespace URI (empty string becomes None) and a plain local name."""
    return QName(None, namespaceURI or None, localName)
def qnameClarkName(clarkname): # does not handle clark names with prefix
    """Build a QName from Clark notation '{namespace}localName' (or a bare local name)."""
    if not clarkname or clarkname[0] != '{':
        # no leading brace: treat the whole string as an unqualified name
        return QName(None, None, clarkname)
    namespaceURI, _sep, localName = clarkname[1:].rpartition('}')
    return QName(None, namespaceURI or None, localName)
def qnameEltPfxName(element, prefixedName, prefixException=None):
    """Resolve prefixedName against element's in-scope namespaces (element.nsmap).

    Also accepts href-style "namespaceURI#localName".  The built-in 'xml'
    prefix is mapped to the XML namespace.  Returns None (or raises
    prefixException when provided) if a prefix cannot be resolved.
    """
    # check for href name style first
    if "#" in prefixedName:
        namespaceURI, _sep, localName = prefixedName.rpartition('#')
        return QName(None, namespaceURI, localName)
    # check for prefixed name style
    prefix,_sep,localName = prefixedName.rpartition(':')
    if not prefix:
        prefix = None # don't want '' but instead None if no prefix
    namespaceURI = element.nsmap.get(prefix)
    if not namespaceURI:
        if prefix:
            if prefix == 'xml':
                namespaceURI = "http://www.w3.org/XML/1998/namespace"
            else:
                if prefixException: raise prefixException
                return None
        else:
            namespaceURI = None # cancel namespace if it is a zero length string
    return QName(prefix, namespaceURI, localName)
class QName:
    """Qualified name (prefix, namespaceURI, localName).

    Equality and hashing deliberately exclude the prefix: two QNames with the
    same namespaceURI/localName compare equal regardless of prefix.
    """
    __slots__ = ("prefix", "namespaceURI", "localName", "qnameValueHash")
    def __init__(self,prefix,namespaceURI,localName):
        self.prefix = prefix
        self.namespaceURI = namespaceURI
        self.localName = localName
        # prefix intentionally not part of the hash
        self.qnameValueHash = hash( (namespaceURI, localName) )
    def __hash__(self):
        return self.qnameValueHash
    @property
    def clarkNotation(self):
        # '{namespaceURI}localName', or bare localName when no namespace
        if self.namespaceURI:
            return '{{{0}}}{1}'.format(self.namespaceURI, self.localName)
        else:
            return self.localName
    @property
    def expandedName(self):
        # href style: namespaceURI#localName (empty namespace allowed)
        return '{0}#{1}'.format(self.namespaceURI or "", self.localName)
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        if self.prefix and self.prefix != '':
            return self.prefix + ':' + self.localName
        else:
            return self.localName
    def __eq__(self,other):
        try:
            # cheap hash comparison first, then the definitive field checks
            return (self.qnameValueHash == other.qnameValueHash and
                    self.localName == other.localName and self.namespaceURI == other.namespaceURI)
        except AttributeError:
            return False
    ''' don't think this is used any longer
    if isinstance(other,_STR_BASE):
        # only compare nsnames {namespace}localname format, if other has same hash
        return self.__hash__() == other.__hash__() and self.clarkNotation == other
    elif isinstance(other,QName):
        return self.qnameValueHash == other.qnameValueHash and \
            self.namespaceURI == other.namespaceURI and self.localName == other.localName
    elif isinstance(other,ModelObject):
        return self.namespaceURI == other.namespaceURI and self.localName == other.localName
    '''
    '''
    try:
        return (self.qnameValueHash == other.qnameValueHash and
                self.namespaceURI == other.namespaceURI and self.localName == other.localName)
    except AttributeError: # other may be a model object and not a QName
        try:
            return self.namespaceURI == other.namespaceURI and self.localName == other.localName
        except AttributeError:
            return False
    return False
    '''
    def __ne__(self,other):
        return not self.__eq__(other)
    # NOTE: the orderings below compare by namespaceURI then localName, with
    # None namespaces sorting first.  The short-circuit `and`/`or` chains can
    # return a truthy string (a namespaceURI) rather than a strict bool;
    # callers use the result only in boolean context.
    def __lt__(self,other):
        return (self.namespaceURI is None and other.namespaceURI) or \
               (self.namespaceURI and other.namespaceURI and self.namespaceURI < other.namespaceURI) or \
               (self.namespaceURI == other.namespaceURI and self.localName < other.localName)
    def __le__(self,other):
        return (self.namespaceURI is None and other.namespaceURI) or \
               (self.namespaceURI and other.namespaceURI and self.namespaceURI < other.namespaceURI) or \
               (self.namespaceURI == other.namespaceURI and self.localName <= other.localName)
    def __gt__(self,other):
        return (self.namespaceURI and other.namespaceURI is None) or \
               (self.namespaceURI and other.namespaceURI and self.namespaceURI > other.namespaceURI) or \
               (self.namespaceURI == other.namespaceURI and self.localName > other.localName)
    def __ge__(self,other):
        return (self.namespaceURI and other.namespaceURI is None) or \
               (self.namespaceURI and other.namespaceURI and self.namespaceURI > other.namespaceURI) or \
               (self.namespaceURI == other.namespaceURI and self.localName >= other.localName)
    def __bool__(self):
        # QName object bool is false if there is no local name (even if there is a namespace URI).
        return bool(self.localName)
from arelle.ModelObject import ModelObject
def anyURI(value):
    """Coerce value to the AnyURI string subtype."""
    return AnyURI(value)
class AnyURI(str):
    """xs:anyURI value: behaves exactly like str; the distinct type lets
    callers recognize URI-typed values with isinstance checks."""
    def __new__(cls, value):
        return super(AnyURI, cls).__new__(cls, value)
datetimePattern = re.compile(r"\s*([0-9]{4})-([0-9]{2})-([0-9]{2})[T ]([0-9]{2}):([0-9]{2}):([0-9]{2})(\.[0-9]+)?(Z|[+-][0-9]{2}:[0-9]{2})?\s*|"
r"\s*([0-9]{4})-([0-9]{2})-([0-9]{2})(Z|[+-][0-9]{2}:[0-9]{2})?\s*")
timePattern = re.compile(r"\s*([0-9]{2}):([0-9]{2}):([0-9]{2})(\.[0-9]+)?(Z|[+-][0-9]{2}:[0-9]{2})?\s*")
durationPattern = re.compile(r"\s*(-?)P((-?[0-9]+)Y)?((-?[0-9]+)M)?((-?[0-9]+)D)?(T((-?[0-9]+)H)?((-?[0-9]+)M)?((-?[0-9.]+)S)?)?\s*")
DATE = 1
DATETIME = 2
DATEUNION = 3
def tzinfo(tz):
    """Convert a lexical zone suffix (None, 'Z', or '+hh:mm'/'-hh:mm') to a datetime.timezone."""
    if tz is None:
        return None
    if tz == 'Z':
        return datetime.timezone(datetime.timedelta(0))
    hours = int(tz[0:3])
    minutes = int(tz[0] + tz[4:6])  # prepend the sign so minutes carry it too
    return datetime.timezone(datetime.timedelta(hours=hours, minutes=minutes))
def tzinfoStr(dt):
    """Return the lexical zone suffix of dt: '' (naive), 'Z' (UTC), or '+hh:mm'/'-hh:mm'."""
    name = str(dt.tzinfo or "")
    if not name.startswith("UTC"):
        return ""
    # str(timezone.utc) is exactly "UTC"; offsets render as "UTC+hh:mm"
    return name[3:] or "Z"
def dateTime(value, time=None, addOneDay=None, type=None, castException=None):
    """Coerce value to a DateTime (or None when it cannot be parsed).

    value may be a sentinel string, a ModelObject (its text is parsed), an
    existing DateTime/datetime/date, or an xs:date/xs:dateTime lexical
    string.  type selects DATE/DATETIME/DATEUNION semantics; addOneDay moves
    a date-only value to the following midnight (period-end instant).
    castException, when provided, is raised instead of returning None.
    NOTE(review): the `time` parameter is unused in this body -- confirm.
    """
    if value == "MinDate":
        return DateTime(datetime.MINYEAR,1,1)
    elif value == "maxyear":
        # NOTE(review): sentinel casing is inconsistent with "MinDate" above
        # ("maxyear" vs e.g. "MaxDate") -- confirm what callers pass
        return DateTime(datetime.MAXYEAR,12,31)
    elif isinstance(value, ModelObject):
        value = value.text
    elif isinstance(value, DateTime) and not addOneDay and (value.dateOnly == (type == DATE)):
        return value # no change needed for cast or conversion
    elif isinstance(value, datetime.datetime):
        if type == DATE:
            dateOnly = True
        elif type == DATETIME:
            dateOnly = False
        else:
            dateOnly = isinstance(value, DateTime) and value.dateOnly
        if addOneDay and not dateOnly:
            # only date-only values get the end-of-period day adjustment
            addOneDay = False
        return DateTime(value.year, value.month, value.day, value.hour, value.minute, value.second, value.microsecond, tzinfo=value.tzinfo, dateOnly=dateOnly, addOneDay=addOneDay)
    elif isinstance(value, datetime.date):
        return DateTime(value.year, value.month, value.day,dateOnly=True,addOneDay=addOneDay)
    elif castException and not isinstance(value, _STR_BASE):
        raise castException("not a string value")
    if value is None:
        return None
    match = datetimePattern.match(value.strip())
    if match is None:
        if castException:
            raise castException("lexical pattern mismatch")
        return None
    if 6 <= match.lastindex <= 8:
        # groups 1-8 matched: full dateTime lexical form with a time part
        if type == DATE:
            if castException:
                raise castException("date-only object has too many fields or contains time")
            return None
        ms = 0
        fracSec = match.group(7)
        if fracSec and fracSec[0] == ".":
            # fractional seconds, padded/truncated to microseconds
            ms = int(fracSec[1:7].ljust(6,'0'))
        result = DateTime(int(match.group(1)),int(match.group(2)),int(match.group(3)),int(match.group(4)),int(match.group(5)),int(match.group(6)),ms,tzinfo(match.group(8)), dateOnly=False)
    else:
        # groups 9-12 matched: date-only lexical form
        if type == DATE or type == DATEUNION:
            dateOnly = True
        elif type == DATETIME:
            dateOnly = False
        else:
            dateOnly = False
        result = DateTime(int(match.group(9)),int(match.group(10)),int(match.group(11)),tzinfo=tzinfo(match.group(12)),dateOnly=dateOnly,addOneDay=addOneDay)
    return result
def lastDayOfMonth(year, month):
    """Return the last day (28-31) of the given month, honoring Gregorian leap years."""
    if month in (1, 3, 5, 7, 8, 10, 12):
        return 31
    if month in (4, 6, 9, 11):
        return 30
    # February: leap years are multiples of 4, excluding centuries
    # that are not multiples of 400
    return 29 if (year % 400 == 0 or (year % 100 != 0 and year % 4 == 0)) else 28
#!!! see note in XmlUtil.py datetimeValue, may need exceptions handled or special treatment for end time of 9999-12-31
class DateTime(datetime.datetime):
    """datetime.datetime subclass carrying a dateOnly flag for xs:date values."""
    def __new__(cls, y, m, d, hr=0, min=0, sec=0, microsec=0, tzinfo=None, dateOnly=None, addOneDay=None):
        # note: does not support negative years but xml date does allow negative dates
        lastDay = lastDayOfMonth(y, m)
        # check day and month before adjustment
        if not 1 <= m <= 12: raise ValueError("month must be in 1..12")
        if not 1 <= d <= lastDay: raise ValueError("day is out of range for month")
        if hr == 24:
            # xs:dateTime permits 24:00:00 meaning midnight of the next day
            if min != 0 or sec != 0 or microsec != 0: raise ValueError("hour 24 must have 0 mins and secs.")
            hr = 0
            d += 1
        if addOneDay:
            # period-end instants: a date-only end is the following midnight
            d += 1
        # roll a day overflow into the next month/year (a single check
        # covers the at-most-two +1 adjustments above)
        if d > lastDay: d -= lastDay; m += 1
        if m > 12: m = 1; y += 1
        dateTime = datetime.datetime.__new__(cls, y, m, d, hr, min, sec, microsec, tzinfo)
        # True for xs:date values (no time part when serializing)
        dateTime.dateOnly = dateOnly
        return dateTime
    def __copy__(self):
        return DateTime(self.year, self.month, self.day, self.hour, self.minute, self.second, self.microsecond, self.tzinfo, self.dateOnly)
    def __str__(self):
        # note does not print negative dates but xml does allow negative date
        tz = tzinfoStr(self)
        if self.dateOnly:
            return "{0.year:04}-{0.month:02}-{0.day:02}{1}".format(self, tz)
        else:
            return "{0.year:04}-{0.month:02}-{0.day:02}T{0.hour:02}:{0.minute:02}:{0.second:02}{1}".format(self, tz)
    def addYearMonthDuration(self, other, sign):
        """Add (sign=1) or subtract (sign=-1) a YearMonthDuration, clamping
        the day to the target month's last day."""
        m = self.month + sign * other.months - 1 # m is zero based now (0 - Jan, 11 - Dec)
        y = self.year + sign * other.years + m // 12
        m = (m % 12) + 1 # m back to 1 based (1 = Jan)
        d = self.day
        lastDay = lastDayOfMonth(y, m)
        if d > lastDay: d = lastDay
        return DateTime(y, m, d, self.hour, self.minute, self.second, self.microsecond, self.tzinfo, self.dateOnly)
    def __add__(self, other):
        if isinstance(other, YearMonthDuration):
            return self.addYearMonthDuration(other, 1)
        else:
            if isinstance(other, Time): other = dayTimeDuration(other)
            dt = super(DateTime, self).__add__(other)
            # rebuild as DateTime so the dateOnly flag survives arithmetic
            return DateTime(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, dt.tzinfo, self.dateOnly)
    def __sub__(self, other):
        if isinstance(other, YearMonthDuration):
            return self.addYearMonthDuration(other, -1)
        else:
            dt = super(DateTime, self).__sub__(other)
            if isinstance(dt,datetime.timedelta):
                # DateTime - DateTime (or datetime) yields a duration
                return DayTimeDuration(dt.days, 0, 0, dt.seconds)
            else:
                if isinstance(other, Time): other = dayTimeDuration(other)
                return DateTime(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, dt.tzinfo, self.dateOnly)
def dateUnionEqual(dateUnion1, dateUnion2, instantEndDate=False):
    """Compare two date-or-dateTime union values for equality.

    With instantEndDate, a date-only value is treated as the instant at the
    end of that day (the following midnight), per XBRL instant semantics.
    """
    def _normalize(d):
        if isinstance(d, DateTime):
            if instantEndDate and d.dateOnly:
                return d + datetime.timedelta(1)
            return d
        if isinstance(d, datetime.date):
            # plain date/datetime: promote to DateTime with the same rule
            return dateTime(d, addOneDay=instantEndDate)
        return d
    return _normalize(dateUnion1) == _normalize(dateUnion2)
def dateunionDate(datetimeValue, subtractOneDay=False):
    """Return the datetime.date of a date-or-dateTime union value.

    A dateTime at exactly midnight with subtractOneDay is moved back one
    day (an instant at 00:00 belongs to the preceding day in period-end
    terms).  Date-only values are returned as-is.
    """
    isDate = (hasattr(datetimeValue,'dateOnly') and datetimeValue.dateOnly) or not hasattr(datetimeValue, 'hour')
    d = datetimeValue
    if isDate or (d.hour == 0 and d.minute == 0 and d.second == 0):
        if subtractOneDay and not isDate: d -= datetime.timedelta(1)
    # always return a date; previously the return was nested inside the
    # midnight check, so non-midnight dateTimes silently returned None
    return datetime.date(d.year, d.month, d.day)
def yearMonthDuration(value):
    """Parse an xs:yearMonthDuration lexical value; any day/time component is disallowed."""
    groups = durationPattern.match(value).groups()
    minus, hasYr, yrs, hasMo, mos, hasDay, days, hasTime, hasHr, hrs, hasMin, mins, hasSec, secs = groups
    if hasDay or hasHr or hasMin or hasSec:
        raise ValueError
    sign = -1 if minus else 1
    return YearMonthDuration(sign * int(yrs or 0), sign * int(mos or 0))
class YearMonthDuration():
    """xs:yearMonthDuration value holding whole years and months."""
    def __init__(self, years, months):
        self.years = years
        self.months = months

    def __repr__(self):
        return str(self)

    def __str__(self):
        # XML Schema lexical form, e.g. P1Y2M
        return "P{}Y{}M".format(self.years, self.months)
def dayTimeDuration(value):
    """Coerce a Time, timedelta, or xs:dayTimeDuration lexical string to a DayTimeDuration.

    Raises ValueError when the lexical string contains year or month
    components.
    """
    if isinstance(value,Time):
        # a lexical 24:00:00 (hour24) contributes one whole day
        return DayTimeDuration(1 if value.hour24 else 0, value.hour, value.minute, value.second)
    if isinstance(value,datetime.timedelta):
        return DayTimeDuration(value.days, 0, 0, value.seconds)
    minus, hasYr, yrs, hasMo, mos, hasDay, days, hasTime, hasHr, hrs, hasMin, mins, hasSec, secs = durationPattern.match(value).groups()
    if hasYr or hasMo: raise ValueError
    sign = -1 if minus else 1
    # NOTE(review): the pattern admits fractional seconds ("1.5S") but
    # int(secs) raises ValueError on them -- confirm intended behavior
    return DayTimeDuration(sign * int(days if days else 0), sign * int(hrs if hrs else 0), sign * int(mins if mins else 0), sign * int(secs if secs else 0))
class DayTimeDuration(datetime.timedelta):
    """xs:dayTimeDuration value: a timedelta serialized in PnDTnHnMnS form.

    Constructor arguments are (days, hours, minutes, seconds), matching the
    lexical components of the duration.
    """
    def __new__(cls, days, hours, minutes, seconds):
        # timedelta's positional order is (days, seconds, microseconds,
        # milliseconds, minutes, hours); passing positionally mapped hours
        # into seconds, minutes into microseconds and seconds into
        # milliseconds -- pass by keyword so each argument lands in the
        # intended component
        return datetime.timedelta.__new__(cls, days=days, hours=hours,
                                          minutes=minutes, seconds=seconds)
    def dayHrsMinsSecs(self):
        """Return (days, hours, minutes, seconds) for lexical output.

        timedelta normalizes so 0 <= seconds < 86400 with the sign carried
        by days; for negative durations re-express the sub-day part so the
        tuple reads as a signed-days magnitude, e.g. -1.5 days ->
        (-1, 12, 0, 0).  (A duration between -1 day and 0 still loses its
        sign here, as before.)
        """
        days = int(self.days)
        if days < 0 and (self.seconds > 0 or self.microseconds > 0):
            days += 1  # give one borrowed day back to the sub-day part (was -= 1)
            seconds = 86400 - self.seconds
            if seconds > 0 and self.microseconds > 0:
                microseconds = 1000000 - self.microseconds
                seconds -= 1
            elif self.microseconds > 0:
                microseconds = 1000000 - self.microseconds
            else:
                microseconds = 0  # was left unbound here, causing NameError below
        else:
            seconds = self.seconds
            microseconds = self.microseconds
        # round microseconds to the nearest whole second
        if microseconds >= 500000:
            seconds += 1
        hours = seconds // 3600  # was seconds / 86400, which always gave 0 hours
        seconds -= hours * 3600
        minutes = seconds // 60
        seconds -= minutes * 60
        return (days, hours, minutes, seconds)
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        x = self.dayHrsMinsSecs()
        return "P{0}DT{1}H{2}M{3}S".format(x[0], x[1], x[2], x[3])
def yearMonthDayTimeDuration(value, value2=None):
    """Build a YearMonthDayTimeDuration.

    Either the calendar difference of two datetimes (value, value2),
    computed componentwise with borrowing, or parsed from an xs:duration
    lexical string passed as value.
    """
    # was `isinstance(value, ...)` twice, so a non-datetime value2 slipped
    # into the arithmetic branch
    if isinstance(value, datetime.datetime) and isinstance(value2, datetime.datetime):
        years = value2.year - value.year
        months = value2.month - value.month
        if months < 0:
            years -= 1
            months += 12
        days = value2.day - value.day
        if days < 0:
            # borrow the length of the month preceding value2
            _lastDayPrevMonth = (value2 - datetime.timedelta(value2.day)).day
            months -= 1
            days = _lastDayPrevMonth + days
        hours = value2.hour - value.hour
        if hours < 0:
            days -= 1
            hours += 24
        minutes = value2.minute - value.minute
        if minutes < 0:
            hours -= 1
            minutes += 60
        seconds = value2.second - value.second
        if seconds < 0:
            minutes -= 1
            seconds += 60
        return YearMonthDayTimeDuration(years, months, days, hours, minutes, seconds)
    minus, hasYr, yrs, hasMo, mos, hasDay, days, hasTime, hasHr, hrs, hasMin, mins, hasSec, secs = durationPattern.match(value).groups()
    sign = -1 if minus else 1
    # supply all six components (the constructor requires them; the previous
    # two-argument call raised TypeError); lexical seconds may be fractional,
    # truncate to whole seconds
    return YearMonthDayTimeDuration(sign * int(yrs if yrs else 0), sign * int(mos if mos else 0),
                                    sign * int(days if days else 0), sign * int(hrs if hrs else 0),
                                    sign * int(mins if mins else 0), sign * int(float(secs)) if secs else 0)
class YearMonthDayTimeDuration():
    """xs:duration-like value holding all six calendar/time components."""
    def __init__(self, years, months, days, hours, minutes, seconds):
        self.years = years
        self.months = months
        self.days = days
        self.hours = hours
        self.minutes = minutes
        self.seconds = seconds
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        """Serialize in XML Schema duration lexical form, e.g. P1Y2M3DT4H5M6S."""
        per = []
        # each component uses its own designator letter (the originals all
        # used "Y", producing output like "P1Y2Y3YT4Y5Y6Y")
        if self.years: per.append("{}Y".format(self.years))
        if self.months: per.append("{}M".format(self.months))
        if self.days: per.append("{}D".format(self.days))
        if self.hours or self.minutes or self.seconds: per.append('T')
        if self.hours: per.append("{}H".format(self.hours))
        if self.minutes: per.append("{}M".format(self.minutes))
        if self.seconds: per.append("{}S".format(self.seconds))
        if not per:
            return "PT0S"
        return "P" + ''.join(per)
def time(value, castException=None):
    """Coerce value to a Time instance.

    Accepts the sentinels "MinTime"/"MaxTime", a ModelObject (its text is
    parsed), datetime.time or datetime.datetime instances, or an xs:time
    lexical string.  Returns None when parsing fails; castException, when
    given, is raised for non-string input.
    """
    if value == "MinTime":
        # in this scope `time` names this function, not datetime.time, so
        # `time.min` raised AttributeError; spell the constant out instead
        return Time(0, 0, 0, 0)
    elif value == "MaxTime":
        return Time(23, 59, 59, 999999)  # datetime.time.max, componentwise
    elif isinstance(value, ModelObject):
        value = value.text
    elif isinstance(value, datetime.time):
        return Time(value.hour, value.minute, value.second, value.microsecond, value.tzinfo)
    elif isinstance(value, datetime.datetime):
        return Time(value.hour, value.minute, value.second, value.microsecond, value.tzinfo)
    elif castException and not isinstance(value, _STR_BASE):
        raise castException
    if value is None:
        return None
    match = timePattern.match(value.strip())
    if match is None:
        return None
    ms = 0
    fracSec = match.group(4)
    if fracSec and fracSec[0] == ".":
        # fractional seconds, padded/truncated to microseconds
        ms = int(fracSec[1:7].ljust(6,'0'))
    return Time(int(match.group(1)),int(match.group(2)),int(match.group(3)),ms,tzinfo(match.group(5)))
class Time(datetime.time):
    """datetime.time subclass that remembers a lexical 24:00:00.

    xs:time allows 24:00:00 meaning end-of-day; it is normalized to
    00:00:00 but the fact is kept on the instance (hour24) for duration
    conversion.
    """
    def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
        is_midnight_24 = (hour == 24 and minute == 0 and second == 0 and microsecond == 0)
        if is_midnight_24:
            hour = 0
        instance = datetime.time.__new__(cls, hour, minute, second, microsecond, tzinfo)
        instance.hour24 = is_midnight_24
        return instance
class gYearMonth():
    """xs:gYearMonth value (year and month, no day); year may be negative."""
    def __init__(self, year, month):
        self.year = int(year) # may be negative
        self.month = int(month)
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        # negative years are zero-padded to four digits after the sign
        return "{0:0{2}}-{1:02}".format(self.year, self.month, 5 if self.year < 0 else 4) # may be negative
    def __eq__(self,other):
        return type(other) == gYearMonth and self.year == other.year and self.month == other.month
    def __ne__(self,other):
        return not self.__eq__(other)
    def __lt__(self,other):
        return type(other) == gYearMonth and ((self.year < other.year) or (self.year == other.year and self.month < other.month))
    def __le__(self,other):
        # strict year comparison first; previously `year <= other.year` made
        # e.g. (2020, 5) <= (2020, 3) come out True
        return type(other) == gYearMonth and ((self.year < other.year) or (self.year == other.year and self.month <= other.month))
    def __gt__(self,other):
        return type(other) == gYearMonth and ((self.year > other.year) or (self.year == other.year and self.month > other.month))
    def __ge__(self,other):
        # strict year comparison first (same fix as __le__)
        return type(other) == gYearMonth and ((self.year > other.year) or (self.year == other.year and self.month >= other.month))
    def __bool__(self):
        return self.year != 0 or self.month != 0
class gMonthDay():
    """xs:gMonthDay value: a recurring month/day, serialized "--MM-DD"."""
    def __init__(self, month, day):
        self.month = int(month)
        self.day = int(day)
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return "--{0:02}-{1:02}".format(self.month, self.day)
    def __eq__(self,other):
        return type(other) == gMonthDay and self.month == other.month and self.day == other.day
    def __ne__(self,other):
        return not self.__eq__(other)
    def __lt__(self,other):
        return type(other) == gMonthDay and ((self.month < other.month) or (self.month == other.month and self.day < other.day))
    def __le__(self,other):
        # strict month comparison first; previously `month <= other.month`
        # made e.g. (2, 29) <= (2, 14) come out True
        return type(other) == gMonthDay and ((self.month < other.month) or (self.month == other.month and self.day <= other.day))
    def __gt__(self,other):
        return type(other) == gMonthDay and ((self.month > other.month) or (self.month == other.month and self.day > other.day))
    def __ge__(self,other):
        # strict month comparison first (same fix as __le__)
        return type(other) == gMonthDay and ((self.month > other.month) or (self.month == other.month and self.day >= other.day))
    def __bool__(self):
        return self.month != 0 or self.day != 0
class gYear():
    """xs:gYear value; year may be negative."""
    def __init__(self, year):
        self.year = int(year) # may be negative
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        # negative years are zero-padded to four digits after the sign
        return "{0:0{1}}".format(self.year, 5 if self.year < 0 else 4) # may be negative
    def __eq__(self,other):
        return type(other) == gYear and self.year == other.year
    def __ne__(self,other):
        return not self.__eq__(other)
    def __lt__(self,other):
        return type(other) == gYear and self.year < other.year
    def __le__(self,other):
        return type(other) == gYear and self.year <= other.year
    def __gt__(self,other):
        return type(other) == gYear and self.year > other.year
    def __ge__(self,other):
        return type(other) == gYear and self.year >= other.year
    def __bool__(self):
        # was `self.year != 0 != 0`, which chains to
        # (year != 0) and (0 != 0) and is therefore always False
        return self.year != 0
class gMonth():
    """xs:gMonth value: a recurring month of the year, serialized "--MM"."""
    def __init__(self, month):
        self.month = int(month)

    def _is_gMonth(self, other):
        # comparisons are defined only against other gMonth instances
        return type(other) == gMonth

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "--{:02}".format(self.month)

    def __eq__(self, other):
        return self._is_gMonth(other) and self.month == other.month

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        return self._is_gMonth(other) and self.month < other.month

    def __le__(self, other):
        return self._is_gMonth(other) and self.month <= other.month

    def __gt__(self, other):
        return self._is_gMonth(other) and self.month > other.month

    def __ge__(self, other):
        return self._is_gMonth(other) and self.month >= other.month

    def __bool__(self):
        return self.month != 0
class gDay():
    """xs:gDay value: a recurring day of the month, serialized "---DD"."""
    def __init__(self, day):
        self.day = int(day)

    def _is_gDay(self, other):
        # comparisons are defined only against other gDay instances
        return type(other) == gDay

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "---{:02}".format(self.day)

    def __eq__(self, other):
        return self._is_gDay(other) and self.day == other.day

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        return self._is_gDay(other) and self.day < other.day

    def __le__(self, other):
        return self._is_gDay(other) and self.day <= other.day

    def __gt__(self, other):
        return self._is_gDay(other) and self.day > other.day

    def __ge__(self, other):
        return self._is_gDay(other) and self.day >= other.day

    def __bool__(self):
        return self.day != 0
isoDurationPattern = re.compile(
r"^(?P<sign>[+-])?"
r"P(?!\b)"
r"(?P<years>[0-9]+([,.][0-9]+)?Y)?"
r"(?P<months>[0-9]+([,.][0-9]+)?M)?"
r"(?P<weeks>[0-9]+([,.][0-9]+)?W)?"
r"(?P<days>[0-9]+([,.][0-9]+)?D)?"
r"((?P<separator>T)(?P<hours>[0-9]+([,.][0-9]+)?H)?"
r"(?P<minutes>[0-9]+([,.][0-9]+)?M)?"
r"(?P<seconds>[0-9]+([,.][0-9]+)?S)?)?$")
def isoDuration(value):
    """Parse an ISO 8601 duration lexical string into an IsoDuration.

    Years and months are kept as Decimal (calendar components); weeks,
    days, hours, minutes and seconds become floats for timedelta
    arithmetic.  Raises TypeError for non-str input and ValueError when
    the lexical pattern does not match.
    """
    if not isinstance(value, str):
        raise TypeError("Expecting a string {}".format(value))
    match = isoDurationPattern.match(value)
    if not match:
        raise ValueError("Unable to parse duration string {}".format(value))
    groups = match.groupdict()
    for key, val in list(groups.items()):
        if key not in ('separator', 'sign'):
            if val is None:
                # dummy value with a designator letter so the uniform [:-1]
                # strip below leaves "0"
                groups[key] = "0n"
            if key in ('years', 'months'):
                groups[key] = Decimal(groups[key][:-1].replace(',', '.'))
            else:
                # these values are passed into a timedelta object,
                # which works with floats.
                groups[key] = float(groups[key][:-1].replace(',', '.'))
    return IsoDuration(years=groups["years"], months=groups["months"],
                       days=groups["days"], hours=groups["hours"],
                       minutes=groups["minutes"], seconds=groups["seconds"],
                       weeks=groups["weeks"],
                       negate=(groups["sign"]=='-'),
                       sourceValue=value) # preserve source lexical value for str() value
DAYSPERMONTH = Decimal("30.4375") # see: https://www.ibm.com/support/knowledgecenter/SSLVMB_20.0.0/com.ibm.spss.statistics.help/alg_adp_date-time_handling.htm
"""
XPath 2.0 does not define arithmetic operations on xs:duration
for arithmetic one must use xs:yearMonthDuration or xs:dayTimeDuration instead
Arelle provides value comparisons for xs:duration even though they are not "totally ordered" per XPath 1.0
because these are necessary in order to define typed dimension ordering
"""
class IsoDuration(isodate.Duration):
    """
    .. class:: IsoDuration(modelDocument)
    Implements custom class for xs:duration to work for typed dimensions
    Uses DAYSPERMONTH approximation of ordering days/months (only for typed dimensions, not XPath).
    For formula purposes this object is not used because xpath 1.0 requires use of
    xs:yearMonthDuration or xs:dayTimeDuration which are totally ordered instead.
    """
    def __init__(self, days=0, seconds=0, microseconds=0, milliseconds=0,
                 minutes=0, hours=0, weeks=0, months=0, years=0,
                 negate=False, sourceValue=None):
        super(IsoDuration, self).__init__(days, seconds, microseconds, milliseconds,
                                          minutes, hours, weeks, months, years)
        if negate:
            self.years = -self.years
            self.months = -self.months
            self.tdelta = -self.tdelta
        self.sourceValue = sourceValue
        # approximate total days for ordering; calendar months are averaged
        # via the DAYSPERMONTH constant
        self.avgdays = (self.years * 12 + self.months) * DAYSPERMONTH + self.tdelta.days
        self._hash = hash((self.avgdays, self.tdelta))
    def __hash__(self):
        return self._hash
    def __eq__(self,other):
        try:
            return self.avgdays == other.avgdays and self.tdelta.seconds == other.tdelta.seconds and self.tdelta.microseconds == other.tdelta.microseconds
        except AttributeError:
            return False
    def __ne__(self,other):
        return not self.__eq__(other)
    def __lt__(self,other):
        # order by avgdays, then seconds, then microseconds
        if self.avgdays < other.avgdays:
            return True
        elif self.avgdays == other.avgdays:
            if self.tdelta.seconds < other.tdelta.seconds:
                return True
            elif self.tdelta.seconds == other.tdelta.seconds:
                if self.tdelta.microseconds < other.tdelta.microseconds:
                    return True
        return False
    def __le__(self,other):
        return self.__lt__(other) or self.__eq__(other)
    def __gt__(self,other):
        if self.avgdays > other.avgdays:
            return True
        # mirror of __lt__: the tie-break requires equal avgdays (the branch
        # previously repeated `> other.avgdays` and was unreachable)
        elif self.avgdays == other.avgdays:
            if self.tdelta.seconds > other.tdelta.seconds:
                return True
            elif self.tdelta.seconds == other.tdelta.seconds:
                if self.tdelta.microseconds > other.tdelta.microseconds:
                    return True
        return False
    def __ge__(self,other):
        return self.__gt__(other) or self.__eq__(other)
    def viewText(self, labelrole=None, lang=None):
        return super(IsoDuration, self).__str__() # textual words form of duration
    def __str__(self):
        # the lexical value this duration was parsed from
        return self.sourceValue
class InvalidValue(str):
    """String marker for a value that failed lexical validation.

    Behaves exactly like str; the distinct type lets callers detect
    invalidity with isinstance checks.
    """
    def __new__(cls, value):
        return super(InvalidValue, cls).__new__(cls, value)
INVALIDixVALUE = InvalidValue("(ixTransformValueError)")
|
|
import logging
from threading import local
from django.conf import settings
from django.core import signals
from django.db import models
from django.db.models.signals import pre_delete, post_save, m2m_changed
from django.dispatch import receiver
from elasticutils.contrib.django import MappingType, Indexable, MLT
from elasticsearch.exceptions import NotFoundError
from kitsune.search import es_utils
from kitsune.search.tasks import index_task, unindex_task
from kitsune.sumo.models import ModelBase
log = logging.getLogger('k.search.es')
# db_table_name -> MappingType class
_search_mapping_types = {}
def get_mapping_types(mapping_types=None):
    """Return a sorted list of registered MappingType classes.

    :arg mapping_types: optional iterable of mapping type names to select;
        defaults to all registered types.
    """
    if mapping_types is None:
        # dict.values() is a view on Python 3 and has no .sort(); make a
        # real list (same behavior on Python 2)
        values = list(_search_mapping_types.values())
    else:
        values = [_search_mapping_types[name] for name in mapping_types]
    # Sort to stabilize
    values.sort(key=lambda cls: cls.get_mapping_type_name())
    return values
# Holds a threadlocal set of indexing tasks to be filed after the request.
_local = local()
def _local_tasks():
    """(Create and) return the threadlocal set of indexing tasks."""
    # lazily initialize so each thread gets its own set on first use
    if getattr(_local, 'tasks', None) is None:
        _local.tasks = set()
    return _local.tasks
class SearchMixin(object):
    """A mixin which adds ES indexing support for the model
    When using this mixin, make sure to implement:
    * get_mapping_type
    Additionally, after defining your model, remember to register it and any
    related models which affect it::
        register_for_indexing(MyModel, 'some_app')
        register_for_indexing(RelatedModel, 'some_app',
                              instance_to_indexee=lambda r: r.my_model)
    """
    @classmethod
    def get_mapping_type(cls):
        """Return the MappingType for this model"""
        raise NotImplementedError

    def index_later(self):
        """Register myself to be indexed at the end of the request."""
        # stored as (task callable, args) tuples in the threadlocal set;
        # a request-finished handler presumably fires them -- see
        # _local_tasks above
        _local_tasks().add((index_task.delay,
                            (self.get_mapping_type(), (self.pk,))))

    def unindex_later(self):
        """Register myself to be unindexed at the end of the request."""
        _local_tasks().add((unindex_task.delay,
                            (self.get_mapping_type(), (self.pk,))))
class SearchMappingType(MappingType, Indexable):
    """Contains helpers on top of what ElasticUtils provides
    Subclasses should implement the following:
    1. get_mapping needs to return {'properties': { ... fields ... }}
    2. get_query_fields should return a list of fields for query
    3. extract_document
    4. get_model
    5. the mapping type class should be decorated with
       ``@register_mapping_type``
    Then make sure to:
    6. implement get_mapping_type on the related model
    """
    # keys whose values should remain lists in reshape(); all other keys
    # are collapsed to their first value
    list_keys = []

    @classmethod
    def search(cls):
        return es_utils.Sphilastic(cls)

    @classmethod
    def get_index(cls):
        # the write index of this type's index group
        return es_utils.write_index(cls.get_index_group())

    @classmethod
    def get_index_group(cls):
        return 'default'

    @classmethod
    def get_query_fields(cls):
        """Return the list of fields for query"""
        raise NotImplementedError

    @classmethod
    def get_localized_fields(cls):
        return []

    @classmethod
    def get_indexable(cls):
        # Some models have a gazillion instances. So we want to go
        # through them one at a time in a way that doesn't pull all
        # the data into memory all at once. So we iterate through ids
        # and pull objects one at a time.
        return cls.get_model().objects.order_by('pk').values_list(
            'pk', flat=True)

    @classmethod
    def reshape(cls, results):
        """Reshapes the results so lists are lists and everything is not"""
        # FIXME: This is dumb because we're changing the shape of the
        # results multiple times in a hokey-pokey kind of way. We
        # should fix this after SUMO is using Elasticsearch 1.x and it
        # probably involves an ElasticUtils rewrite or whatever the
        # next generation is.
        list_keys = cls.list_keys
        # FIXME: This builds a new dict from the old dict. Might be
        # cheaper to do it in-place.
        return [
            dict((key, (val if key in list_keys else val[0]))
                 for key, val in result.items())
            for result in results
        ]

    @classmethod
    def index(cls, *args, **kwargs):
        # no-op unless live indexing is enabled (kept off in most tests)
        if not settings.ES_LIVE_INDEXING:
            return
        super(SearchMappingType, cls).index(*args, **kwargs)

    @classmethod
    def unindex(cls, *args, **kwargs):
        if not settings.ES_LIVE_INDEXING:
            return
        try:
            super(SearchMappingType, cls).unindex(*args, **kwargs)
        except NotFoundError:
            # Ignore the case where we try to delete something that's
            # not there.
            pass

    @classmethod
    def morelikethis(cls, id_, s, fields):
        """MoreLikeThis API"""
        return list(MLT(id_, s, fields, min_term_freq=1, min_doc_freq=1))
def _identity(s):
return s
def register_for_indexing(app,
                          sender_class,
                          instance_to_indexee=_identity,
                          m2m=False):
    """Registers a model for signal-based live-indexing.

    As data changes in the database, we need to update the relevant
    documents in the index. This function registers Django model
    classes with the appropriate signals and update/delete routines
    such that our index stays up-to-date.

    :arg app: A bit of UID we use to build the signal handlers'
        dispatch_uids. This is prepended to the ``sender_class``
        model name, "elastic", and the signal name, so it should
        combine with those to make something unique. For this reason,
        the app name is usually a good choice, yielding something like
        "wiki.TaggedItem.elastic.post_save".

    :arg sender_class: The class to listen for saves and deletes on.

    :arg instance_to_indexee: A callable which takes the signalling
        instance and returns the model instance to be indexed. The
        returned instance should be a subclass of SearchMixin. If the
        callable returns None, no indexing is performed.

        Default: a callable which returns the sender itself.

    :arg m2m: True if this is a m2m model and False otherwise.

    Examples::

        # Registers MyModel for indexing. post_save creates new
        # documents in the index. pre_delete removes documents
        # from the index.
        register_for_indexing('some_app', MyModel)

        # Registers RelatedModel for indexing. RelatedModel is related
        # to some model in the sense that the document in the index is
        # composed of data from some model and it's related
        # RelatedModel instance. Because of that when we update
        # RelatedModel instances, we need to update the associated
        # document in the index for the related model.
        #
        # This registers the RelatedModel for indexing. post_save and
        # pre_delete update the associated document in the index for
        # the related model. The related model instance is determined
        # by the instance_to_indexee function.
        register_for_indexing('some_app', RelatedModel,
                              instance_to_indexee=lambda r: r.my_model)

    """
    def maybe_call_method(instance, is_raw, method_name):
        """Call an (un-)indexing method on instance if appropriate."""
        obj = instance_to_indexee(instance)
        # `is_raw` is True for fixture loading; skip indexing then.
        if obj is not None and not is_raw:
            getattr(obj, method_name)()

    def update(sender, instance, **kw):
        """File an add-to-index task for the indicated object."""
        maybe_call_method(instance, kw.get('raw'), 'index_later')

    def delete(sender, instance, **kw):
        """File a remove-from-index task for the indicated object."""
        maybe_call_method(instance, kw.get('raw'), 'unindex_later')

    def indexing_receiver(signal, signal_name):
        """Return a routine that registers signal handlers for indexers.

        The returned registration routine uses strong refs, makes up a
        dispatch_uid, and uses ``sender_class`` as the sender.

        """
        return receiver(
            signal,
            sender=sender_class,
            dispatch_uid='%s.%s.elastic.%s' %
                         (app, sender_class.__name__, signal_name),
            weak=False)

    if m2m:
        # This is an m2m model, so we register m2m_changed and it
        # updates the existing document in the index.
        indexing_receiver(m2m_changed, 'm2m_changed')(update)
    else:
        indexing_receiver(post_save, 'post_save')(update)
        indexing_receiver(pre_delete, 'pre_delete')(
            # If it's the indexed instance that's been deleted, go ahead
            # and delete it from the index. Otherwise, we just want to
            # update whatever model it's related to.
            delete if instance_to_indexee is _identity else update)
def register_mapping_type(cls):
    """Class decorator for registering MappingTypes for search"""
    type_name = cls.get_mapping_type_name()
    _search_mapping_types[type_name] = cls
    return cls
def generate_tasks(**kwargs):
    """Goes through thread local index update tasks set and generates
    celery tasks for all tasks in the set.

    Because this works off of a set, it naturally de-dupes the tasks,
    so if four tasks get tossed into the set that are identical, we
    execute it only once.

    """
    pending = _local_tasks()
    for task_fn, task_args in pending:
        task_fn(*task_args)
    pending.clear()


signals.request_finished.connect(generate_tasks)
class Record(ModelBase):
    """Record for the reindexing log"""
    starttime = models.DateTimeField(null=True)
    endtime = models.DateTimeField(null=True)
    text = models.CharField(max_length=255)

    class Meta:
        permissions = (('reindex', 'Can run a full reindexing'),)

    def delta(self):
        """Returns the timedelta"""
        # Both endpoints must be set before a duration makes sense.
        if not (self.starttime and self.endtime):
            return None
        return self.endtime - self.starttime
class Synonym(ModelBase):
    """To be serialized into ES for synonyms."""
    # Left-hand side of the synonym rule (the terms mapped "from").
    from_words = models.CharField(max_length=1024)
    # Right-hand side of the synonym rule (the terms mapped "to").
    to_words = models.CharField(max_length=1024)

    def __unicode__(self):
        return u'{0} => {1}'.format(self.from_words, self.to_words)
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry.page import buildbot_page_measurement_results
from telemetry.page import page_set
from telemetry.page import perf_tests_helper
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
def _MakePageSet():
  """Build a three-page test PageSet rooted at this file's directory."""
  spec = {
      "description": "hello",
      "archive_path": "foo.wpr",
      "pages": [
          {"url": "http://www.foo.com/"},
          {"url": "http://www.bar.com/"},
          {"url": "http://www.baz.com/"}
      ]
  }
  return page_set.PageSet.FromDict(spec, os.path.dirname(__file__))
class SummarySavingPageMeasurementResults(
    buildbot_page_measurement_results.BuildbotPageMeasurementResults):
  """Results subclass that captures perf output strings in memory.

  Overrides _PrintPerfResult so each formatted result line is appended to
  self.results instead of being printed to stdout, for later assertions.
  """
  def __init__(self, trace_tag=''):
    super(SummarySavingPageMeasurementResults, self).__init__(
        None, trace_tag=trace_tag)
    # Collected perf-result strings, in emission order.
    self.results = []

  def _PrintPerfResult(self, *args):
    res = perf_tests_helper.PrintPerfResult(*args, print_to_stdout=False)
    self.results.append(res)
class BuildbotPageMeasurementResultsTest(unittest.TestCase):
  """Tests the buildbot-formatted strings emitted by PrintSummary."""

  def assertEquals(self, ex, res):
    """Equality assert that prints a side-by-side diff on list mismatch."""
    # This helps diagnose result mismatches.
    if ex != res and isinstance(ex, list):
      def CleanList(l):
        # Split each entry on newlines so the diff is line-by-line.
        res = []
        for x in l:
          x = x.split('\n')
          res.extend(x)
        return res
      ex = CleanList(ex)
      res = CleanList(res)
      max_len = max(len(ex), len(res))
      max_width = max([len(x) for x in ex + res])
      max_width = max(10, max_width)
      print "Lists differ!"
      print '%*s | %*s' % (max_width, 'expected', max_width, 'result')
      for i in range(max_len):
        if i < len(ex):
          e = ex[i]
        else:
          e = ''
        if i < len(res):
          r = res[i]
        else:
          r = ''
        if e != r:
          # '*' marks lines that differ between expected and actual.
          sep = '*'
        else:
          sep = '|'
        print '%*s %s %*s' % (max_width, e, sep, max_width, r)
      print ""
    super(BuildbotPageMeasurementResultsTest, self).assertEquals(ex, res)

  def test_basic_summary(self):
    """Two pages, one metric each: per-url lines plus an averaged line."""
    test_page_set = _MakePageSet()
    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])
    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 7)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])
    measurement_results.PrintSummary()
    expected = ['RESULT a_by_url: http___www.bar.com_= 7 seconds',
                'RESULT a_by_url: http___www.foo.com_= 3 seconds',
                '*RESULT a: a= [3,7] seconds\nAvg a: 5.000000seconds\n' +
                'Sd a: 2.828427seconds',
                'RESULT telemetry_page_measurement_results: ' +
                'num_failed= 0 count',
                'RESULT telemetry_page_measurement_results: ' +
                'num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)

  def test_basic_summary_with_only_one_page(self):
    """A single page yields a scalar overall result, no per-url line."""
    test_page_set = _MakePageSet()
    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])
    measurement_results.PrintSummary()
    expected = ['*RESULT a: a= 3 seconds',
                'RESULT telemetry_page_measurement_results: ' +
                'num_failed= 0 count',
                'RESULT telemetry_page_measurement_results: ' +
                'num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)

  def test_basic_summary_nonuniform_results(self):
    """Metrics missing on some pages are still summarized over the rest."""
    test_page_set = _MakePageSet()
    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.Add('b', 'seconds', 10)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])
    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.Add('b', 'seconds', 10)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])
    measurement_results.WillMeasurePage(test_page_set.pages[2])
    measurement_results.Add('a', 'seconds', 7)
    # Note, page[2] does not report a 'b' metric.
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[2])
    measurement_results.PrintSummary()
    expected = ['RESULT a_by_url: http___www.bar.com_= 3 seconds',
                'RESULT a_by_url: http___www.baz.com_= 7 seconds',
                'RESULT a_by_url: http___www.foo.com_= 3 seconds',
                '*RESULT a: a= [3,3,7] seconds\nAvg a: 4.333333seconds\n' +
                'Sd a: 2.309401seconds',
                'RESULT b_by_url: http___www.bar.com_= 10 seconds',
                'RESULT b_by_url: http___www.foo.com_= 10 seconds',
                '*RESULT b: b= [10,10] seconds\nAvg b: 10.000000seconds',
                'RESULT telemetry_page_measurement_results: ' +
                'num_failed= 0 count',
                'RESULT telemetry_page_measurement_results: ' +
                'num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)

  def test_basic_summary_pass_and_fail_page(self):
    """If a page failed, only print summary for individual pages."""
    test_page_set = _MakePageSet()
    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.AddFailureMessage(test_page_set.pages[0], 'message')
    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 7)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])
    measurement_results.PrintSummary()
    expected = ['RESULT a_by_url: http___www.bar.com_= 7 seconds',
                'RESULT a_by_url: http___www.foo.com_= 3 seconds',
                'RESULT telemetry_page_measurement_results: ' +
                'num_failed= 1 count',
                'RESULT telemetry_page_measurement_results: ' +
                'num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)

  def test_repeated_pageset_one_iteration_one_page_fails(self):
    """Page fails on one iteration, no averaged results should print."""
    test_page_set = _MakePageSet()
    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 7)
    measurement_results.DidMeasurePage()
    measurement_results.AddFailureMessage(test_page_set.pages[1], 'message')
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 4)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])
    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 8)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])
    measurement_results.PrintSummary()
    expected = ['RESULT a_by_url: http___www.bar.com_= [7,8] seconds\n' +
                'Avg a_by_url: 7.500000seconds\n' +
                'Sd a_by_url: 0.707107seconds',
                'RESULT a_by_url: http___www.foo.com_= [3,4] seconds\n' +
                'Avg a_by_url: 3.500000seconds\n' +
                'Sd a_by_url: 0.707107seconds',
                'RESULT telemetry_page_measurement_results: ' +
                'num_failed= 1 count',
                'RESULT telemetry_page_measurement_results: ' +
                'num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)

  def test_repeated_pageset_one_iteration_one_page_error(self):
    """Page error on one iteration, no averaged results should print."""
    test_page_set = _MakePageSet()
    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])
    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 7)
    measurement_results.DidMeasurePage()
    measurement_results.AddErrorMessage(test_page_set.pages[1], 'message')
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 4)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])
    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 8)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])
    measurement_results.PrintSummary()
    expected = ['RESULT a_by_url: http___www.bar.com_= [7,8] seconds\n' +
                'Avg a_by_url: 7.500000seconds\n' +
                'Sd a_by_url: 0.707107seconds',
                'RESULT a_by_url: http___www.foo.com_= [3,4] seconds\n' +
                'Avg a_by_url: 3.500000seconds\n' +
                'Sd a_by_url: 0.707107seconds',
                'RESULT telemetry_page_measurement_results: ' +
                'num_failed= 0 count',
                'RESULT telemetry_page_measurement_results: ' +
                'num_errored= 1 count']
    self.assertEquals(expected, measurement_results.results)

  def test_repeated_pageset(self):
    """Two full iterations over the page set; values aggregate per url."""
    test_page_set = _MakePageSet()
    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])
    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 7)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 4)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])
    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 8)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])
    measurement_results.PrintSummary()
    expected = ['RESULT a_by_url: http___www.bar.com_= [7,8] seconds\n' +
                'Avg a_by_url: 7.500000seconds\n' +
                'Sd a_by_url: 0.707107seconds',
                'RESULT a_by_url: http___www.foo.com_= [3,4] seconds\n' +
                'Avg a_by_url: 3.500000seconds\n' +
                'Sd a_by_url: 0.707107seconds',
                '*RESULT a: a= [3,7,4,8] seconds\n' +
                'Avg a: 5.500000seconds\n' +
                'Sd a: 2.380476seconds',
                'RESULT telemetry_page_measurement_results: ' +
                'num_failed= 0 count',
                'RESULT telemetry_page_measurement_results: ' +
                'num_errored= 0 count'
                ]
    self.assertEquals(expected, measurement_results.results)

  def test_repeated_pages(self):
    """Each page measured twice back-to-back; values aggregate per url."""
    test_page_set = _MakePageSet()
    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 4)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])
    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 7)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])
    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 8)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])
    measurement_results.PrintSummary()
    expected = ['RESULT a_by_url: http___www.bar.com_= [7,8] seconds\n' +
                'Avg a_by_url: 7.500000seconds\n' +
                'Sd a_by_url: 0.707107seconds',
                'RESULT a_by_url: http___www.foo.com_= [3,4] seconds\n' +
                'Avg a_by_url: 3.500000seconds\n' +
                'Sd a_by_url: 0.707107seconds',
                '*RESULT a: a= [3,4,7,8] seconds\n' +
                'Avg a: 5.500000seconds\n' +
                'Sd a: 2.380476seconds',
                'RESULT telemetry_page_measurement_results: ' +
                'num_failed= 0 count',
                'RESULT telemetry_page_measurement_results: ' +
                'num_errored= 0 count'
                ]
    self.assertEquals(expected, measurement_results.results)

  def test_overall_results_trace_tag(self):
    """A trace tag is appended to the trace name of every overall result."""
    test_page_set = _MakePageSet()
    measurement_results = SummarySavingPageMeasurementResults(trace_tag='_ref')
    measurement_results.AddSummaryValue(
        scalar.ScalarValue(None, 'a', 'seconds', 1))
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('b', 'seconds', 2)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])
    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('b', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])
    measurement_results.AddSummaryValue(
        scalar.ScalarValue(None, 'c', 'seconds', 4))
    measurement_results.PrintSummary()
    expected = [
        '*RESULT b: b_ref= [2,3] seconds\n' +
        'Avg b: 2.500000seconds\nSd b: 0.707107seconds',
        '*RESULT a: a_ref= 1 seconds',
        '*RESULT c: c_ref= 4 seconds',
        'RESULT telemetry_page_measurement_results: num_failed= 0 count',
        'RESULT telemetry_page_measurement_results: num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)

  def test_overall_results_page_runs_twice(self):
    """Summary values plus a page run twice produce per-url and overall."""
    test_page_set = _MakePageSet()
    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.AddSummaryValue(
        scalar.ScalarValue(None, 'a', 'seconds', 1))
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('b', 'seconds', 2)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('b', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])
    measurement_results.PrintSummary()
    expected = [
        'RESULT b_by_url: http___www.foo.com_= [2,3] seconds\n' +
        'Avg b_by_url: 2.500000seconds\nSd b_by_url: 0.707107seconds',
        '*RESULT b: b= [2,3] seconds\n' +
        'Avg b: 2.500000seconds\nSd b: 0.707107seconds',
        '*RESULT a: a= 1 seconds',
        'RESULT telemetry_page_measurement_results: num_failed= 0 count',
        'RESULT telemetry_page_measurement_results: num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)

  def test_unimportant_results(self):
    """Unimportant values print without the leading '*' marker."""
    test_page_set = _MakePageSet()
    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.AddSummaryValue(
        scalar.ScalarValue(None, 'a', 'seconds', 1, important=False))
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('b', 'seconds', 2, data_type='unimportant')
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])
    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('b', 'seconds', 3, data_type='unimportant')
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])
    measurement_results.PrintSummary()
    self.assertEquals(
        measurement_results.results,
        ['RESULT b_by_url: http___www.bar.com_= 3 seconds',
         'RESULT b_by_url: http___www.foo.com_= 2 seconds',
         'RESULT b: b= [2,3] seconds\n' +
         'Avg b: 2.500000seconds\nSd b: 0.707107seconds',
         'RESULT a: a= 1 seconds',
         'RESULT telemetry_page_measurement_results: num_failed= 0 count',
         'RESULT telemetry_page_measurement_results: num_errored= 0 count'])

  def test_list_value(self):
    """List values concatenate across pages before averaging."""
    test_page_set = _MakePageSet()
    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.AddSummaryValue(
        list_of_scalar_values.ListOfScalarValues(None, 'a', 'seconds', [1, 1]))
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('b', 'seconds', [2, 2])
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])
    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('b', 'seconds', [3, 3])
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])
    measurement_results.PrintSummary()
    expected = [
        'RESULT b_by_url: http___www.bar.com_= [3,3] seconds\n' +
        'Avg b_by_url: 3.000000seconds',
        'RESULT b_by_url: http___www.foo.com_= [2,2] seconds\n' +
        'Avg b_by_url: 2.000000seconds',
        '*RESULT b: b= [2,2,3,3] seconds\nAvg b: 2.500000seconds\n' +
        'Sd b: 0.577350seconds',
        '*RESULT a: a= [1,1] seconds\nAvg a: 1.000000seconds',
        'RESULT telemetry_page_measurement_results: num_failed= 0 count',
        'RESULT telemetry_page_measurement_results: num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)

  def test_histogram(self):
    """Histogram values print per-url HISTOGRAM lines, never aggregated."""
    test_page_set = _MakePageSet()
    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'units',
                            '{"buckets": [{"low": 1, "high": 2, "count": 1}]}',
                            data_type='unimportant-histogram')
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])
    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'units',
                            '{"buckets": [{"low": 2, "high": 3, "count": 1}]}',
                            data_type='unimportant-histogram')
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])
    measurement_results.PrintSummary()
    expected = [
        'HISTOGRAM a_by_url: http___www.bar.com_= ' +
        '{"buckets": [{"low": 2, "high": 3, "count": 1}]} units\n' +
        'Avg a_by_url: 2.500000units',
        'HISTOGRAM a_by_url: http___www.foo.com_= ' +
        '{"buckets": [{"low": 1, "high": 2, "count": 1}]} units\n' +
        'Avg a_by_url: 1.500000units',
        'RESULT telemetry_page_measurement_results: num_failed= 0 count',
        'RESULT telemetry_page_measurement_results: num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from six.moves.urllib import parse as urlparse
from sahara import conductor as c
from sahara import context
from sahara.plugins import base as plugin_base
from sahara.plugins import provisioning
from sahara.service import quotas
from sahara.utils import general as g
from sahara.utils.notification import sender
from sahara.utils.openstack import nova
conductor = c.API
CONF = cfg.CONF
LOG = logging.getLogger(__name__)

# Module-level handle to the background-operations implementation; set once
# at service startup via setup_service_api() and used by the API functions
# below to launch provisioning/termination work.
OPS = None


def setup_service_api(ops):
    # Inject the ops implementation the API functions delegate to.
    global OPS
    OPS = ops
# Cluster ops

def get_clusters(**kwargs):
    """Return all clusters matching the given filter kwargs."""
    return conductor.cluster_get_all(context.ctx(), **kwargs)


def get_cluster(id, show_progress=False):
    """Return one cluster by id, optionally including provisioning progress."""
    return conductor.cluster_get(context.ctx(), id, show_progress)
def scale_cluster(id, data):
    """Validate a scaling request and kick off asynchronous scaling.

    ``data`` may carry 'resize_node_groups' (existing groups to resize)
    and 'add_node_groups' (new groups to create). Returns the cluster.
    """
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    existing_node_groups = data.get('resize_node_groups', [])
    additional_node_groups = data.get('add_node_groups', [])

    # to_be_enlarged maps node_group_id -> desired instance count; it is
    # the main object the rest of this function works with.
    to_be_enlarged = {}
    for group_spec in existing_node_groups:
        group_id = g.find(cluster.node_groups, name=group_spec['name'])['id']
        to_be_enlarged[group_id] = group_spec['count']

    additional = construct_ngs_for_scaling(cluster, additional_node_groups)
    cluster = conductor.cluster_get(ctx, cluster)
    _add_ports_for_auto_sg(ctx, cluster, plugin)

    try:
        cluster = g.change_cluster_status(cluster, "Validating")
        quotas.check_scaling(cluster, to_be_enlarged, additional)
        plugin.validate_scaling(cluster, to_be_enlarged, additional)
    except Exception as e:
        # Roll back the empty groups created above and re-raise.
        with excutils.save_and_reraise_exception():
            g.clean_cluster_from_empty_ng(cluster)
            g.change_cluster_status(cluster, "Active", six.text_type(e))

    # Validation succeeded: include the freshly added groups and keep
    # every untouched group at its current size.
    to_be_enlarged.update(additional)
    for node_group in cluster.node_groups:
        to_be_enlarged.setdefault(node_group.id, node_group.count)

    OPS.provision_scaled_cluster(id, to_be_enlarged)
    return cluster
def create_cluster(values):
    """Create, validate and asynchronously provision a new cluster."""
    ctx = context.ctx()
    cluster = conductor.cluster_create(ctx, values)
    sender.notify(ctx, cluster.id, cluster.name, "New",
                  "create")
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    _add_ports_for_auto_sg(ctx, cluster, plugin)

    # Validate before provisioning; on failure record the error on the
    # cluster and re-raise for the caller.
    try:
        cluster = g.change_cluster_status(cluster, "Validating")
        quotas.check_cluster(cluster)
        plugin.validate(cluster)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            g.change_cluster_status(cluster, "Error", six.text_type(e))

    OPS.provision_cluster(cluster.id)
    return cluster
def _add_ports_for_auto_sg(ctx, cluster, plugin):
    """Record plugin-required open ports on auto-security-group node groups."""
    for ng in cluster.node_groups:
        if not ng.auto_security_group:
            continue
        conductor.node_group_update(
            ctx, ng, {'open_ports': plugin.get_open_ports(ng)})
def terminate_cluster(id):
    """Mark the cluster as deleting, start teardown, and send a notification."""
    cluster = g.change_cluster_status(id, "Deleting")
    OPS.terminate_cluster(id)
    sender.notify(context.ctx(), cluster.id, cluster.name, cluster.status,
                  "delete")
# ClusterTemplate ops

def get_cluster_templates(**kwargs):
    """Return all cluster templates matching the filter kwargs."""
    return conductor.cluster_template_get_all(context.ctx(), **kwargs)


def get_cluster_template(id):
    """Return one cluster template by id."""
    return conductor.cluster_template_get(context.ctx(), id)


def create_cluster_template(values):
    """Create a cluster template from the given field values."""
    return conductor.cluster_template_create(context.ctx(), values)


def terminate_cluster_template(id):
    """Delete the cluster template with the given id."""
    return conductor.cluster_template_destroy(context.ctx(), id)


def update_cluster_template(id, values):
    """Update the cluster template with the given id."""
    return conductor.cluster_template_update(context.ctx(), id, values)


# NodeGroupTemplate ops

def get_node_group_templates(**kwargs):
    """Return all node group templates matching the filter kwargs."""
    return conductor.node_group_template_get_all(context.ctx(), **kwargs)


def get_node_group_template(id):
    """Return one node group template by id."""
    return conductor.node_group_template_get(context.ctx(), id)


def create_node_group_template(values):
    """Create a node group template from the given field values."""
    return conductor.node_group_template_create(context.ctx(), values)


def terminate_node_group_template(id):
    """Delete the node group template with the given id."""
    return conductor.node_group_template_destroy(context.ctx(), id)


def update_node_group_template(id, values):
    """Update the node group template with the given id."""
    return conductor.node_group_template_update(context.ctx(), id, values)
# Plugins ops

def get_plugins():
    """Return all registered provisioning plugins."""
    return plugin_base.PLUGINS.get_plugins(
        base=provisioning.ProvisioningPluginBase)
def get_plugin(plugin_name, version=None):
    """Return a plugin as a resource, or None if it can't be resolved.

    :arg plugin_name: name of the plugin to look up.
    :arg version: optional plugin version; when given, version-specific
        configs, node processes and required image tags are attached to
        the resource. An unknown version yields None.
    :returns: the plugin resource, or None for an unknown plugin/version.
    """
    plugin = plugin_base.PLUGINS.get_plugin(plugin_name)
    # Bug fix: the original fell through to `return res` with `res`
    # unbound when the plugin lookup returned a falsy value, raising
    # UnboundLocalError instead of returning None.
    if not plugin:
        return None
    res = plugin.as_resource()
    if version:
        if version not in plugin.get_versions():
            return None
        configs = plugin.get_configs(version)
        res._info['configs'] = [c.dict for c in configs]
        res._info['node_processes'] = plugin.get_node_processes(version)
        res._info['required_image_tags'] = (
            plugin.get_required_image_tags(version))
    return res
def convert_to_cluster_template(plugin_name, version, template_name,
                                config_file):
    """Convert a plugin config file into a stored cluster template."""
    plugin = plugin_base.PLUGINS.get_plugin(plugin_name)
    # Template names arrive URL-encoded; decode before handing off.
    decoded_name = urlparse.unquote(template_name)
    return plugin.convert(config_file, plugin_name, version, decoded_name,
                          conductor.cluster_template_create)
def construct_ngs_for_scaling(cluster, additional_node_groups):
    """Create requested node groups (empty) and map their ids to counts.

    Each group is added with count 0; the caller provisions instances
    later using the returned {node_group_id: requested_count} mapping.
    """
    ctx = context.ctx()
    additional = {}
    for ng in additional_node_groups:
        requested_count = ng['count']
        # Persist the group empty; instances are added after validation.
        ng['count'] = 0
        new_id = conductor.node_group_add(ctx, cluster, ng)
        additional[new_id] = requested_count
    return additional
# Image Registry

def get_images(name, tags):
    """Return registered images filtered by name and tags."""
    return nova.client().images.list_registered(name, tags)


def get_image(**kwargs):
    """Return one image: by id when that is the only kwarg, else by search."""
    if len(kwargs) == 1 and 'id' in kwargs:
        return nova.client().images.get(kwargs['id'])
    else:
        return nova.client().images.find(**kwargs)


def get_registered_image(id):
    """Return the registered image with the given id."""
    return nova.client().images.get_registered_image(id)


def register_image(image_id, username, description=None):
    """Register an image with a username and optional description."""
    client = nova.client()
    client.images.set_description(image_id, username, description)
    return client.images.get(image_id)


def unregister_image(image_id):
    """Clear an image's registration description."""
    client = nova.client()
    client.images.unset_description(image_id)
    return client.images.get(image_id)


def add_image_tags(image_id, tags):
    """Add the given tags to an image and return the updated image."""
    client = nova.client()
    client.images.tag(image_id, tags)
    return client.images.get(image_id)


def remove_image_tags(image_id, tags):
    """Remove the given tags from an image and return the updated image."""
    client = nova.client()
    client.images.untag(image_id, tags)
    return client.images.get(image_id)
|
|
from datetime import datetime, timezone
from unittest.mock import patch
from rest_framework.test import APITestCase, APIClient
# Field names, in serialization order, expected in every task payload
# returned by the /tasks endpoints; tests compare
# list(response.data.keys()) against this.
task_keys = ['id',
             'task_def',
             'status',
             'worker_id',
             'locked_at',
             'priority',
             'unique',
             'run_at',
             'started_at',
             'completed_at',
             'failed_at',
             'data',
             'attempts',
             'created_at',
             'updated_at']
class TaskTests(APITestCase):
    """Integration tests for the /tasks endpoints: queueing, `unique`-key
    conflict handling, updates, listing and retrieval, plus the associated
    authentication behaviour (writes require a JWT, reads are public)."""
    def setUp(self):
        # Every test queues tasks against this task-def created up front.
        # NOTE(review): hard-coded service JWT ('core' service) — presumably
        # signed with the test keypair; verify against the auth settings.
        client = APIClient()
        self.token = 'JWT eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJzZXJ2aWNlIjoiY29yZSJ9.HHlbWMjo-Y__DGV0DAiCY7u85FuNtY8wpovcZ9ga-oCsLdM2H5iVSz1vKiWK8zxl7dSYltbnyTNMxXO2cDS81hr4ohycr7YYg5CaE5sA5id73ab5T145XEdF5X_HXoeczctGq7X3x9QYSn7O1fWJbPWcIrOCs6T2DrySsYgjgdAAnWnKedy_dYWJ0YtHY1bXH3Y7T126QqVlQ9ylHk6hmFMCtxMPbuAX4YBJsxwjWpMDpe13xbaU0Uqo5N47a2_vi0XzQ_tzH5esLeFDl236VqhHRTIRTKhPTtRbQmXXy1k-70AU1FJewVrQddxbzMXJLFclStIdG_vW1dWdqhh-hQ'
        client.credentials(HTTP_AUTHORIZATION=self.token)
        task_def_response = client.post('/task-defs', {'name': 'classifier-search'}, format='json')
        self.assertEqual(task_def_response.status_code, 201)
        self.task_def = task_def_response.data
        self.task_def_name = self.task_def['name']
    @patch('django.utils.timezone.now')
    def test_queueing(self, mocked_now):
        """POST /tasks returns 201, the full key set, and field defaults."""
        # timezone.now is patched to a fixed ISO-8601 string so the serialized
        # run_at default can be compared verbatim against the response.
        test_datetime = datetime.utcnow().isoformat() + 'Z'
        mocked_now.return_value = test_datetime
        task_post_data = {
            'task_def': self.task_def_name,
            'data': {
                'foo': 'bar'
            }
        }
        client = APIClient()
        client.credentials(HTTP_AUTHORIZATION=self.token)
        response = client.post('/tasks', task_post_data, format='json')
        self.assertEqual(response.status_code, 201)
        self.assertEqual(list(response.data.keys()), task_keys)
        self.assertEqual(response.data['task_def'], self.task_def_name)
        ## test fields defaults
        self.assertEqual(response.data['status'], 'queued')
        self.assertEqual(response.data['priority'], 'normal')
        self.assertEqual(response.data['run_at'], test_datetime)
    def test_queueing_auth(self):
        """POST /tasks without credentials is rejected with 401."""
        task_post_data = {
            'task_def': self.task_def_name,
            'data': {
                'foo': 'bar'
            }
        }
        client = APIClient()
        response = client.post('/tasks', task_post_data, format='json')
        self.assertEqual(response.status_code, 401)
        self.assertEqual(response.data, {'detail': 'Authentication credentials were not provided.'})
    def test_queue_with_unique(self):
        """A caller-supplied `unique` key is stored and echoed back."""
        task_post_data = {
            'task_def': self.task_def_name,
            'unique': 'classifer-2343',
            'data': {
                'foo': 'bar'
            }
        }
        client = APIClient()
        client.credentials(HTTP_AUTHORIZATION=self.token)
        response = client.post('/tasks', task_post_data, format='json')
        self.assertEqual(response.status_code, 201)
        self.assertEqual(list(response.data.keys()), task_keys)
        self.assertEqual(response.data['unique'], 'classifer-2343')
    def test_queue_with_unique_conflict(self):
        """Re-using a `unique` key yields 409 with a conflict detail."""
        task_post_data = {
            'task_def': self.task_def_name,
            'unique': 'classifer-2343',
            'data': {
                'foo': 'bar'
            }
        }
        client = APIClient()
        client.credentials(HTTP_AUTHORIZATION=self.token)
        response1 = client.post('/tasks', task_post_data, format='json')
        self.assertEqual(response1.status_code, 201)
        self.assertEqual(list(response1.data.keys()), task_keys)
        self.assertEqual(response1.data['unique'], 'classifer-2343')
        response2 = client.post('/tasks', task_post_data, format='json')
        self.assertEqual(response2.status_code, 409)
        self.assertEqual(response2.data['detail'], 'Task `unique` field conflict')
    def test_update_task(self):
        """PUT /tasks/<id> updates mutable fields such as priority."""
        task_post_data = {
            'task_def': self.task_def_name,
            'unique': 'classifer-2343',
            'data': {
                'foo': 'bar'
            }
        }
        client = APIClient()
        client.credentials(HTTP_AUTHORIZATION=self.token)
        create_response = client.post('/tasks', task_post_data, format='json')
        self.assertEqual(create_response.status_code, 201)
        # Round-trip the created representation with a single field changed.
        update = create_response.data
        update['priority'] = 'high'
        update_response = client.put('/tasks/' + str(update['id']), update, format='json')
        self.assertEqual(update_response.status_code, 200)
        self.assertEqual(list(update_response.data.keys()), task_keys)
        self.assertEqual(update_response.data['priority'], 'high')
    def test_update_task_auth(self):
        """PUT /tasks/<id> without credentials is rejected with 401."""
        task_post_data = {
            'task_def': self.task_def_name,
            'unique': 'classifer-2343',
            'data': {
                'foo': 'bar'
            }
        }
        client = APIClient()
        client.credentials(HTTP_AUTHORIZATION=self.token)
        create_response = client.post('/tasks', task_post_data, format='json')
        self.assertEqual(create_response.status_code, 201)
        # A fresh client carries no Authorization header.
        client = APIClient() # clear token
        update = create_response.data
        update['priority'] = 'high'
        update_response = client.put('/tasks/' + str(update['id']), update, format='json')
        self.assertEqual(update_response.status_code, 401)
        self.assertEqual(update_response.data, {'detail': 'Authentication credentials were not provided.'})
    def test_list_tasks(self):
        """GET /tasks is public and returns a paginated result set."""
        task_post_data = {
            'task_def': self.task_def_name,
            'data': {
                'foo': 'bar'
            }
        }
        client = APIClient()
        client.credentials(HTTP_AUTHORIZATION=self.token)
        task_1_repsonse = client.post('/tasks', task_post_data, format='json')
        task_2_response = client.post('/tasks', task_post_data, format='json')
        client = APIClient() # clear token
        list_response = client.get('/tasks')
        self.assertEqual(list_response.status_code, 200)
        self.assertEqual(list(list_response.data.keys()), ['count',
                                                           'next',
                                                           'previous',
                                                           'results'])
        self.assertEqual(len(list_response.data['results']), 2)
        self.assertEqual(list(list_response.data['results'][0].keys()), task_keys)
        self.assertEqual(list(list_response.data['results'][1].keys()), task_keys)
    def test_get_task(self):
        """GET /tasks/<id> is public and returns the full task."""
        task_post_data = {
            'task_def': self.task_def_name,
            'unique': 'classifer-2343',
            'data': {
                'foo': 'bar'
            }
        }
        client = APIClient()
        client.credentials(HTTP_AUTHORIZATION=self.token)
        task_create_response = client.post('/tasks', task_post_data, format='json')
        self.assertEqual(task_create_response.status_code, 201)
        client = APIClient() # clear token
        task_response = client.get('/tasks/' + str(task_create_response.data['id']))
        self.assertEqual(task_response.status_code, 200)
        self.assertEqual(list(task_response.data.keys()), task_keys)
|
|
#!/usr/bin/env python
# Copyright (C) 2011, Kees Bos <cornelis.bos@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create imports for gwt to import from pyjamas in stead of gwt"""
import sys
import os
import compiler
from compiler import ast
class CreateImports(object):
    """Collect the top-level names defined or imported by a source module
    (parsed with the Python 2 ``compiler`` package) and maintain a matching
    ``from <module> import (...)`` block in a destination file, so gwt modules
    can re-export everything their pyjamas twins define.
    """
    # Set to 0 to silence the 'Skipping <ast>' warnings for unhandled nodes.
    verbosity = 1
    def __init__(self, srcFile, dstFile, base_directory='.'):
        # Names collected from the source module, and the generated lines.
        self.import_names = []
        self.import_lines = []
        self.srcFile = srcFile
        self.dstFile = dstFile
        parent, module = os.path.split(srcFile)
        # Dotted module path of srcFile, relative to base_directory.
        self.srcModule = parent.split(os.sep)
        base_depth = len(base_directory.split(os.sep))
        self.srcModule = self.srcModule[base_depth:]
        if module.lower().endswith(".py"):
            module = module[:-3]
        # A package __init__ is addressed by its package path alone.
        if module != '__init__':
            self.srcModule.append(module)
    def log(self, logname, msg, lineno):
        # Emit "<file> (<lineno>) <LEVEL>: <msg>" on stderr; pieces that are
        # unknown collapse to empty strings via the %(name)s/locals() trick.
        if lineno is None:
            lineno = ''
        else:
            lineno = "(%s) " % lineno
        if self.srcFile:
            if lineno:
                file = self.srcFile
            else:
                file = '%s ' % self.srcFile
        else:
            file = ''
        sys.stderr.write("%(file)s%(lineno)s%(logname)s: %(msg)s\n" % locals())
    def error(self, msg, lineno=None):
        # Convenience wrapper around log() at ERROR level.
        self.log('ERROR', msg, lineno)
    def warning(self, msg, lineno=None):
        # Convenience wrapper around log() at WARNING level.
        self.log('WARNING', msg, lineno)
    def getImportNames(self, srcFile=None, import_names=None):
        """Parse srcFile and accumulate its top-level names into import_names
        (sorted, deduplicated). Both default to the instance's own state.
        Files with syntax errors are skipped with a warning."""
        savedSrcFile = self.srcFile
        if srcFile is None:
            srcFile = self.srcFile
        else:
            self.srcFile = srcFile
        if import_names is None:
            import_names = self.import_names
        skip = False
        try:
            # First child of the Module node is the Stmt holding the
            # module-level statements.
            nodes = compiler.parseFile(srcFile).getChildNodes()[0]
        except SyntaxError, e:
            self.warning("Skipping. Syntax error: %s" % e)
            skip = True
        except:
            exc = sys.exc_info()
            self.error("Parse error %s" % (exc, ))
            raise
        if not skip:
            # Use a dict as an ordered-by-key set of names.
            dict_import_names = dict([(k, True) for k in import_names])
            for node in nodes:
                # Dispatch on AST node class name: astImport, astFrom, etc.
                ast_name = node.__class__.__name__
                method = getattr(self, "ast%s" % ast_name, None)
                if method is not None:
                    method(dict_import_names, node)
                elif self.verbosity:
                    self.warning('Skipping %s' % ast_name, node.lineno)
            # Replace contents in place so callers holding the list see it.
            import_names[:] = dict_import_names.keys()
            import_names.sort()
        self.srcFile = savedSrcFile
        return import_names
    def addNames(self, import_names, node_names):
        # node_names entries are either bare strings or (name, alias) pairs;
        # the alias wins when present.
        for names in node_names:
            if isinstance(names, basestring):
                name = names
            elif names[1] is None:
                name = names[0]
            else:
                name = names[1]
            import_names[name] = True
    def astImport(self, import_names, node):
        # `import a, b as c` introduces the (possibly aliased) names.
        self.addNames(import_names, node.names)
    def astFrom(self, import_names, node):
        # `from x import ...`, except for the pyjamas compiler pseudo-modules.
        if not node.modname in [
            '__pyjamas__',
            '__javascript__',
            ]:
            self.addNames(import_names, node.names)
    def astAssign(self, import_names, node):
        # Top-level assignment targets become exported names.
        for node in node.nodes:
            if node.flags == 'OP_ASSIGN':
                import_names[node.name] = True
            else:
                self.warning("Ignoring Assign %s" % node.flags, node.lineno)
    def astAssName(self, import_names, node):
        # `del name` at top level removes a previously collected name.
        if node.flags == 'OP_DELETE':
            if node.name in import_names:
                del(import_names[node.name])
        else:
            self.warning("Ignoring AssName %s" % node.flags, node.lineno)
    def astClass(self, import_names, node):
        # Top-level class definitions are exported.
        import_names[node.name] = True
    def astFunction(self, import_names, node):
        # Top-level function definitions are exported.
        import_names[node.name] = True
    def astGlobal(self, import_names, node):
        # `global name` declarations also count as exported names.
        self.addNames(import_names, node.names)
    # Now all the completely ignored asts
    def astTryExcept(self, import_names, node):
        pass
    def astIf(self, import_names, node):
        pass
    def astPrintnl(self, import_names, node):
        pass
    def astDiscard(self, import_names, node):
        pass
    def createImportLines(self, base=None, import_names=None):
        """Render the collected names as a parenthesised
        `from <base> import (...)` block, one name per line."""
        if base is None:
            base = '.'.join(self.srcModule)
        if import_names is None:
            import_names = self.import_names
        lines = []
        lines.append('from %s import (' % base)
        for name in import_names:
            lines.append('    %s,' % name)
        lines.append(')')
        self.import_lines[:] = lines
        return lines
    def createDestination(self, dstFile=None, import_lines=None):
        """Rewrite dstFile with the new import block prepended, stripping any
        previous block that starts with the same `from ... import (` line."""
        if dstFile is None:
            dstFile = self.dstFile
        if import_lines is None:
            import_lines = self.import_lines
        try:
            oldData = open(dstFile, 'r').readlines()
        except IOError, e:
            if e[0] == 2: # No such file or directory
                oldData = []
            else:
                raise
        newData = []
        # Only write a block when there is at least one name (header + one
        # name + closing paren is 3 lines).
        if len(import_lines) > 2:
            for line in import_lines:
                newData.append("%s\n" % line)
        skip = False
        for line in oldData:
            sline = line.rstrip()
            # Skip the old import block: everything from the matching header
            # through the closing ')'.
            if skip:
                if sline == ')':
                    skip = False
                continue
            if sline == import_lines[0]:
                skip = True
                continue
            newData.append(line)
        newData = "".join(newData)
        open(dstFile, 'w').write(newData)
if __name__ == '__main__':
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option(
        "--gwt",
        dest="gwt",
        default=False,
        action="store_true",
        help="Add all gwt imports to pyjamas",
    )
    parser.add_option(
        "--base-directory",
        dest="base_directory",
        default='.',
        help="Directory to search for files",
    )
    options, args = parser.parse_args()
    if options.gwt and len(args) > 0:
        raise ValueError("--gwt and arguments are mutually exclusive")
    if not options.gwt and len(args) == 0:
        raise ValueError("Missing src and dst file or --gwt")
    if not options.gwt:
        # Single-file mode: the two positional arguments are src and dst.
        if len(args) != 2:
            # Fix: the old message said "Too many arguments" even when there
            # was only one.
            raise ValueError("Expected exactly two arguments: src and dst file")
        # Fix: the original referenced an undefined name `dirname` and the raw
        # sys.argv entries (which still contain the option flags); use the
        # parsed positional arguments, relative to the base directory.
        srcFile = os.path.join(options.base_directory, args[0])
        dstFile = os.path.join(options.base_directory, args[1])
        ci = CreateImports(
            srcFile,
            dstFile,
            base_directory=options.base_directory,
        )
        ci.getImportNames()
        ci.createImportLines()
        ci.createDestination()
    else:
        # Batch mode: mirror every gwt/*.py module into pyjamas/, skipping
        # the platform-specific overrides.
        gwtbase = os.path.join(options.base_directory, "gwt")
        pyjsbase = os.path.join(options.base_directory, "pyjamas")
        for dirname, subdirs, files in os.walk(gwtbase):
            if 'platform' in subdirs:
                subdirs.remove('platform')
            for f in files:
                if not f.lower().endswith(".py"):
                    continue
                srcFile = os.path.join(dirname, f)
                dstFile = srcFile.replace(gwtbase, pyjsbase)
                ci = CreateImports(
                    srcFile,
                    dstFile,
                    base_directory=options.base_directory,
                )
                ci.getImportNames()
                ci.createImportLines()
                ci.createDestination()
|
|
import io
import re
import serial
import time
import glob
from pocs.focuser.focuser import AbstractFocuser
# Birger adaptor serial numbers should be 5 digits.
# Fix: use raw strings for regex patterns — '\d' in a plain string literal is
# an invalid escape sequence (DeprecationWarning, SyntaxWarning in newer
# Pythons).
serial_number_pattern = re.compile(r'^\d{5}$')
# Error codes should be 'ERR' followed by 1-2 digits
error_pattern = re.compile(r'(?<=ERR)\d{1,2}')
# Human readable error messages, indexed by the Birger numeric error code.
error_messages = ('No error',
                  'Unrecognised command',
                  'Lens is in manual focus mode',
                  'No lens connected',
                  'Lens distance stop error',
                  'Aperture not initialised',
                  'Invalid baud rate specified',
                  'Reserved',
                  'Reserved',
                  'A bad parameter was supplied to the command',
                  'XModem timeout',
                  'XModem error',
                  'XModem unlock code incorrect',
                  'Not used',
                  'Invalid port',
                  'Licence unlock failure',
                  'Invalid licence file',
                  'Invalid library file',
                  'Reserved',
                  'Reserved',
                  'Not used',
                  'Library not ready for lens communications',
                  'Library not ready for commands',
                  'Command not licensed',
                  'Invalid focus range in memory. Try relearning the range',
                  'Distance stops not supported by the lens')
class Focuser(AbstractFocuser):
    """
    Focuser class for control of a Canon DSLR lens via a Birger Engineering Canon EF-232 adapter
    """
    # Class variable to cache the device node scanning results
    _birger_nodes = None
    # Class variable to store the device nodes already in use. Prevents scanning known Birgers &
    # acts as a check against Birgers assigned to incorrect ports.
    _assigned_nodes = []

    def __init__(self,
                 name='Birger Focuser',
                 model='Canon EF-232',
                 initial_position=None,
                 dev_node_pattern='/dev/tty.USA49WG*.?',
                 *args, **kwargs):
        """
        Args:
            name (str): name of the focuser.
            model (str): model of the focuser.
            initial_position (int, optional): if given, move the focus to this
                encoder position after initialisation.
            dev_node_pattern (str): glob pattern for candidate device nodes,
                used when `port` is a 5-digit Birger serial number.
        """
        super().__init__(name=name, model=model, *args, **kwargs)
        self.logger.debug('Initialising Birger focuser')
        if serial_number_pattern.match(self.port):
            # Have been given a serial number instead of a device node.
            if Focuser._birger_nodes is None:
                # No cached device node scanning results, need to scan.
                # Fix: assign on the class (the original assigned via `self`,
                # creating an instance attribute that shadowed the class-level
                # cache, so results were never shared between instances).
                Focuser._birger_nodes = {}
                # Find nodes matching pattern
                device_nodes = glob.glob(dev_node_pattern)
                # Remove nodes already assigned to other Birger objects
                device_nodes = [node for node in device_nodes if node not in self._assigned_nodes]
                for device_node in device_nodes:
                    try:
                        serial_number = self.connect(device_node)
                        Focuser._birger_nodes[serial_number] = device_node
                    except (serial.SerialException, serial.SerialTimeoutException, AssertionError):
                        # No birger on this node.
                        pass
                    finally:
                        # Fix: connect() sets _serial_port to None when the
                        # port fails to open; closing unconditionally raised
                        # AttributeError and masked the real error.
                        if self._serial_port is not None:
                            self._serial_port.close()
            # Search in cached device node scanning results for serial number
            try:
                device_node = self._birger_nodes[self.port]
            except KeyError:
                self.logger.critical("Could not find {} ({})!".format(self.name, self.port))
                return
            self.port = device_node
        # Check that this node hasn't already been assigned to another Birger
        if self.port in self._assigned_nodes:
            self.logger.critical("Device node {} already in use!".format(self.port))
            return
        self.connect(self.port)
        self._assigned_nodes.append(self.port)
        # Fix: the original read `self._initialise` (no call), which silently
        # skipped the whole initialisation sequence.
        self._initialise()
        # Fix: compare against None so an explicit initial position of 0
        # (the close stop) is honoured.
        if initial_position is not None:
            self.position = initial_position

    ##################################################################################################
    # Properties
    ##################################################################################################

    @property
    def is_connected(self):
        """
        Checks status of serial port to determine if connected.
        """
        connected = False
        if self._serial_port:
            connected = self._serial_port.isOpen()
        return connected

    @AbstractFocuser.position.getter
    def position(self):
        """
        Returns current focus position in the lens focus encoder units
        """
        response = self._send_command('pf', response_length=1)
        return int(response[0].rstrip())

    @property
    def min_position(self):
        """
        Returns position of close limit of focus travel, in encoder units
        """
        return self._min_position

    @property
    def max_position(self):
        """
        Returns position of far limit of focus travel, in encoder units
        """
        return self._max_position

    @property
    def lens_info(self):
        """
        Return basic lens info (e.g. '400mm,f28' for a 400 mm f/2.8 lens)
        """
        return self._lens_info

    @property
    def library_version(self):
        """
        Returns the version string of the Birger adaptor library (firmware).
        """
        return self._library_version

    @property
    def hardware_version(self):
        """
        Returns the hardware version of the Birger adaptor
        """
        return self._hardware_version

    ##################################################################################################
    # Public Methods
    ##################################################################################################

    def connect(self, port):
        """
        Connect to the Birger adaptor on the given device node.

        Returns:
            str: the adaptor's serial number.

        Raises:
            serial.SerialException: if the port cannot be opened.
            AssertionError: if the adaptor does not respond as expected.
        """
        try:
            # Configure serial port.
            # Settings copied from Bob Abraham's birger.c
            self._serial_port = serial.Serial()
            self._serial_port.port = port
            self._serial_port.baudrate = 115200
            self._serial_port.bytesize = serial.EIGHTBITS
            self._serial_port.parity = serial.PARITY_NONE
            self._serial_port.stopbits = serial.STOPBITS_ONE
            self._serial_port.timeout = 2.0
            self._serial_port.xonxoff = False
            self._serial_port.rtscts = False
            self._serial_port.dsrdtr = False
            self._serial_port.write_timeout = None
            # Fix: the original set `self._inter_byte_timeout` on the Focuser
            # object; the setting belongs to the serial port.
            self._serial_port.inter_byte_timeout = None
            # Establish connection
            self._serial_port.open()
        except serial.SerialException as err:
            self._serial_port = None
            self.logger.critical('Could not open {}!'.format(port))
            raise err
        time.sleep(2)
        # Want to use a io.TextWrapper in order to have a readline() method with universal newlines
        # (Birger sends '\r', not '\n'). The line_buffering option causes an automatic flush() when
        # a write contains a newline character.
        self._serial_io = io.TextIOWrapper(io.BufferedRWPair(self._serial_port, self._serial_port),
                                           newline='\r', encoding='ascii', line_buffering=True)
        self.logger.debug('Established serial connection to {} on {}.'.format(self.name, port))
        # Set 'verbose' and 'legacy' response modes. The response from this depends on
        # what the current mode is... but after a power cycle it should be 'rm1,0', 'OK'
        try:
            self._send_command('rm1,0', response_length=0)
        except AssertionError as err:
            self.logger.critical('Error communicating with {} on {}!'.format(self.name, port))
            raise err
        # Return serial number.
        # Fix: the original called the undefined global `send_command`, which
        # raised NameError on every successful connection.
        return self._send_command('sn', response_length=1)[0].rstrip()

    def move_to(self, position):
        """
        Move the focus to a specific position in lens encoder units.

        Does not do any checking of the requested position but will warn if the lens reports hitting a stop.
        Returns the actual position moved to in lens encoder units.
        """
        response = self._send_command('fa{:d}'.format(int(position)), response_length=1)
        if response[0][:4] != 'DONE':
            self.logger.error("{} got response '{}', expected 'DONENNNNN,N'!".format(self, response[0].rstrip()))
        else:
            # Response is 'DONE<position>,<hit-stop flag>'.
            r = response[0][4:].rstrip()
            self.logger.debug("Moved to {} encoder units".format(r[:-2]))
            if r[-1] == '1':
                self.logger.warning('{} reported hitting a focus stop'.format(self))
            return int(r[:-2])

    def move_by(self, increment):
        """
        Move the focus by a given increment in lens encoder units.

        Does not do any checking of the requested increment but will warn if the lens reports hitting a stop.
        Returns the actual distance moved in lens encoder units.
        """
        response = self._send_command('mf{:d}'.format(increment), response_length=1)
        if response[0][:4] != 'DONE':
            self.logger.error("{} got response '{}', expected 'DONENNNNN,N'!".format(self, response[0].rstrip()))
        else:
            # Response is 'DONE<distance>,<hit-stop flag>'.
            r = response[0][4:].rstrip()
            self.logger.debug("Moved by {} encoder units".format(r[:-2]))
            if r[-1] == '1':
                self.logger.warning('{} reported hitting a focus stop'.format(self))
            return int(r[:-2])

    ##################################################################################################
    # Private Methods
    ##################################################################################################

    def _send_command(self, command, response_length=None, ignore_response=False):
        """
        Sends a command to the Birger adaptor and retrieves the response.

        Args:
            command (string): command string to send (without newline), e.g. 'fa1000', 'pf'
            response_length (integer, optional, default=None): number of lines of response expected.
                For most commands this should be 0 or 1. If None readlines() will be called to
                capture all responses. As this will block until the timeout expires it should only
                be used if the number of lines expected is not known (e.g. 'ds' command).
            ignore_response (bool): if True, return immediately after writing
                without reading the echo or 'OK'.

        Returns:
            list: possibly empty list containing the '\r' terminated lines of the response from the adaptor.
        """
        if not self.is_connected:
            self.logger.critical("Attempt to send command to {} when not connected!".format(self))
            return
        # Clear the input buffer in case there's anything left over in there.
        self._serial_port.reset_input_buffer()
        # Send command
        self._serial_io.write(command + '\r')
        if ignore_response:
            return
        # In verbose mode adaptor will first echo the command
        echo = self._serial_io.readline().rstrip()
        assert echo == command, self.logger.warning("echo != command: {} != {}".format(echo, command))
        # Adaptor should then send 'OK', even if there was an error.
        ok = self._serial_io.readline().rstrip()
        assert ok == 'OK'
        # Depending on which command was sent there may or may not be any further
        # response.
        response = []
        if response_length == 0:
            # Not expecting any further response. Should check the buffer anyway in case an error
            # message has been sent.
            if self._serial_port.in_waiting:
                response.append(self._serial_io.readline())
        elif response_length is None:
            # Don't know what to expect. Call readlines() to get whatever is there.
            # Fix: the original fell through to `response_length > 0`, which
            # raises TypeError for None under Python 3, and appended the list
            # from readlines() as a single element instead of extending.
            response.extend(self._serial_io.readlines())
        else:
            # Expecting some number of lines of response. Attempt to read that many lines.
            for i in range(response_length):
                response.append(self._serial_io.readline())
        # Check for an error message in response
        if response:
            # Not an empty list.
            # Fix: use search(), not match() — the (?<=ERR) lookbehind can
            # never succeed at position 0, so match() never detected errors.
            error_match = error_pattern.search(response[0])
            if error_match:
                # Got an error message! Translate it.
                try:
                    error_message = error_messages[int(error_match.group())]
                    self.logger.error("{} returned error message '{}'!".format(self, error_message))
                except Exception:
                    self.logger.error("Unknown error '{}' from {}!".format(error_match.group(), self))
        return response

    def _initialise(self):
        """Full start-up sequence: identify the adaptor & lens, initialise the
        aperture, then calibrate the focus travel range."""
        # Get serial number. Note, this is the serial number of the Birger adaptor,
        # *not* the attached lens (which would be more useful). Accessible as self.uid
        self._get_serial_number()
        # Get the version string of the adaptor software libray. Accessible as self.library_version
        self._get_library_version()
        # Get the hardware version of the adaptor. Accessible as self.hardware_version
        self._get_hardware_version()
        # Get basic lens info (e.g. '400mm,f28' for a 400 mm, f/2.8 lens). Accessible as self.lens_info
        self._get_lens_info()
        # Initialise the aperture motor. This also has the side effect of fully opening the iris.
        self._initialise_aperture()
        # Initalise focus. First move the focus to the close stop.
        self._move_zero()
        # Then reset the focus encoder counts to 0
        self._zero_encoder()
        self._min_position = 0
        # Calibrate the focus with the 'Learn Absolute Focus Range' command
        self._learn_focus_range()
        # Finally move the focus to the far stop (close to where we'll want it) and record position
        self._max_position = self._move_inf()
        self.logger.info('\t\t\t {} initialised'.format(self))

    def _get_serial_number(self):
        # Adaptor (not lens) serial number; exposed via self.uid.
        response = self._send_command('sn', response_length=1)
        self._serial_number = response[0].rstrip()
        self.logger.debug("Got serial number {} for {} on {}".format(self.uid, self.name, self.port))

    def _get_library_version(self):
        # Firmware/library version string.
        response = self._send_command('lv', response_length=1)
        self._library_version = response[0].rstrip()
        self.logger.debug("Got library version '{}' for {} on {}".format(self.library_version, self.name, self.port))

    def _get_hardware_version(self):
        # Adaptor hardware revision.
        response = self._send_command('hv', response_length=1)
        self._hardware_version = response[0].rstrip()
        self.logger.debug("Got hardware version {} for {} on {}".format(self.hardware_version, self.name, self.port))

    def _get_lens_info(self):
        # Basic lens identification string, e.g. '400mm,f28'.
        response = self._send_command('id', response_length=1)
        self._lens_info = response[0].rstrip()
        self.logger.debug("Got lens info '{}' for {} on {}".format(self.lens_info, self.name, self.port))

    def _initialise_aperture(self):
        # Side effect: fully opens the iris.
        self.logger.debug('Initialising aperture motor')
        response = self._send_command('in', response_length=1)
        if response[0].rstrip() != 'DONE':
            self.logger.error("{} got response '{}', expected 'DONE'!".format(self, response[0].rstrip()))

    def _move_zero(self):
        # Move focus to the close stop; returns distance moved in encoder units.
        response = self._send_command('mz', response_length=1)
        if response[0][:4] != 'DONE':
            self.logger.error("{} got response '{}', expected 'DONENNNNN,1'!".format(self, response[0].rstrip()))
        else:
            r = response[0][4:].rstrip()
            self.logger.debug("Moved {} encoder units to close stop".format(r[:-2]))
            return int(r[:-2])

    def _zero_encoder(self):
        # Define the current (close stop) position as encoder zero.
        self.logger.debug('Setting focus encoder zero point')
        self._send_command('sf0', response_length=0)

    def _learn_focus_range(self):
        # 'Learn Absolute Focus Range' calibration command.
        self.logger.debug('Learning absolute focus range')
        response = self._send_command('la', response_length=1)
        if response[0].rstrip() != 'DONE:LA':
            self.logger.error("{} got response '{}', expected 'DONE:LA'!".format(self, response[0].rstrip()))

    def _move_inf(self):
        # Move focus to the far stop; returns distance moved in encoder units.
        response = self._send_command('mi', response_length=1)
        if response[0][:4] != 'DONE':
            self.logger.error("{} got response '{}', expected 'DONENNNNN,1'!".format(self, response[0].rstrip()))
        else:
            r = response[0][4:].rstrip()
            self.logger.debug("Moved {} encoder units to far stop".format(r[:-2]))
            return int(r[:-2])
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon_lib import exceptions
from horizon_lib import forms
from horizon_lib import messages
from horizon_lib import workflows
from openstack_horizon import api
from openstack_horizon.dashboards.identity.domains import constants
# Module-level logger, named after this module for hierarchical log config.
LOG = logging.getLogger(__name__)
class CreateDomainInfoAction(workflows.Action):
    """Form action collecting the basic attributes of a new keystone domain."""
    # Declarative form fields; declaration order determines rendering order.
    name = forms.CharField(label=_("Name"))
    description = forms.CharField(widget=forms.widgets.Textarea(
                                  attrs={'rows': 4}),
                                  label=_("Description"),
                                  required=False)
    enabled = forms.BooleanField(label=_("Enabled"),
                                 required=False,
                                 initial=True)
    class Meta:
        name = _("Domain Information")
        slug = "create_domain"
        help_text = _("Domains provide separation between users and "
                      "infrastructure used by different organizations.")
class CreateDomainInfo(workflows.Step):
    # Workflow step wrapping CreateDomainInfoAction; the listed keys are
    # pushed into the workflow context for later steps / handle().
    action_class = CreateDomainInfoAction
    contributes = ("domain_id",
                   "name",
                   "description",
                   "enabled")
class UpdateDomainUsersAction(workflows.MembershipAction):
    """Membership action building one multi-select field per keystone role,
    listing the domain's users, pre-selected with their current roles."""
    def __init__(self, request, *args, **kwargs):
        super(UpdateDomainUsersAction, self).__init__(request,
                                                      *args,
                                                      **kwargs)
        domain_id = self.initial.get("domain_id", '')
        # Get the default role
        try:
            default_role = api.keystone.get_default_role(self.request)
            # Default role is necessary to add members to a domain
            if default_role is None:
                default = getattr(settings,
                                  "OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
                msg = (_('Could not find default role "%s" in Keystone') %
                       default)
                raise exceptions.NotFound(msg)
        except Exception:
            # NOTE(review): handle() with a redirect is presumably expected to
            # abort by raising; otherwise `default_role` below would be
            # unbound — confirm against horizon_lib.exceptions.handle.
            exceptions.handle(self.request,
                              _('Unable to find default role.'),
                              redirect=reverse(constants.DOMAINS_INDEX_URL))
        default_role_name = self.get_default_role_field_name()
        self.fields[default_role_name] = forms.CharField(required=False)
        self.fields[default_role_name].initial = default_role.id
        # Get list of available users
        all_users = []
        try:
            all_users = api.keystone.user_list(request,
                                               domain=domain_id)
        except Exception:
            exceptions.handle(request, _('Unable to retrieve user list.'))
        users_list = [(user.id, user.name) for user in all_users]
        # Get list of roles
        role_list = []
        try:
            role_list = api.keystone.role_list(request)
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve role list.'),
                              redirect=reverse(constants.DOMAINS_INDEX_URL))
        # One MultipleChoiceField per role, offering every user in the domain.
        for role in role_list:
            field_name = self.get_member_field_name(role.id)
            label = role.name
            self.fields[field_name] = forms.MultipleChoiceField(required=False,
                                                                label=label)
            self.fields[field_name].choices = users_list
            self.fields[field_name].initial = []
        # Figure out users & roles
        if domain_id:
            try:
                users_roles = api.keystone.get_domain_users_roles(request,
                                                                  domain_id)
            except Exception:
                exceptions.handle(request,
                                  _('Unable to retrieve user domain role '
                                    'assignments.'),
                                  redirect=reverse(
                                      constants.DOMAINS_INDEX_URL))
            # Pre-select each user in the field of every role they hold.
            for user_id in users_roles:
                roles_ids = users_roles[user_id]
                for role_id in roles_ids:
                    field_name = self.get_member_field_name(role_id)
                    self.fields[field_name].initial.append(user_id)
    class Meta:
        name = _("Domain Members")
        slug = constants.DOMAIN_USER_MEMBER_SLUG
class UpdateDomainUsers(workflows.UpdateMembersStep):
    """Workflow step presenting the per-role user membership widgets and
    copying the submitted role/user selections into the workflow context."""
    action_class = UpdateDomainUsersAction
    available_list_title = _("All Users")
    members_list_title = _("Domain Members")
    no_available_text = _("No users found.")
    no_members_text = _("No users.")

    def contribute(self, data, context):
        context = super(UpdateDomainUsers, self).contribute(data, context)
        if not data:
            return context
        try:
            roles = api.keystone.role_list(self.workflow.request)
        except Exception:
            exceptions.handle(self.workflow.request,
                              _('Unable to retrieve role list.'),
                              redirect=reverse(
                                  constants.DOMAINS_INDEX_URL))
        post = self.workflow.request.POST
        # Copy the selected user ids for each role's membership field.
        for member_field in (self.get_member_field_name(role.id)
                             for role in roles):
            context[member_field] = post.getlist(member_field)
        return context
class UpdateDomainGroupsAction(workflows.MembershipAction):
    """Membership action building one multi-select field per keystone role,
    listing the domain's groups, pre-selected with their current roles."""
    def __init__(self, request, *args, **kwargs):
        super(UpdateDomainGroupsAction, self).__init__(request,
                                                       *args,
                                                       **kwargs)
        err_msg = _('Unable to retrieve group list. Please try again later.')
        domain_id = self.initial.get("domain_id", '')
        # Get the default role
        try:
            default_role = api.keystone.get_default_role(self.request)
            # Default role is necessary to add members to a domain
            if default_role is None:
                default = getattr(settings,
                                  "OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
                msg = (_('Could not find default role "%s" in Keystone') %
                       default)
                raise exceptions.NotFound(msg)
        except Exception:
            # NOTE(review): handle() with a redirect is presumably expected to
            # abort by raising; otherwise `default_role` below would be
            # unbound — confirm against horizon_lib.exceptions.handle.
            exceptions.handle(self.request,
                              err_msg,
                              redirect=reverse(constants.DOMAINS_INDEX_URL))
        default_role_name = self.get_default_role_field_name()
        self.fields[default_role_name] = forms.CharField(required=False)
        self.fields[default_role_name].initial = default_role.id
        # Get list of available groups
        all_groups = []
        try:
            all_groups = api.keystone.group_list(request,
                                                 domain=domain_id)
        except Exception:
            exceptions.handle(request, err_msg)
        groups_list = [(group.id, group.name) for group in all_groups]
        # Get list of roles
        role_list = []
        try:
            role_list = api.keystone.role_list(request)
        except Exception:
            exceptions.handle(request,
                              err_msg,
                              redirect=reverse(constants.DOMAINS_INDEX_URL))
        # One MultipleChoiceField per role, offering every group in the domain.
        for role in role_list:
            field_name = self.get_member_field_name(role.id)
            label = role.name
            self.fields[field_name] = forms.MultipleChoiceField(required=False,
                                                                label=label)
            self.fields[field_name].choices = groups_list
            self.fields[field_name].initial = []
        # Figure out groups & roles
        if domain_id:
            # Unlike the users action, role assignments are fetched per group.
            for group in all_groups:
                try:
                    roles = api.keystone.roles_for_group(self.request,
                                                         group=group.id,
                                                         domain=domain_id)
                except Exception:
                    exceptions.handle(request,
                                      err_msg,
                                      redirect=reverse(
                                          constants.DOMAINS_INDEX_URL))
                # Pre-select this group in the field of every role it holds.
                for role in roles:
                    field_name = self.get_member_field_name(role.id)
                    self.fields[field_name].initial.append(group.id)
    class Meta:
        name = _("Domain Groups")
        slug = constants.DOMAIN_GROUP_MEMBER_SLUG
class UpdateDomainGroups(workflows.UpdateMembersStep):
    """Workflow step presenting the per-role group membership widgets and
    copying the submitted role/group selections into the workflow context."""
    action_class = UpdateDomainGroupsAction
    available_list_title = _("All Groups")
    members_list_title = _("Domain Groups")
    no_available_text = _("No groups found.")
    no_members_text = _("No groups.")

    def contribute(self, data, context):
        context = super(UpdateDomainGroups, self).contribute(data, context)
        if not data:
            return context
        try:
            roles = api.keystone.role_list(self.workflow.request)
        except Exception:
            exceptions.handle(self.workflow.request,
                              _('Unable to retrieve role list.'))
        post = self.workflow.request.POST
        # Copy the selected group ids for each role's membership field.
        for role in roles:
            member_field = self.get_member_field_name(role.id)
            context[member_field] = post.getlist(member_field)
        return context
class CreateDomain(workflows.Workflow):
    """Workflow that creates a new keystone domain from the info step."""
    slug = "create_domain"
    name = _("Create Domain")
    finalize_button_name = _("Create Domain")
    success_message = _('Created new domain "%s".')
    failure_message = _('Unable to create domain "%s".')
    success_url = constants.DOMAINS_INDEX_URL
    default_steps = (CreateDomainInfo, )

    def format_status_message(self, message):
        # Interpolate the domain name into the success/failure template.
        return message % self.context.get('name', 'unknown domain')

    def handle(self, request, data):
        """Create the domain; return True on success, False on failure."""
        # create the domain
        try:
            # Idiom fix: pass lazy %-style args to the logger instead of
            # eagerly formatting with '%', so formatting is skipped when the
            # INFO level is disabled.
            LOG.info('Creating domain with name "%s"', data['name'])
            desc = data['description']
            api.keystone.domain_create(request,
                                       name=data['name'],
                                       description=desc,
                                       enabled=data['enabled'])
        except Exception:
            # Errors are swallowed here; the workflow machinery reports the
            # failure_message based on the False return.
            exceptions.handle(request, ignore=True)
            return False
        return True
class UpdateDomainInfoAction(CreateDomainInfoAction):
    """Same fields as CreateDomainInfoAction, re-labelled for editing."""
    class Meta:
        name = _("Domain Information")
        slug = 'update_domain'
        help_text = _("Domains provide separation between users and "
                      "infrastructure used by different organizations. "
                      "Edit the domain details to add or remove "
                      "groups in the domain.")
class UpdateDomainInfo(workflows.Step):
    # Edit step: domain_id is supplied by the caller; the remaining keys are
    # contributed back to the workflow context.
    action_class = UpdateDomainInfoAction
    depends_on = ("domain_id",)
    contributes = ("name",
                   "description",
                   "enabled")
class UpdateDomain(workflows.Workflow):
    """Workflow for editing a domain: its basic information plus the
    per-role user and group membership within the domain."""
    slug = "update_domain"
    name = _("Edit Domain")
    finalize_button_name = _("Save")
    success_message = _('Modified domain "%s".')
    failure_message = _('Unable to modify domain "%s".')
    success_url = constants.DOMAINS_INDEX_URL
    default_steps = (UpdateDomainInfo,
                     UpdateDomainUsers,
                     UpdateDomainGroups)

    def format_status_message(self, message):
        # Interpolate the domain name into the success/failure template.
        return message % self.context.get('name', 'unknown domain')

    def _update_domain_members(self, request, domain_id, data):
        """Diff and apply the per-role user assignments for the domain.

        Returns True on success; on failure reports (approximately) how
        many users could not be updated and returns False.
        """
        users_to_modify = 0
        # Domain-user member step holds the per-role form field names.
        member_step = self.get_step(constants.DOMAIN_USER_MEMBER_SLUG)
        try:
            # Get our role options.
            available_roles = api.keystone.role_list(request)
            # Users currently associated with this domain, used to diff
            # against the submitted form data.
            domain_members = api.keystone.user_list(request,
                                                    domain=domain_id)
            users_to_modify = len(domain_members)
            for user in domain_members:
                # Check for changes in the roles of existing members.
                current_roles = api.keystone.roles_for_user(self.request,
                                                            user.id,
                                                            domain=domain_id)
                current_role_ids = [role.id for role in current_roles]
                for role in available_roles:
                    field_name = member_step.get_member_field_name(role.id)
                    # Check if the user is in the list of users with this role.
                    if user.id in data[field_name]:
                        if role.id not in current_role_ids:
                            # User was granted a new role.
                            api.keystone.add_domain_user_role(
                                request,
                                domain=domain_id,
                                user=user.id,
                                role=role.id)
                        else:
                            # Role unchanged; drop it from the remaining
                            # list so it is not revoked below.
                            current_role_ids.remove(role.id)
                # Prevent admins from doing stupid things to themselves.
                is_current_user = user.id == request.user.id
                # TODO(lcheng) When Horizon moves to Domain scoped token for
                # invoking identity operation, replace this with:
                # domain_id == request.user.domain_id
                is_current_domain = True
                admin_roles = [role for role in current_roles
                               if role.name.lower() == 'admin']
                if len(admin_roles):
                    removing_admin = any([role.id in current_role_ids
                                          for role in admin_roles])
                else:
                    removing_admin = False
                if is_current_user and is_current_domain and removing_admin:
                    # Cannot remove "admin" role on current(admin) domain
                    msg = _('You cannot revoke your administrative privileges '
                            'from the domain you are currently logged into. '
                            'Please switch to another domain with '
                            'administrative privileges or remove the '
                            'administrative role manually via the CLI.')
                    messages.warning(request, msg)
                else:
                    # Otherwise go through and revoke any removed roles.
                    for id_to_delete in current_role_ids:
                        api.keystone.remove_domain_user_role(
                            request,
                            domain=domain_id,
                            user=user.id,
                            role=id_to_delete)
                users_to_modify -= 1

            # Grant new roles on the domain.
            for role in available_roles:
                field_name = member_step.get_member_field_name(role.id)
                # Count how many users may be added for exception handling.
                users_to_modify += len(data[field_name])
            for role in available_roles:
                users_added = 0
                field_name = member_step.get_member_field_name(role.id)
                for user_id in data[field_name]:
                    # `any(...)` instead of py2 `filter(...)` truthiness:
                    # filter() objects are always truthy under Python 3.
                    if not any(user_id == member.id
                               for member in domain_members):
                        # BUGFIX: grant a *domain* user role; the original
                        # called add_tenant_user_role with the domain id
                        # passed as a project id.
                        api.keystone.add_domain_user_role(request,
                                                          domain=domain_id,
                                                          user=user_id,
                                                          role=role.id)
                        users_added += 1
                users_to_modify -= users_added
            return True
        except Exception:
            exceptions.handle(request,
                              _('Failed to modify %s project '
                                'members and update domain groups.')
                              % users_to_modify)
            return False

    def _update_domain_groups(self, request, domain_id, data):
        """Diff and apply the per-role group assignments for the domain.

        Returns True on success; on failure reports (approximately) how
        many groups could not be updated and returns False.
        """
        groups_to_modify = 0
        member_step = self.get_step(constants.DOMAIN_GROUP_MEMBER_SLUG)
        try:
            # Get our role options.
            available_roles = api.keystone.role_list(request)
            # Groups currently associated with this domain, to diff against.
            domain_groups = api.keystone.group_list(request,
                                                    domain=domain_id)
            groups_to_modify = len(domain_groups)
            for group in domain_groups:
                # Check for changes in the roles of existing domain groups.
                current_roles = api.keystone.roles_for_group(
                    self.request,
                    group=group.id,
                    domain=domain_id)
                current_role_ids = [role.id for role in current_roles]
                for role in available_roles:
                    # Check if the group is in the list of groups with
                    # this role.
                    field_name = member_step.get_member_field_name(role.id)
                    if group.id in data[field_name]:
                        if role.id not in current_role_ids:
                            # Group was granted a new role.
                            api.keystone.add_group_role(
                                request,
                                role=role.id,
                                group=group.id,
                                domain=domain_id)
                        else:
                            # Role unchanged; keep it out of the revoke list.
                            current_role_ids.remove(role.id)
                # Revoke any removed roles.
                for id_to_delete in current_role_ids:
                    api.keystone.remove_group_role(request,
                                                   role=id_to_delete,
                                                   group=group.id,
                                                   domain=domain_id)
                groups_to_modify -= 1

            # Grant new roles on the domain.
            for role in available_roles:
                field_name = member_step.get_member_field_name(role.id)
                # Count how many groups may be added for error handling.
                groups_to_modify += len(data[field_name])
            for role in available_roles:
                groups_added = 0
                field_name = member_step.get_member_field_name(role.id)
                for group_id in data[field_name]:
                    # `any(...)` instead of py2 `filter(...)` truthiness
                    # (see _update_domain_members).
                    if not any(group_id == existing.id
                               for existing in domain_groups):
                        api.keystone.add_group_role(request,
                                                    role=role.id,
                                                    group=group_id,
                                                    domain=domain_id)
                        groups_added += 1
                groups_to_modify -= groups_added
            return True
        except Exception:
            exceptions.handle(request,
                              _('Failed to modify %s domain groups.')
                              % groups_to_modify)
            return False

    def handle(self, request, data):
        """Update the domain, then its members and groups.

        Returns True only when every phase succeeded; each phase reports
        its own errors.
        """
        domain_id = data.pop('domain_id')
        try:
            LOG.info('Updating domain with name "%s"', data['name'])
            api.keystone.domain_update(request,
                                       domain_id=domain_id,
                                       name=data['name'],
                                       description=data['description'],
                                       enabled=data['enabled'])
        except Exception:
            exceptions.handle(request, ignore=True)
            return False
        if not self._update_domain_members(request, domain_id, data):
            return False
        if not self._update_domain_groups(request, domain_id, data):
            return False
        return True
|
|
import upseto
import unittest
import gitwrapper
import upsetowrapper
import upseto.manifest
import upseto.gitwrapper
import os
import shutil
import tempfile
import zipfile
class Test(unittest.TestCase):
    """End-to-end tests for the upseto dependency manager.

    Each test drives the real command-line tool (via upsetowrapper)
    against local git repositories created by the gitwrapper helpers,
    so these are integration tests rather than unit tests.
    """

    def setUp(self):
        # Create the temporary "github" and local-clones directories.
        gitwrapper.setUp()

    def tearDown(self):
        gitwrapper.tearDown()

    class SimpleManifest_OneProjectDependsOnTwoOthers:
        """Fixture: 'requiringProject' requires project1 and project2.

        Builds three repos with local clones, adds both requirements,
        commits the manifest, and verifies the manifest contents.
        """

        def __init__(self, test):
            self.project1 = gitwrapper.GitHub("project1")
            self.project2 = gitwrapper.GitHub("project2")
            self.requiringProject = gitwrapper.GitHub("requiringProject")
            self.localClone1 = gitwrapper.LocalClone(self.project1)
            self.localClone2 = gitwrapper.LocalClone(self.project2)
            self.localRequiringProject = gitwrapper.LocalClone(self.requiringProject)
            test.assertEquals(self.project1.hash('master'), self.localClone1.hash())
            test.assertEquals(self.project2.hash('master'), self.localClone2.hash())
            test.assertEquals(self.requiringProject.hash(), self.localRequiringProject.hash())
            upsetowrapper.run(self.localRequiringProject, "addRequirement project1")
            upsetowrapper.run(self.localRequiringProject, "addRequirement project2")
            test.assertTrue(os.path.exists(self.localRequiringProject.manifestFilename()))
            self.localRequiringProject.addCommitPushManifest()
            # Re-read the committed manifest and verify both requirements.
            self.manifest = upseto.manifest.Manifest.fromDir(self.localRequiringProject.directory())
            requirements = self.manifest.requirements()
            test.assertEquals(len(requirements), 2)
            test.assertEquals(requirements[0]['originURL'], "file://" + self.project1.directory())
            test.assertEquals(requirements[0]['hash'], self.project1.hash())
            test.assertEquals(requirements[1]['originURL'], "file://" + self.project2.directory())
            test.assertEquals(requirements[1]['hash'], self.project2.hash())

        def addThirdTier(self):
            # 'recursiveProject' requires requiringProject (dep of a dep).
            self.recursiveProject = gitwrapper.GitHub("recursiveProject")
            self.localRecursiveProject = gitwrapper.LocalClone(self.recursiveProject)
            upsetowrapper.run(self.localRecursiveProject, "addRequirement requiringProject")
            self.localRecursiveProject.addCommitPushManifest()

        def addFourthTier(self):
            # 'forthTier' (sic) requires recursiveProject: three levels deep.
            self.fourthTierProject = gitwrapper.GitHub('forthTier')
            self.localFourthTierProject = gitwrapper.LocalClone(self.fourthTierProject)
            upsetowrapper.run(self.localFourthTierProject, "addRequirement recursiveProject")
            self.localFourthTierProject.addCommitPushManifest()

    def test_simpleManifest_OneProjectDependsOnTwoOthers_RequirementsFetched(self):
        # After wiping all local clones, fulfillRequirements must re-fetch
        # them; a second invocation is a no-op.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        shutil.rmtree(gitwrapper.localClonesDir())
        localRequiringProject = gitwrapper.LocalClone(case.requiringProject)
        upsetowrapper.run(localRequiringProject, "fulfillRequirements")
        self.assertEquals(case.project1.hash('master'), case.localClone1.hash())
        self.assertEquals(case.project2.hash('master'), case.localClone2.hash())
        self.assertEquals(case.requiringProject.hash('master'), case.localRequiringProject.hash())
        upsetowrapper.run(localRequiringProject, "fulfillRequirements")  # does nothing
        self.assertEquals(case.project1.hash('master'), case.localClone1.hash())
        self.assertEquals(case.project2.hash('master'), case.localClone2.hash())
        self.assertEquals(case.requiringProject.hash('master'), case.localRequiringProject.hash())

    def test_simpleManifest_NothingToBeDone(self):
        # fulfillRequirements on an already-satisfied workspace changes nothing.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        upsetowrapper.run(case.localRequiringProject, "fulfillRequirements")
        self.assertEquals(case.project1.hash('master'), case.localClone1.hash())
        self.assertEquals(case.project2.hash('master'), case.localClone2.hash())
        self.assertEquals(case.requiringProject.hash('master'), case.localRequiringProject.hash())

    def test_checkRequirements(self):
        # checkRequirements fails once a required clone is deleted.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        upsetowrapper.run(case.localRequiringProject, "checkRequirements")
        shutil.rmtree(case.localClone2.directory())
        upsetowrapper.runShouldFail(case.localRequiringProject, "checkRequirements", "exist")

    def test_simpleManifest_DetachedVersion(self):
        # fulfillRequirements pins clone1 back to the manifest's hash even
        # after the origin moved forward.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        hashBefore = case.localClone1.hash()
        case.localClone1.createAddCommitPush('anotherfile')
        hashAfter = case.localClone1.hash()
        self.assertNotEqual(hashBefore, hashAfter)
        upsetowrapper.run(case.localRequiringProject, "fulfillRequirements")
        self.assertEquals(case.localClone1.hash(), hashBefore)
        self.assertEquals(case.project2.hash('master'), case.localClone2.hash())
        self.assertEquals(case.requiringProject.hash('master'), case.localRequiringProject.hash())

    def test_recursiveRequirements(self):
        # Requirements of requirements are fetched transitively.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        case.addThirdTier()
        shutil.rmtree(gitwrapper.localClonesDir())
        localRecursiveProject = gitwrapper.LocalClone(case.recursiveProject)
        upsetowrapper.run(localRecursiveProject, "fulfillRequirements")
        self.assertEquals(case.requiringProject.hash('master'), case.localRequiringProject.hash())
        self.assertEquals(case.project1.hash('master'), case.localClone1.hash())
        self.assertEquals(case.project2.hash('master'), case.localClone2.hash())

    def test_recursiveRequirementDirectlyRequiresFirstLayerProject(self):
        # Diamond dependency: recursiveProject requires project1 both
        # directly and through requiringProject.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        case.addThirdTier()
        upsetowrapper.run(case.localRecursiveProject, "addRequirement project1")
        case.localRecursiveProject.addCommitPushManifest()
        shutil.rmtree(gitwrapper.localClonesDir())
        localRecursiveProject = gitwrapper.LocalClone(case.recursiveProject)
        upsetowrapper.run(localRecursiveProject, "fulfillRequirements")
        self.assertEquals(case.requiringProject.hash('master'), case.localRequiringProject.hash())
        self.assertEquals(case.project1.hash('master'), case.localClone1.hash())
        self.assertEquals(case.project2.hash('master'), case.localClone2.hash())

    def test_refusesToCreateHashInconsistency_TwoProjectPointAtSameOriginWithDifferentHashes(self):
        # Two manifests pinning the same origin at different hashes is a
        # "hash paradox"; fulfillRequirements resolves the workspace.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        case.addThirdTier()
        case.localClone1.createAddCommitPush("anotherfile")
        upsetowrapper.runShouldFail(case.localRecursiveProject, "addRequirement project1", "hash paradox")
        upsetowrapper.runShouldFail(case.localRecursiveProject, "checkRequirements", "hash")
        upsetowrapper.run(case.localRecursiveProject, "fulfillRequirements")
        upsetowrapper.run(case.localRecursiveProject, "checkRequirements")
        upsetowrapper.run(case.localRecursiveProject, "addRequirement project1")

    def test_updateVersion(self):
        # addRequirement re-pins a requirement at the clone's new hash.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        case.localClone1.createAddCommitPush("anotherfile")
        newHash = case.localClone1.hash()
        upsetowrapper.runShouldFail(case.localRequiringProject, "checkRequirements", "hash")
        upsetowrapper.run(case.localRequiringProject, "fulfillRequirements")
        upsetowrapper.run(case.localRequiringProject, "checkRequirements")
        self.assertNotEqual(case.localClone1.hash(), newHash)
        case.localClone1.checkout('master')
        self.assertEqual(case.localClone1.hash(), newHash)
        upsetowrapper.run(case.localRequiringProject, "addRequirement project1")
        upsetowrapper.run(case.localRequiringProject, "checkRequirements")
        self.assertEqual(case.localClone1.hash(), newHash)

    def test_circle(self):
        # Dependency cycles are detected and refused in both directions.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        case.addThirdTier()
        upsetowrapper.runShouldFail(case.localClone1, "addRequirement recursiveProject", "circle")
        upsetowrapper.run(case.localRecursiveProject, "delRequirement requiringProject")
        upsetowrapper.run(case.localClone1, "addRequirement recursiveProject")
        upsetowrapper.runShouldFail(case.localRecursiveProject, "addRequirement requiringProject", "circle")

    def test_show(self):
        # checkRequirements --show lists each origin URL with its hash.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        case.addThirdTier()
        result = upsetowrapper.run(case.localRecursiveProject, "checkRequirements --show")
        print "\nupseto checkRequirements --show"
        print result
        self.assertIn('file://%s\t%s' % (case.project1.directory(), case.project1.hash('master')), result)
        self.assertIn('file://%s\t%s' % (case.project2.directory(), case.project2.hash('master')), result)

    def pythonNamespacesTestcase(self):
        """Build a fixture with a python namespace package split across
        project1 and requiringProject, plus a test.py that verifies the
        joined namespace works; returns the fixture."""
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        os.makedirs(case.localClone1.directory() + "/py/namespace")
        with open(case.localClone1.directory() + "/py/namespace/__init__.py", "w") as f:
            f.write("")
        with open(case.localClone1.directory() + "/py/namespace/module_a.py", "w") as f:
            f.write("VARIABLE='value'\n")
        with open(case.localClone1.directory() + "/py/withoutnamespace.py", "w") as f:
            f.write("VARIABLE='yetanothervalue'\n")
        os.makedirs(case.localRequiringProject.directory() + "/py/namespace")
        with open(case.localRequiringProject.directory() + "/py/namespace/__init__.py", "w") as f:
            f.write("import upseto.pythonnamespacejoin\n"
                    "__path__.extend(upseto.pythonnamespacejoin.join(globals()))\n")
        with open(case.localRequiringProject.directory() + "/py/namespace/module_b.py", "w") as f:
            f.write("VARIABLE='other value'\n")
        with open(case.localRequiringProject.directory() + "/test.py", "w") as f:
            f.write(
                "#this just verifies we are not using an installed version of upseto\n"
                "import upseto\n"
                "assert '/usr/' not in upseto.__file__\n"
                "#this gets called automatically with the side upseto.pth hook, \n"
                "#but since upseto is not yet installed, we call it directly\n"
                "import upseto.pythonnamespacejoin; upseto.pythonnamespacejoin.extendPath()\n"
                "#this is how it's really used\n"
                "import namespace.module_a\n"
                "import namespace.module_b\n"
                "assert namespace.module_a.VARIABLE == 'value'\n"
                "assert namespace.module_b.VARIABLE == 'other value'\n"
                "import withoutnamespace\n"
                "assert withoutnamespace.VARIABLE == 'yetanothervalue'\n")
        return case

    def test_pythonNamespaceJoining(self):
        # The generated test.py passes when namespace joining is enabled.
        case = self.pythonNamespacesTestcase()
        case.localRequiringProject.run(
            'UPSETO_JOIN_PYTHON_NAMESPACES=yes PYTHONPATH=py:$PYTHONPATH python test.py')

    def test_recursiveGitInvocation(self):
        # 'upseto git <cmd>' runs the git command across all requirements.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        case.addThirdTier()
        firstCommitFile = os.path.join(case.localClone1.directory(), "firstCommitFile")
        self.assertTrue(os.path.exists(firstCommitFile))
        with open(firstCommitFile, "a") as f:
            f.write("\n")
        self.assertIn('M firstCommitFile', case.localClone1.shortStatus())
        result = upsetowrapper.run(case.localRecursiveProject, "git status -s")
        print "\nupseto git status -s"
        print result
        self.assertIn('M firstCommitFile', result)

    def test_packegg(self):
        # A packed egg is runnable as a module from PYTHONPATH.
        case = self.pythonNamespacesTestcase()
        temp = tempfile.NamedTemporaryFile(suffix=".egg")
        upsetowrapper.packEgg(
            case.localRequiringProject,
            "--joinPythonNamespaces --entryPoint=test.py --output=%s" % temp.name,
            "py:%s/py" % case.localClone1.directory())
        upsetowrapper.runWhatever('/', "PYTHONPATH=%s python -m test" % temp.name)

    def test_packegg_replacesInitFileWithEmptyFile(self):
        # Namespace __init__.py files are emptied inside the packed egg.
        case = self.pythonNamespacesTestcase()
        temp = tempfile.NamedTemporaryFile(suffix=".egg")
        upsetowrapper.packEgg(
            case.localRequiringProject,
            "--joinPythonNamespaces --entryPoint=test.py --output=%s" % temp.name,
            "py:%s/py" % case.localClone1.directory())
        with zipfile.ZipFile(temp.name) as z:
            for name in z.namelist():
                if name.endswith('__init__.py'):
                    contents = z.read(name).strip()
                    self.assertEqual(contents, "")

    def test_checkWorkspaceUnsullied(self):
        # --unsullied fails when the workspace has a non-upseto directory.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        case.addThirdTier()
        upsetowrapper.run(case.localRecursiveProject, "checkRequirements --unsullied")
        os.mkdir(os.path.join(gitwrapper.localClonesDir(), "projectoutsideofupseto"))
        upsetowrapper.runShouldFail(case.localRecursiveProject, "checkRequirements --unsullied", "sullied")

    def test_checkWorkspaceClean(self):
        # --gitClean fails when any clone (including the root) is dirty.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        case.addThirdTier()
        upsetowrapper.run(case.localRecursiveProject, "checkRequirements --gitClean")
        output = upsetowrapper.run(case.localRecursiveProject, "git status -s")
        self.assertEquals(len([l for l in output.strip().split("\n") if not l.startswith("#")]), 0)
        with open(case.localClone1.directory() + "/notcheckedin", "w") as f:
            f.write("i'm here to make things dirty")
        output = upsetowrapper.run(case.localRecursiveProject, "git status -s")
        self.assertEquals(len([l for l in output.strip().split("\n") if not l.startswith("#")]), 1)
        upsetowrapper.runShouldFail(case.localRecursiveProject, "checkRequirements --gitClean", "clean")
        os.unlink(case.localClone1.directory() + "/notcheckedin")
        upsetowrapper.run(case.localRecursiveProject, "checkRequirements --gitClean")
        with open(case.localRecursiveProject.directory() + "/notcheckedin", "w") as f:
            f.write("i'm here to make things dirty")
        upsetowrapper.runShouldFail(case.localRecursiveProject, "checkRequirements --gitClean", "clean")

    def test_checkRequirementsOnNonUpsetoedProjects(self):
        # Projects without a manifest fail unless --allowNoManifest is given.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        case.addThirdTier()
        upsetowrapper.runShouldFail(case.localClone1, "checkRequirements", "manifest")
        upsetowrapper.runShouldFail(case.localClone2, "checkRequirements", "manifest")
        upsetowrapper.run(case.localRequiringProject, "checkRequirements")
        upsetowrapper.run(case.localRecursiveProject, "checkRequirements")
        upsetowrapper.run(case.localClone1, "checkRequirements --allowNoManifest")
        upsetowrapper.run(case.localClone2, "checkRequirements --allowNoManifest")
        upsetowrapper.run(case.localRequiringProject, "checkRequirements --allowNoManifest")
        upsetowrapper.run(case.localRecursiveProject, "checkRequirements --allowNoManifest")

    def test_Bugfix_reloadManifestAfterFulfillingRequirements_AlsoTestsDiamondAddRequirement(self):
        # Regression: the manifest must be re-read after fulfilling
        # requirements moved clones to other versions.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        case.addThirdTier()
        betweenProject = gitwrapper.GitHub("between")
        localBetweenProject = gitwrapper.LocalClone(betweenProject)
        upsetowrapper.run(localBetweenProject, "addRequirement requiringProject")
        localBetweenProject.addCommitPushManifest()
        upsetowrapper.run(case.localRecursiveProject, "addRequirement between")
        case.localRecursiveProject.addCommitPushManifest()
        previousHash = case.localRecursiveProject.hash()
        case.localClone1.createAddCommitPush("nextgeneration")
        upsetowrapper.run(case.localRequiringProject, "addRequirement project1")
        case.localRequiringProject.addCommitPushManifest()
        upsetowrapper.run(localBetweenProject, "addRequirement requiringProject")
        localBetweenProject.addCommitPushManifest()
        upsetowrapper.run(case.localRecursiveProject, "addRequirement requiringProject between")
        case.localRecursiveProject.addCommitPushManifest()
        case.localRecursiveProject.checkout(previousHash)
        upsetowrapper.run(case.localRecursiveProject, "fulfillRequirements")
        case.localRecursiveProject.checkout('master')
        upsetowrapper.run(case.localRecursiveProject, "fulfillRequirements")

    def test_resolveParadoxByLocalManifestRequirements(self):
        # --dirtyParadoxResolution lets the local manifest win a hash paradox.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        case.addThirdTier()
        case.localClone1.createAddCommitPush("anotherfile")
        upsetowrapper.run(
            case.localRecursiveProject, "addRequirement project1 --dirtyParadoxResolution project1")
        upsetowrapper.run(case.localRecursiveProject, "checkRequirements")
        self.assertTrue(os.path.exists(os.path.join(case.localClone1.directory(), "anotherfile")))
        upsetowrapper.run(case.localRecursiveProject, "fulfillRequirements")
        upsetowrapper.run(case.localRecursiveProject, "checkRequirements")
        self.assertTrue(os.path.exists(os.path.join(case.localClone1.directory(), "anotherfile")))
        upsetowrapper.runShouldFail(case.localRecursiveProject, "addRequirement project1", "hash paradox")
        upsetowrapper.run(
            case.localRecursiveProject, "addRequirement project1 --dirtyParadoxResolution project1")

    def test_resolveParadoxByLocalManifestRequirements_CollisionBetweenResolutions(self):
        # Two dirty resolutions of the same project at different hashes
        # must still be refused.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        case.addThirdTier()
        case.addFourthTier()
        upsetowrapper.run(case.localFourthTierProject, "addRequirement project1")
        hashBefore = case.localClone1.hash()
        case.localClone1.createAddCommitPush("anotherfile")
        upsetowrapper.run(
            case.localRecursiveProject, "addRequirement project1 --dirtyParadoxResolution project1")
        case.localRecursiveProject.addCommitPushManifest()
        self.assertTrue(os.path.exists(os.path.join(case.localClone1.directory(), "anotherfile")))
        upsetowrapper.runShouldFail(
            case.localFourthTierProject, "addRequirement recursiveProject", "hash paradox")
        upsetowrapper.runShouldFail(
            case.localFourthTierProject,
            "addRequirement recursiveProject --dirtyParadoxResolution project1",
            "hash")
        case.localClone1.checkout(hashBefore)
        upsetowrapper.run(
            case.localFourthTierProject,
            "addRequirement recursiveProject --dirtyParadoxResolution project1")

    def test_Bugfix_fulfillRequirementChecksOutWrongVersionOnDirtyParadoxResolution(self):
        # Regression: dirty paradox resolutions must take precedence over
        # recursive requirements regardless of manifest entry order.
        case = self.SimpleManifest_OneProjectDependsOnTwoOthers(self)
        case.localClone1.createAddCommitPush("anotherfile")
        correct = case.localClone1.hash()
        case.recursiveProject = gitwrapper.GitHub("recursiveProject")
        case.localRecursiveProject = gitwrapper.LocalClone(case.recursiveProject)
        upsetowrapper.run(case.localRecursiveProject, "addRequirement project1")
        upsetowrapper.run(
            case.localRecursiveProject,
            "addRequirement requiringProject --dirtyParadoxResolution project1")
        case.localRecursiveProject.addCommitPushManifest()
        with open(case.localRecursiveProject.manifestFilename()) as f:
            manifestContents = f.read()
        # The bug only reproduces when the dirty entry is first in the file.
        dirtyParadoxIsFirstLineOrBugIsNotRecreated = 'dirty' in manifestContents.split("\n")[1].lower()
        self.assertTrue(dirtyParadoxIsFirstLineOrBugIsNotRecreated)
        upsetowrapper.run(case.localRecursiveProject, "fulfillRequirements")
        self.assertEquals(case.localClone1.hash(), correct)
        upsetowrapper.run(case.localRecursiveProject, "checkRequirements")

    def test_normalizeURL(self):
        # All equivalent origin URL spellings normalize to one canonical form.
        self.assertEquals(upseto.gitwrapper.normalizeOriginURL('https://github.com/Strato/project.git'),
                          "https://github.com/Strato/project")
        self.assertEquals(upseto.gitwrapper.normalizeOriginURL('https://github.com/Strato/project'),
                          "https://github.com/Strato/project")
        self.assertEquals(upseto.gitwrapper.normalizeOriginURL('git@github.com:Strato/project'),
                          "https://github.com/Strato/project")
        self.assertEquals(upseto.gitwrapper.normalizeOriginURL('git@github.com:Strato/project.git'),
                          "https://github.com/Strato/project")
        self.assertEquals(upseto.gitwrapper.normalizeOriginURL('file:///a/directory/with/repo'),
                          "file:///a/directory/with/repo")

    # temporary deps that resolve paradoxes - for clashes in recursive deps that are not direct deps
    # test no project can be added file not found or not git
    # test can not remove
    # test basenames collision
    # test manifest files must not be in modified state - either committed or unknown
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
|
#!/usr/bin/env python
"""
The LLVM Compiler Infrastructure
This file is distributed under the University of Illinois Open Source
License. See LICENSE.TXT for details.
Prepares language bindings for LLDB build process. Run with --help
to see a description of the supported command line arguments.
"""
# Python modules:
import argparse
import logging
import os
import platform
import sys
# LLDB modules:
import use_lldb_suite
from lldbsuite.support import fs
def prepare_binding_for_language(scripts_dir, script_lang, options):
    """Prepares the binding for a specific language.

    @param scripts_dir the full path to the scripts source directory.
    @param script_lang the name of the script language.  Must be a child
    directory of scripts_dir containing a prepare_binding_{script_lang}.py
    script file.
    @param options the dictionary of parsed command line options.

    There is no return value.  If it returns, the process succeeded;
    otherwise, the process will exit where it fails.
    """
    prepare_script = "prepare_binding_{}.py".format(script_lang)
    language_dir = os.path.join(scripts_dir, script_lang)
    prepare_path = os.path.join(language_dir, prepare_script)

    # A missing per-language prepare script is fatal.
    if not os.path.exists(prepare_path):
        logging.error(
            "failed to find prepare script for language '%s' at '%s'",
            script_lang,
            prepare_path)
        sys.exit(-9)

    # Make the language directory importable, run the script's main(),
    # then take the directory back out of the module search path.
    search_entry = os.path.normcase(language_dir)
    sys.path.append(search_entry)
    binding_module = __import__(os.path.splitext(prepare_script)[0])
    binding_module.main(options)
    sys.path.remove(search_entry)
def prepare_all_bindings(options):
    """Prepares bindings for each of the supported script languages.

    @param options the parsed arguments from the command line.

    Exits the process (non-zero) on failure; returns None on success.
    """
    # The SWIG scripts folder must exist before anything else can happen.
    scripts_dir = os.path.join(options.src_root, "scripts")
    if not os.path.exists(scripts_dir):
        logging.error("failed to find scripts dir: '%s'", scripts_dir)
        sys.exit(-8)

    # Python is currently the only language binding we generate.
    for script_lang in ["Python"]:
        logging.info("executing language script for: '%s'", script_lang)
        prepare_binding_for_language(scripts_dir, script_lang, options)
def process_args(args):
    """Returns options processed from the provided command line.

    @param args the command line to process.
    @return the argparse namespace of parsed options.  As a side effect,
            configures the root logger level from --debug/--verbose.
    """
    # Setup the parser arguments that are accepted.
    parser = argparse.ArgumentParser(
        description="Prepare language bindings for LLDB build.")

    # Arguments to control logging verbosity.
    parser.add_argument(
        "--debug", "-d",
        action="store_true",
        help="Set program logging level to DEBUG.")
    parser.add_argument(
        "--verbose", "-v",
        action="count",
        default=0,
        help=(
            "Increase logging verbosity level. Default: only error and "
            "higher are displayed. Each -v increases level of verbosity."))

    # Arguments to control whether we're building an OS X-style
    # framework. This is the opposite of the older "-m" (makefile)
    # option.
    parser.add_argument(
        "--config-build-dir",
        "--cfgBldDir",
        help=(
            "Configuration build dir, will use python module path "
            "if unspecified."))
    parser.add_argument(
        "--find-swig",
        action="store_true",
        help=(
            # Typo fix: "eplicitly" -> "explicitly" in user-facing help.
            "Indicates the swig executable should be searched for "
            "if not explicitly provided. Either this or the explicit "
            "swig executable option must be provided."))
    parser.add_argument(
        "--framework",
        action="store_true",
        help="Prepare as OS X-style framework.")
    parser.add_argument(
        "--generate-dependency-file",
        "-M",
        action="store_true",
        help="Make the dependency (.d) file for the wrappers.")
    parser.add_argument(
        "--prefix",
        help="Override path where the LLDB module is placed.")
    parser.add_argument(
        "--src-root",
        "--srcRoot",
        "-s",
        # Default to the parent directory of this script's directory.
        default=os.path.abspath(
            os.path.join(
                os.path.dirname(os.path.realpath(__file__)),
                os.path.pardir)),
        help="Specifies the LLDB source root directory.")
    parser.add_argument(
        "--swig-executable",
        "--swigExecutable",
        help="Path to the swig executable.")
    parser.add_argument(
        "--target-dir",
        "--targetDir",
        required=True,
        help=(
            "Specifies the build dir where the language binding "
            "should be placed"))
    parser.add_argument(
        "--target-platform",
        help=(
            "Specifies the platform we are building for."
            "Should be the same as what platform.system() returns."))

    # Process args.
    options = parser.parse_args(args)

    # Set logging level based on verbosity count.
    if options.debug:
        log_level = logging.DEBUG
    else:
        # See logging documentation for error levels. We'll default
        # to showing ERROR or higher error messages. For each -v
        # specified, we'll shift to the next lower-priority log level.
        log_level = logging.ERROR - 10 * options.verbose
        if log_level < logging.NOTSET:
            # Displays all logged messages.
            log_level = logging.NOTSET
    logging.basicConfig(level=log_level)
    logging.info("logging is using level: %d", log_level)

    return options
def main(args):
    """Drives the main script preparation steps.

    @param args list of command line arguments.
    """
    # Process command line arguments.
    options = process_args(args)
    logging.debug("Processed args: options=%s", options)

    # Resolve the swig executable: either it was supplied explicitly, or
    # we are allowed to search the PATH for it.
    if not options.swig_executable or len(options.swig_executable) == 0:
        if not options.find_swig:
            logging.error(
                "The --find-swig option must be specified "
                "when the swig executable location is not "
                "explicitly provided.")
            sys.exit(-12)
        try:
            options.swig_executable = fs.find_executable("swig")
        except Exception as e:
            logging.error("Unable to find swig executable: %s" % e.message)
            sys.exit(-6)

    # The top-level swig interface file must exist.
    swig_path = os.path.normcase(
        os.path.join(options.src_root, "scripts", "lldb.swig"))
    if not os.path.isfile(swig_path):
        logging.error("swig file not found at '%s'", swig_path)
        sys.exit(-3)

    # Prepare bindings for each supported language binding.
    # This will error out if it doesn't succeed.
    prepare_all_bindings(options)
    sys.exit(0)
if __name__ == "__main__":
    # Run the main driver loop with the CLI args (minus the program name).
    main(sys.argv[1:])
|
|
#!/usr/bin/env python
from data_iterator import *
from state import *
from session_encdec import *
from utils import *
import time
import traceback
import os.path
import sys
import argparse
import cPickle
import logging
import search
import pprint
import numpy
import collections
import signal
class Unbuffered:
    """File-like wrapper that flushes the underlying stream after every
    write, so output appears immediately (useful when stdout is piped
    to a log file)."""

    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        # Forward the payload, then force it out of the buffer.
        underlying = self.stream
        underlying.write(data)
        underlying.flush()

    def __getattr__(self, attr):
        # Delegate every other attribute lookup to the wrapped stream.
        return getattr(self.stream, attr)
# Flush stdout on every write so training progress is visible immediately,
# e.g. when output is redirected to a log file.
sys.stdout = Unbuffered(sys.stdout)
logger = logging.getLogger(__name__)
### Unique RUN_ID for this execution (timestamp-based; used in filenames)
RUN_ID = str(time.time())
### Additional measures can be set here
measures = ["train", "valid"]
def init_timings(keys=None):
    """Return a fresh timings dict mapping each measure name to an empty list.

    @param keys optional iterable of measure names; defaults to the
                module-level ``measures`` list (backward compatible with
                the original zero-argument call).
    """
    if keys is None:
        keys = measures
    return dict((m, []) for m in keys)
def save(model, timings):
    """Checkpoint the model parameters, its state dict, and the timing
    measurements under ``state['save_dir']``, keyed by run id and prefix.

    SIGINT is temporarily ignored so a Ctrl-C cannot interrupt the writes
    and leave a half-written checkpoint on disk.
    """
    print "Saving the model..."

    # ignore keyboard interrupt while saving
    start = time.time()
    s = signal.signal(signal.SIGINT, signal.SIG_IGN)
    # Three artifacts share the "<save_dir>/<run_id>_<prefix>_" stem:
    # model weights (.npz), pickled state (.pkl), and timings (.npz).
    model.save(model.state['save_dir'] + '/' + model.state['run_id'] + "_" + model.state['prefix'] + '_model.npz')
    cPickle.dump(model.state, open(model.state['save_dir'] + '/' + model.state['run_id'] + "_" + model.state['prefix'] + '_state.pkl', 'w'))
    numpy.savez(model.state['save_dir'] + '/' + model.state['run_id'] + "_" + model.state['prefix'] + '_timing.npz', **timings)
    # Restore the previous SIGINT handler.
    signal.signal(signal.SIGINT, s)

    print "Model saved, took {}".format(time.time() - start)
def load(model, filename):
    """Load model parameters from ``filename`` into ``model``.

    SIGINT is temporarily ignored so a Ctrl-C cannot interrupt the load
    and leave the model partially initialized.
    """
    print "Loading the model..."

    # ignore keyboard interrupt while saving
    start = time.time()
    s = signal.signal(signal.SIGINT, signal.SIG_IGN)
    model.load(filename)
    # Restore the previous SIGINT handler.
    signal.signal(signal.SIGINT, s)

    print "Model loaded, took {}".format(time.time() - start)
def main(args):
logging.basicConfig(
level = logging.INFO,
format = "%(asctime)s: %(name)s: %(levelname)s: %(message)s")
state = eval(args.prototype)()
timings = init_timings()
if args.resume != "":
logger.debug("Resuming %s" % args.resume)
state_file = args.resume + '_state.pkl'
timings_file = args.resume + '_timing.npz'
if os.path.isfile(state_file) and os.path.isfile(timings_file):
logger.debug("Loading previous state")
state = cPickle.load(open(state_file, 'r'))
timings = dict(numpy.load(open(timings_file, 'r')))
for x, y in timings.items():
timings[x] = list(y)
else:
raise Exception("Cannot resume, cannot find files!")
logger.info("State:\n{}".format(pprint.pformat(state)))
logger.info("Timings:\n{}".format(pprint.pformat(timings)))
model = SessionEncoderDecoder(state)
rng = model.rng
if args.resume != "":
filename = args.resume + '_model.npz'
if os.path.isfile(filename):
logger.info("Loading previous model")
load(model, filename)
else:
raise Exception("Cannot resume, cannot find model file!")
else:
# assign new run_id key
model.state['run_id'] = RUN_ID
logger.info("Compile trainer")
train_batch = model.build_train_function()
eval_batch = model.build_eval_function()
random_sampler = search.RandomSampler(model)
logger.info("Load data")
train_data, valid_data = get_batch_iterator(rng, state)
train_data.start()
# Start looping through the dataset
step = 0
patience = state['patience']
start_time = time.time()
train_cost = 0
train_done = 0
ex_done = 0
while step < state['loop_iters'] and patience >= 0:
# Sample stuff
if step % 200 == 0:
for param in model.params:
print "%s = %.4f" % (param.name,
numpy.sum(param.get_value() ** 2) ** 0.5)
samples, costs = random_sampler.sample([[]], n_samples=1, n_turns=3)
print "Sampled : {}".format(samples[0])
# Training phase
batch = train_data.next()
# Train finished
if not batch:
# Restart training
logger.debug("Got None...")
break
c = train_batch(
batch['x'], batch['y'], batch['max_length'], batch['x_mask'])
if numpy.isinf(c) or numpy.isnan(c):
logger.warn("Got NaN cost .. skipping")
continue
train_cost += c
train_done += batch['num_preds']
this_time = time.time()
if step % state['train_freq'] == 0:
elapsed = this_time - start_time
h, m, s = ConvertTimedelta(this_time - start_time)
print ".. %.2d:%.2d:%.2d %4d mb # %d bs %d maxl %d acc_cost = %.4f" % (h, m, s,\
state['time_stop'] - (time.time() - start_time)/60.,\
step, \
batch['x'].shape[1], \
batch['max_length'], \
float(train_cost/train_done))
if valid_data is not None and\
step % state['valid_freq'] == 0 and step > 1:
valid_data.start()
valid_cost = 0
valid_done = 0
logger.debug("[VALIDATION START]")
while True:
batch = valid_data.next()
# Train finished
if not batch:
break
if numpy.isinf(c) or numpy.isnan(c):
continue
c = eval_batch(
batch['x'], batch['y'], batch['max_length'], batch['x_mask'])
valid_cost += c
valid_done += batch['num_preds']
logger.debug("[VALIDATION END]")
valid_cost /= valid_done
if len(timings["valid"]) == 0 or valid_cost < numpy.min(numpy.array(timings["valid"])):
patience = state['patience']
# Saving model if decrease in validation cost
save(model, timings)
elif valid_cost >= timings["valid"][-1] * state['cost_threshold']:
patience -= 1
print "** validation error = %.4f, patience = %d" % (float(valid_cost), patience)
timings["train"].append(train_cost/train_done)
timings["valid"].append(valid_cost)
# Reset train cost and train done
train_cost = 0
train_done = 0
step += 1
logger.debug("All done, exiting...")
def parse_args():
    """Build the trainer's command-line parser and parse sys.argv."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--resume",
                        type=str, default="",
                        help="Resume training from that state")
    parser.add_argument("--prototype",
                        type=str, default='prototype_state',
                        help="Use the prototype")
    return parser.parse_args()
if __name__ == "__main__":
    # Parse CLI flags and launch training when run as a script.
    args = parse_args()
    main(args)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# change log:
# 2016-02-04 - RP
# altered functions: load(), long and short_sma() etc., enabled passing args w/o prompts
# in sign_sequence: got rid of consolidation to single column
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals
)
import numpy as np
import pandas as pd
import Quandl
import os
# from sqlalchemy import sqlalchemy # from flask.ext.
class twitfin(object):
    """Example class demonstrating the module's documentation conventions."""

    #: An example class variable.
    aClassVariable = True

    def __init__(self, argumentName, anOptionalArg=None):
        """Initialization method.

        :param argumentName: an example argument.
        :type argumentName: string
        :param anOptionalArg: an optional argument.
        :type anOptionalArg: string
        :returns: New instance of :class:`twitfin`
        :rtype: twitfin
        """
        self.instanceVariable1 = argumentName

        # Greet whenever the class flag is set (always True by default).
        if self.aClassVariable:
            print('Hello')

        # Echo the optional argument only when one was supplied.
        if anOptionalArg:
            print('anOptionalArg: %s' % anOptionalArg)
def load(*args, **kwargs):
    """Load data from Quandl into a dataframe, modify column names and
    check for non-numeric values.

    Keyword arguments (each prompted for interactively when omitted):
      token      -- Quandl auth token.
      ticker     -- Quandl ticker symbol (defaults to YAHOO/INDEX_GSPC).
      start_date -- YYYY-MM-DD start of the series (defaults to 1990-01-01).

    Returns the fetched dataframe; raises ValueError if it contains any
    non-numeric cells.

    NOTE(review): uses raw_input(), so this function is Python-2 only.
    """
    # Grab the Quandl token
    # token = os.environ.get('QUANDL_TOKEN')
    # if token is None:
    if 'token' in kwargs:
        token = kwargs['token']
    else:
        token = raw_input("Enter Quandl token: ")
    if 'ticker' in kwargs:
        ticker = kwargs['ticker']
    else:
        # Interactive path: empty answer falls back to the S&P 500 index.
        ticker = raw_input("Enter Quandl ticker symbol (or hit Enter for default of YAHOO/INDEX_GSPC): ")
        if len(ticker) < 1:
            ticker = 'YAHOO/INDEX_GSPC'
        print(ticker)
    if 'start_date' in kwargs:
        start_date = kwargs['start_date']
    else:
        # Interactive path: empty answer falls back to 1990-01-01.
        start_date = raw_input("Enter start date as YYYY-MM-DD (or hit ENTER for default of 1990-01-01): ")
        if len(start_date) < 1:
            start_date = '1990-01-01'
        print(start_date)
    # Call Quandl module, trim input by default from 1990 forward
    print('Pulling Quandl data...')
    df = Quandl.get(ticker, authtoken=token, trim_start=start_date)
    # Get the column labels
    # old_columns = list(df.columns.values)
    # Use the ticker symbol as our new prefix
    # ticker_tag = ticker.split('_')[-1] + '_'
    # Drop spaces and concatenate
    # new_labels = [ticker_tag + i.replace(' ', '') for i in old_columns]
    # Create a dictionary of old and new column labels
    # new_columns = dict(zip(old_columns, new_labels))
    # Rename the columns using our dictionary
    # df = df.rename(columns=new_columns)
    # Rows where any cell is not a real number (NaN, strings, ...).
    nulls = df[~df.applymap(np.isreal).all(1)]
    # Check for non-numeric values
    if len(nulls) > 0:
        raise ValueError('Dataframe contains non-numeric values')
    row_count = len(df)
    print('%d rows loaded into dataframe.' % row_count)
    return df
def long_sma(df, column, *args, **kwargs):
    """Add a long-period simple moving average column to *df*.

    :param df: target dataframe.
    :param column: name of the column to average.
    :param period: (keyword) window length in days; prompted for when absent.
    :param label: (keyword) optional label prefix; the new column is named
        ``<label>_<period>-day``, or ``SMA_<column>_<period>-day`` by default.
    :returns: *df* with the SMA column added (first ``period - 1`` rows are NaN).
    """
    if 'period' in kwargs:
        period = kwargs['period']
    else:
        period = int(raw_input("Enter the period in days for the long SMA: "))
    # to add: default period = 26
    if 'label' in kwargs:
        column_label = kwargs['label'] + '_' + str(period) + '-day'
    else:
        column_label = 'SMA_' + column + '_' + str(period) + '-day'
    # BUG FIX: pd.stats.moments.rolling_mean was removed from pandas; the
    # Series.rolling(...).mean() API computes the identical moving average.
    df[column_label] = df[column].rolling(window=period).mean()
    return df
def short_sma(df, column, *args, **kwargs):
    """Add a short-period simple moving average column to *df*.

    :param df: target dataframe.
    :param column: name of the column to average.
    :param period: (keyword) window length in days; prompted for when absent.
    :param label: (keyword) optional label prefix; the new column is named
        ``<label>_<period>-day``, or ``SMA_<column>_<period>-day`` by default.
    :returns: *df* with the SMA column added (first ``period - 1`` rows are NaN).
    """
    if 'period' in kwargs:
        period = kwargs['period']
    else:
        period = int(raw_input("Enter the period in days for the short SMA: "))
    if 'label' in kwargs:
        column_label = kwargs['label'] + '_' + str(period) + '-day'
    else:
        column_label = 'SMA_' + column + '_' + str(period) + '-day'
    # BUG FIX: pd.stats.moments.rolling_mean was removed from pandas; the
    # Series.rolling(...).mean() API computes the identical moving average.
    df[column_label] = df[column].rolling(window=period).mean()
    return df
def diff(df, column_a, column_b, **kwargs):
    """Add a column holding ``column_a - column_b`` to *df*.

    The default label is ``Delta_<prefix>_<a-suffix>_<b-suffix>``, where the
    suffixes are the last underscore-separated part of each column name and
    the prefix is the first two parts of *column_b*.  Pass ``label=`` to use
    a custom column name instead.
    """
    suffix_a = column_a.split('_')[-1]
    suffix_b = column_b.split('_')[-1]
    prefix = "_".join(column_b.split('_')[0:2])
    default_label = 'Delta_' + prefix + '_' + suffix_a + '_' + suffix_b

    column_label = kwargs.get('label', default_label)
    df[column_label] = df[column_a] - df[column_b]
    return df
def macd(df, column, *args, **kwargs):
    """Add a simple moving average of *column* (signal line for a MACD).

    :param df: target dataframe.
    :param column: name of the column to average.
    :param period: (keyword) window length in days; prompted for when absent.
    :param label: (keyword) optional label prefix; the new column is named
        ``<label>_<period>-day``, or ``SMA_<column>_<period>-day`` by default.
    :returns: *df* with the SMA column added (first ``period - 1`` rows are NaN).
    """
    if 'period' in kwargs:
        period = kwargs['period']
    else:
        period = int(raw_input("Enter the period in days for the SMA of the MACD: "))
    if 'label' in kwargs:
        column_label = kwargs['label'] + '_' + str(period) + '-day'
    else:
        column_label = 'SMA_' + column + '_' + str(period) + '-day'
    # BUG FIX: pd.stats.moments.rolling_mean was removed from pandas; the
    # Series.rolling(...).mean() API computes the identical moving average.
    df[column_label] = df[column].rolling(window=period).mean()
    return df
def flag_swings(df, column, *args, **kwargs):
    """Given a dataframe and column and a minimum sequence period
    for the same sign, the function returns: "1" for upward swings,
    "-1" for downward swings, or "0" if niether condition is met."""
    ### flag_swings function parameters
    # 1st parameter: target dataframe
    # 2nd parameter: target column
    # 3rd parameter: minimum swing period
    # TODO: describe default label and custom label options
    if 'period' in kwargs:
        period = kwargs['period']
    else:
        period = int(raw_input("Enter the period in days to flag swings: "))
    if 'label' in kwargs:
        # Append custom label with period days
        column_label = kwargs['label'] + '_' + str(period) + '-day'
    else:
        column_label = 'SwingFlag_' + str(period) + '-day'
    # Trim null value artifacts in SMA columns
    df = df.dropna()
    # Create a temporary dataframe
    tmp = df.copy()
    tmp['sum'] = 0
    # Determine the sign of each day and sum signs from prior days using the
    # "x-day" notation as "sign-'reference day'"
    # sign-0 is today's sign: +1 for >= 0, -1 for < 0.
    tmp['sign-0'] = [1 if x >= 0 else -1 for x in df[column]]
    if period < 2:
        raise ValueError('The minimum swing period should be 2 days.')
    else:
        # Shift rows down for lateral comparison depending on period
        for i in range(1, period):
            label = 'sign-' + str(i)
            tmp[label] = tmp['sign-0'].shift(i)
            # The sum of consecutive signs agregates here
            tmp['sum'] = tmp['sum'] + tmp[label]
        # The we shift the sum signs by one to compare prior sequence history
        tmp['sum-shift'] = tmp['sum'].shift(1)
    def flagger(sign_now, sign_prior, sign_run, sign_sum, period):
        # flagger contains the logical for lateral comparison of time-shifted
        # sign data, agregations and time-shifted agregations
        if sign_now > sign_prior and abs(sign_run) >= period - 1 and sign_sum != 0:
            # Indicates a positive sign after a sufficient period of negative signs
            return 1 # Also referred to here as an upward swing or crossover
        else:
            if sign_now < sign_prior and abs(sign_run) >= period - 1 and sign_sum != 0:
                # Indicates a negative sign after a sufficient period of positive signs
                return -1 # Also referred to here as an downward swing or crossover
            else:
                # Otherwaise returning zero. Zero could still be a sign change
                # but prior minimum sign sequence period criteria was not met.
                return 0
    try:
        df = df.copy()
        # Apply flagger row-wise over today's sign, yesterday's sign, the
        # shifted sign run, and the current sign sum.
        df[column_label] = [flagger(n, p, r, s, period) for n, p, r, s in zip(tmp['sign-0'], tmp['sign-1'], tmp['sum-shift'], tmp['sum'])]
    except Exception as e:
        print(e)
        # NOTE(review): comparing an exception object to a string is always
        # False, so this branch never fires -- confirm the intended check.
        if e =='SettingWithCopyWarning':
            pass
    return df
def transpose_column(df, column, *args, **kwargs):
    """Add shifted copies of *column* for each of the prior *period* days.

    New columns are named ``<column>-0`` (today) through
    ``<column>-<period-1>`` (period-1 days ago).  The first ``period - 1``
    rows, which would only contain shift-induced NaNs, are dropped.

    :param df: target dataframe.
    :param column: column to transpose through time.
    :param period: (keyword) number of days to look back; prompted when absent.
    :returns: a new dataframe with the shifted columns added.
    """
    if 'period' in kwargs:
        period = kwargs['period']
    else:
        period = int(raw_input("Enter the days prior to list the signs: "))
    # Trim null value artifacts in SMA columns
    df = df.dropna()
    # Work on a copy so the caller's frame is untouched.
    tmp = df.copy()
    # "x-day" notation: <column>-i holds the value from i days ago.
    label0 = column + '-0'
    tmp[label0] = df[column]
    # Shift rows down for lateral comparison depending on period
    for i in range(1, period):
        label = column + '-' + str(i)
        tmp[label] = tmp[label0].shift(i)
    # BUG FIX: DataFrame.ix was removed from pandas; .iloc keeps the original
    # positional intent of dropping the leading NaN rows.
    return tmp.iloc[(period - 1):]
def sign_sequence(df, column, *args, **kwargs):
    """Add sign-history columns ('1'/'-1' strings) for *column*.

    ``<column>_sign-0`` holds the sign of today's value (>= 0 counts as
    positive) and ``<column>_sign-i`` the sign from *i* days ago, for
    ``i < period``.  The first ``period - 1`` rows, which would only contain
    shift-induced NaNs, are dropped.

    :param df: target dataframe.
    :param column: column whose sign history is recorded.
    :param period: (keyword) number of days to look back; prompted when absent.
    :returns: a new dataframe with the sign columns added.
    """
    if 'period' in kwargs:
        period = kwargs['period']
    else:
        period = int(raw_input("Enter the days prior to list the signs: "))
    # Trim null value artifacts in SMA columns
    df = df.dropna()
    # Work on a copy so the caller's frame is untouched.
    tmp = df.copy()
    # Encode each day's sign as a string flag ('1' or '-1').
    label0 = column + '_sign-0'
    tmp[label0] = ['1' if x >= 0 else '-1' for x in df[column]]
    # Shift rows down for lateral comparison depending on period
    for i in range(1, period):
        label = column + '_sign-' + str(i)
        tmp[label] = tmp[label0].shift(i)
    # BUG FIX: DataFrame.ix was removed from pandas; .iloc keeps the original
    # positional intent of dropping the leading NaN rows.
    return tmp.iloc[(period - 1):]
def x_days(df):
    """Add an 'x-day' column counting down from 'x-<len-1>' to 'x-0'.

    One parameter: the target dataframe, which is modified and returned.
    """
    countdown = range(len(df) - 1, -1, -1)
    df['x-day'] = ['x-' + str(i) for i in countdown]
    return df
def x_transpose(df):
    """Transpose *df* using its 'x-day' column as the new column labels.

    Assumes an 'x-day' column has been created (see ``x_days``).  Also sets
    the global pandas float display format to three decimal places.
    """
    transposed = df.set_index('x-day').transpose()
    pd.options.display.float_format = '{:.3f}'.format
    return transposed
def read_csv(filename, *args, **kwargs):
    """read_csv is a port of the Pandas read_csv module.

    BUG FIX: keyword arguments are now forwarded to pandas -- they were
    previously accepted by the signature but silently dropped.
    """
    return pd.read_csv(filename, *args, **kwargs)
def read_sql(table, db, *args, **kwargs):
    """read_sql is a port of the Pandas read_sql module."""
    frame = pd.read_sql(table, db, *args, **kwargs)
    return frame
def db_connection(uri):
    """db_connection is a port of the SQLAlchemy create_engine module.

    BUG FIX: ``sqlalchemy`` was referenced but never imported (the
    module-level import is commented out), so every call raised NameError.
    Importing it locally keeps the dependency optional for the rest of
    the module.
    """
    import sqlalchemy
    return sqlalchemy.create_engine(uri)
# Execute example IO utilities
# To write data to csv
# df.to_csv('data/example.csv')
# print('Modified dataframe saved to: data/standard-example.csv')
# print('\nData saved.')
# To read data from csv
# df = read_csv('data/example.csv')
# df = df.set_index('Date')
# print('\nData read from csv:')
# print(df_test.tail())
# To write data to sql table
# db = db_connection('sqlite:///data/dev.db')
# df.to_sql('example', db, if_exists='replace')
# print('\nData saved to data/dev.db/gspc')
# To read data from sql table
# df = read_sql('example', db)
# df = df.set_index('Date')
# print('\nData read from sql:')
# print(df_test.tail())
|
|
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.forms import Form
from django.forms.fields import IntegerField, BooleanField
from django.forms.utils import ErrorList
from django.forms.widgets import HiddenInput
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.six.moves import xrange
from django.utils.translation import ungettext, ugettext as _
__all__ = ('BaseFormSet', 'formset_factory', 'all_valid')
# special field names
TOTAL_FORM_COUNT = 'TOTAL_FORMS'
INITIAL_FORM_COUNT = 'INITIAL_FORMS'
MIN_NUM_FORM_COUNT = 'MIN_NUM_FORMS'
MAX_NUM_FORM_COUNT = 'MAX_NUM_FORMS'
ORDERING_FIELD_NAME = 'ORDER'
DELETION_FIELD_NAME = 'DELETE'
# default minimum number of forms in a formset
DEFAULT_MIN_NUM = 0
# default maximum number of forms in a formset, to prevent memory exhaustion
DEFAULT_MAX_NUM = 1000
class ManagementForm(Form):
    """
    ``ManagementForm`` is used to keep track of how many form instances
    are displayed on the page. If adding new forms via javascript, you should
    increment the count field of this form as well.
    """
    def __init__(self, *args, **kwargs):
        # Fields are installed on base_fields at init time (rather than
        # declaratively) so every subclass/instance shares the same set.
        self.base_fields[TOTAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
        self.base_fields[INITIAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
        # MIN_NUM_FORM_COUNT and MAX_NUM_FORM_COUNT are output with the rest of
        # the management form, but only for the convenience of client-side
        # code. The POST value of them returned from the client is not checked.
        self.base_fields[MIN_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
        self.base_fields[MAX_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
        super(ManagementForm, self).__init__(*args, **kwargs)
@python_2_unicode_compatible
class BaseFormSet(object):
    """
    A collection of instances of the same Form class.
    """
    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList):
        """Bind the formset when data/files are given; otherwise leave it unbound."""
        self.is_bound = data is not None or files is not None
        self.prefix = prefix or self.get_default_prefix()
        self.auto_id = auto_id
        self.data = data or {}
        self.files = files or {}
        self.initial = initial
        self.error_class = error_class
        # Populated lazily by full_clean() on first access of errors.
        self._errors = None
        self._non_form_errors = None

    def __str__(self):
        return self.as_table()

    def __iter__(self):
        """Yields the forms in the order they should be rendered"""
        return iter(self.forms)

    def __getitem__(self, index):
        """Returns the form at the given index, based on the rendering order"""
        return self.forms[index]

    def __len__(self):
        return len(self.forms)

    def __bool__(self):
        """All formsets have a management form which is not included in the length"""
        return True

    def __nonzero__(self):  # Python 2 compatibility
        return type(self).__bool__(self)

    @property
    def management_form(self):
        """Returns the ManagementForm instance for this FormSet."""
        if self.is_bound:
            form = ManagementForm(self.data, auto_id=self.auto_id, prefix=self.prefix)
            if not form.is_valid():
                raise ValidationError(
                    _('ManagementForm data is missing or has been tampered with'),
                    code='missing_management_form',
                )
        else:
            # Unbound: pre-fill the management form from the formset's counts.
            form = ManagementForm(auto_id=self.auto_id, prefix=self.prefix, initial={
                TOTAL_FORM_COUNT: self.total_form_count(),
                INITIAL_FORM_COUNT: self.initial_form_count(),
                MIN_NUM_FORM_COUNT: self.min_num,
                MAX_NUM_FORM_COUNT: self.max_num
            })
        return form

    def total_form_count(self):
        """Returns the total number of forms in this FormSet."""
        if self.is_bound:
            # return absolute_max if it is lower than the actual total form
            # count in the data; this is DoS protection to prevent clients
            # from forcing the server to instantiate arbitrary numbers of
            # forms
            return min(self.management_form.cleaned_data[TOTAL_FORM_COUNT], self.absolute_max)
        else:
            initial_forms = self.initial_form_count()
            total_forms = initial_forms + self.extra
            # Allow all existing related objects/inlines to be displayed,
            # but don't allow extra beyond max_num.
            if initial_forms > self.max_num >= 0:
                total_forms = initial_forms
            elif total_forms > self.max_num >= 0:
                total_forms = self.max_num
        return total_forms

    def initial_form_count(self):
        """Returns the number of forms that are required in this FormSet."""
        if self.is_bound:
            return self.management_form.cleaned_data[INITIAL_FORM_COUNT]
        else:
            # Use the length of the initial data if it's there, 0 otherwise.
            initial_forms = len(self.initial) if self.initial else 0
        return initial_forms

    @cached_property
    def forms(self):
        """
        Instantiate forms at first property access.
        """
        # DoS protection is included in total_form_count()
        forms = [self._construct_form(i) for i in xrange(self.total_form_count())]
        return forms

    def _construct_form(self, i, **kwargs):
        """
        Instantiates and returns the i-th form instance in a formset.
        """
        defaults = {
            'auto_id': self.auto_id,
            'prefix': self.add_prefix(i),
            'error_class': self.error_class,
        }
        if self.is_bound:
            defaults['data'] = self.data
            defaults['files'] = self.files
        if self.initial and not 'initial' in kwargs:
            try:
                defaults['initial'] = self.initial[i]
            except IndexError:
                # More forms than initial data: extra forms get no initial.
                pass
        # Allow extra forms to be empty.
        if i >= self.initial_form_count():
            defaults['empty_permitted'] = True
        defaults.update(kwargs)
        form = self.form(**defaults)
        self.add_fields(form, i)
        return form

    @property
    def initial_forms(self):
        """Return a list of all the initial forms in this formset."""
        return self.forms[:self.initial_form_count()]

    @property
    def extra_forms(self):
        """Return a list of all the extra forms in this formset."""
        return self.forms[self.initial_form_count():]

    @property
    def empty_form(self):
        # Template form with a '__prefix__' placeholder, meant to be cloned
        # client-side when adding forms via javascript.
        form = self.form(
            auto_id=self.auto_id,
            prefix=self.add_prefix('__prefix__'),
            empty_permitted=True,
        )
        self.add_fields(form, None)
        return form

    @property
    def cleaned_data(self):
        """
        Returns a list of form.cleaned_data dicts for every form in self.forms.
        """
        if not self.is_valid():
            raise AttributeError("'%s' object has no attribute 'cleaned_data'" % self.__class__.__name__)
        return [form.cleaned_data for form in self.forms]

    @property
    def deleted_forms(self):
        """
        Returns a list of forms that have been marked for deletion.
        """
        if not self.is_valid() or not self.can_delete:
            return []
        # construct _deleted_form_indexes which is just a list of form indexes
        # that have had their deletion widget set to True
        if not hasattr(self, '_deleted_form_indexes'):
            self._deleted_form_indexes = []
            for i in range(0, self.total_form_count()):
                form = self.forms[i]
                # if this is an extra form and hasn't changed, don't consider it
                if i >= self.initial_form_count() and not form.has_changed():
                    continue
                if self._should_delete_form(form):
                    self._deleted_form_indexes.append(i)
        return [self.forms[i] for i in self._deleted_form_indexes]

    @property
    def ordered_forms(self):
        """
        Returns a list of form in the order specified by the incoming data.
        Raises an AttributeError if ordering is not allowed.
        """
        if not self.is_valid() or not self.can_order:
            raise AttributeError("'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__)
        # Construct _ordering, which is a list of (form_index, order_field_value)
        # tuples. After constructing this list, we'll sort it by order_field_value
        # so we have a way to get to the form indexes in the order specified
        # by the form data.
        if not hasattr(self, '_ordering'):
            self._ordering = []
            for i in range(0, self.total_form_count()):
                form = self.forms[i]
                # if this is an extra form and hasn't changed, don't consider it
                if i >= self.initial_form_count() and not form.has_changed():
                    continue
                # don't add data marked for deletion to self.ordered_data
                if self.can_delete and self._should_delete_form(form):
                    continue
                self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME]))
            # After we're done populating self._ordering, sort it.
            # A sort function to order things numerically ascending, but
            # None should be sorted below anything else. Allowing None as
            # a comparison value makes it so we can leave ordering fields
            # blank.
            def compare_ordering_key(k):
                if k[1] is None:
                    return (1, 0)  # +infinity, larger than any number
                return (0, k[1])
            self._ordering.sort(key=compare_ordering_key)
        # Return a list of form.cleaned_data dicts in the order specified by
        # the form data.
        return [self.forms[i[0]] for i in self._ordering]

    @classmethod
    def get_default_prefix(cls):
        return 'form'

    def non_form_errors(self):
        """
        Returns an ErrorList of errors that aren't associated with a particular
        form -- i.e., from formset.clean(). Returns an empty ErrorList if there
        are none.
        """
        if self._non_form_errors is None:
            self.full_clean()
        return self._non_form_errors

    @property
    def errors(self):
        """
        Returns a list of form.errors for every form in self.forms.
        """
        if self._errors is None:
            self.full_clean()
        return self._errors

    def total_error_count(self):
        """
        Returns the number of errors across all forms in the formset.
        """
        return len(self.non_form_errors()) +\
            sum(len(form_errors) for form_errors in self.errors)

    def _should_delete_form(self, form):
        """
        Returns whether or not the form was marked for deletion.
        """
        return form.cleaned_data.get(DELETION_FIELD_NAME, False)

    def is_valid(self):
        """
        Returns True if every form in self.forms is valid.
        """
        if not self.is_bound:
            return False
        # We loop over every form.errors here rather than short circuiting on the
        # first failure to make sure validation gets triggered for every form.
        forms_valid = True
        # This triggers a full clean.
        self.errors
        for i in range(0, self.total_form_count()):
            form = self.forms[i]
            if self.can_delete:
                if self._should_delete_form(form):
                    # This form is going to be deleted so any of its errors
                    # should not cause the entire formset to be invalid.
                    continue
            forms_valid &= form.is_valid()
        return forms_valid and not bool(self.non_form_errors())

    def full_clean(self):
        """
        Cleans all of self.data and populates self._errors and
        self._non_form_errors.
        """
        self._errors = []
        self._non_form_errors = self.error_class()
        if not self.is_bound:  # Stop further processing.
            return
        for i in range(0, self.total_form_count()):
            form = self.forms[i]
            self._errors.append(form.errors)
        try:
            if (self.validate_max and
                    self.total_form_count() - len(self.deleted_forms) > self.max_num) or \
                    self.management_form.cleaned_data[TOTAL_FORM_COUNT] > self.absolute_max:
                raise ValidationError(ungettext(
                    "Please submit %d or fewer forms.",
                    "Please submit %d or fewer forms.", self.max_num) % self.max_num,
                    code='too_many_forms',
                )
            if (self.validate_min and
                    self.total_form_count() - len(self.deleted_forms) < self.min_num):
                raise ValidationError(ungettext(
                    "Please submit %d or more forms.",
                    "Please submit %d or more forms.", self.min_num) % self.min_num,
                    code='too_few_forms')
            # Give self.clean() a chance to do cross-form validation.
            self.clean()
        except ValidationError as e:
            self._non_form_errors = self.error_class(e.error_list)

    def clean(self):
        """
        Hook for doing any extra formset-wide cleaning after Form.clean() has
        been called on every form. Any ValidationError raised by this method
        will not be associated with a particular form; it will be accessible
        via formset.non_form_errors()
        """
        pass

    def has_changed(self):
        """
        Returns true if data in any form differs from initial.
        """
        return any(form.has_changed() for form in self)

    def add_fields(self, form, index):
        """A hook for adding extra fields on to each form instance."""
        if self.can_order:
            # Only pre-fill the ordering field for initial forms.
            if index is not None and index < self.initial_form_count():
                form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), initial=index + 1, required=False)
            else:
                form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), required=False)
        if self.can_delete:
            form.fields[DELETION_FIELD_NAME] = BooleanField(label=_('Delete'), required=False)

    def add_prefix(self, index):
        return '%s-%s' % (self.prefix, index)

    def is_multipart(self):
        """
        Returns True if the formset needs to be multipart, i.e. it
        has FileInput. Otherwise, False.
        """
        if self.forms:
            return self.forms[0].is_multipart()
        else:
            return self.empty_form.is_multipart()

    @property
    def media(self):
        # All the forms on a FormSet are the same, so you only need to
        # interrogate the first form for media.
        if self.forms:
            return self.forms[0].media
        else:
            return self.empty_form.media

    def as_table(self):
        "Returns this formset rendered as HTML <tr>s -- excluding the <table></table>."
        # XXX: there is no semantic division between forms here, there
        # probably should be. It might make sense to render each form as a
        # table row with each field as a td.
        forms = ' '.join(form.as_table() for form in self)
        return mark_safe('\n'.join([six.text_type(self.management_form), forms]))

    def as_p(self):
        "Returns this formset rendered as HTML <p>s."
        forms = ' '.join(form.as_p() for form in self)
        return mark_safe('\n'.join([six.text_type(self.management_form), forms]))

    def as_ul(self):
        "Returns this formset rendered as HTML <li>s."
        forms = ' '.join(form.as_ul() for form in self)
        return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def formset_factory(form, formset=BaseFormSet, extra=1, can_order=False,
                    can_delete=False, max_num=None, validate_max=False,
                    min_num=None, validate_min=False):
    """Return a FormSet for the given form class."""
    min_num = DEFAULT_MIN_NUM if min_num is None else min_num
    max_num = DEFAULT_MAX_NUM if max_num is None else max_num
    # hard limit on forms instantiated, to prevent memory-exhaustion attacks
    # limit is simply max_num + DEFAULT_MAX_NUM (which is 2*DEFAULT_MAX_NUM
    # if max_num is None in the first place)
    attrs = {
        'form': form,
        'extra': extra + min_num,
        'can_order': can_order,
        'can_delete': can_delete,
        'min_num': min_num,
        'max_num': max_num,
        'absolute_max': max_num + DEFAULT_MAX_NUM,
        'validate_min': validate_min,
        'validate_max': validate_max,
    }
    return type(form.__name__ + str('FormSet'), (formset,), attrs)
def all_valid(formsets):
    """Returns true if every formset in formsets is valid."""
    # Deliberately validate every formset (no short-circuit) so each one
    # gets the chance to populate its errors.
    return all([formset.is_valid() for formset in formsets])
|
|
def _reset_sys_path():
    # Clear generic sys.path[0]
    # py2app sets RESOURCEPATH for bundled applications; strip every leading
    # copy of it that the interpreter placed at the front of sys.path.
    import sys, os
    resources = os.environ['RESOURCEPATH']
    while sys.path[0] == resources:
        del sys.path[0]
_reset_sys_path()
def _site_packages():
    """Add the interpreter's (and, for framework builds, the user's)
    site-packages directories to sys.path via site.addsitedir()."""
    import site, sys, os
    paths = []
    prefixes = [sys.prefix]
    if sys.exec_prefix != sys.prefix:
        prefixes.append(sys.exec_prefix)
    for prefix in prefixes:
        paths.append(os.path.join(prefix, 'lib', 'python' + sys.version[:3],
            'site-packages'))
    # Framework (e.g. python.org/macOS) builds also honour the per-user
    # Library/Python site-packages directory.
    if os.path.join('.framework', '') in os.path.join(sys.prefix, ''):
        home = os.environ.get('HOME')
        if home:
            paths.append(os.path.join(home, 'Library', 'Python',
                sys.version[:3], 'site-packages'))
    # Work around for a misfeature in setuptools: easy_install.pth places
    # site-packages way too early on sys.path and that breaks py2app bundles.
    # NOTE: this hacks into an undocumented feature of setuptools and
    # might stop working without warning.
    sys.__egginsert = len(sys.path)
    for path in paths:
        site.addsitedir(path)
_site_packages()
"""
sys.argv emulation
This module starts a basic event loop to collect file- and url-open AppleEvents. Those get
converted to strings and stuffed into sys.argv. When that is done we continue starting
the application.
This is a workaround to convert scripts that expect filenames on the command-line to work
in a GUI environment. GUI applications should not use this feature.
NOTE: This module uses ctypes and not the Carbon modules in the stdlib because the latter
don't work in 64-bit mode and are also not available with python 3.x.
"""
import sys
import os
import time
import ctypes
import struct
class AEDesc (ctypes.Structure):
    # ctypes mirror of the Carbon AEDesc record used by the Apple Event
    # calls below: a descriptor key plus an opaque pointer to its data.
    # NOTE(review): field layout assumed to match the Carbon headers -- confirm.
    _fields_ = [
        ('descKey', ctypes.c_int),
        ('descContent', ctypes.c_void_p),
    ]
class EventTypeSpec (ctypes.Structure):
    # ctypes mirror of the Carbon EventTypeSpec record: identifies an event
    # by its class and kind, as passed to ReceiveNextEvent below.
    _fields_ = [
        ('eventClass', ctypes.c_int),
        ('eventKind', ctypes.c_uint),
    ]
def _ctypes_setup():
    """Load the Carbon framework via ctypes and declare argument/return
    types for every Apple Event function the argv emulator uses.

    Returns the loaded CDLL handle.
    """
    # NOTE(review): frameworks normally live under /System/Library/Frameworks/;
    # confirm this shorter path resolves on the targeted OS X releases.
    carbon = ctypes.CDLL('/System/Library/Carbon.framework/Carbon')
    # Callback prototypes: a timer callback and the Apple Event handler
    # signature (message, reply, refcon) -> OSErr.
    timer_func = ctypes.CFUNCTYPE(
            None, ctypes.c_void_p, ctypes.c_long)
    ae_callback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p,
            ctypes.c_void_p, ctypes.c_void_p)
    carbon.AEInstallEventHandler.argtypes = [
            ctypes.c_int, ctypes.c_int, ae_callback,
            ctypes.c_void_p, ctypes.c_char ]
    carbon.AERemoveEventHandler.argtypes = [
            ctypes.c_int, ctypes.c_int, ae_callback,
            ctypes.c_char ]
    carbon.AEProcessEvent.restype = ctypes.c_int
    carbon.AEProcessEvent.argtypes = [ctypes.c_void_p]
    carbon.ReceiveNextEvent.restype = ctypes.c_int
    carbon.ReceiveNextEvent.argtypes = [
        ctypes.c_long, ctypes.POINTER(EventTypeSpec),
        ctypes.c_double, ctypes.c_char,
        ctypes.POINTER(ctypes.c_void_p)
    ]
    carbon.AEGetParamDesc.restype = ctypes.c_int
    carbon.AEGetParamDesc.argtypes = [
            ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
            ctypes.POINTER(AEDesc)]
    carbon.AECountItems.restype = ctypes.c_int
    carbon.AECountItems.argtypes = [ ctypes.POINTER(AEDesc),
            ctypes.POINTER(ctypes.c_long) ]
    carbon.AEGetNthDesc.restype = ctypes.c_int
    carbon.AEGetNthDesc.argtypes = [
            ctypes.c_void_p, ctypes.c_long, ctypes.c_int,
            ctypes.c_void_p, ctypes.c_void_p ]
    carbon.AEGetDescDataSize.restype = ctypes.c_int
    carbon.AEGetDescDataSize.argtypes = [ ctypes.POINTER(AEDesc) ]
    carbon.AEGetDescData.restype = ctypes.c_int
    carbon.AEGetDescData.argtypes = [
            ctypes.POINTER(AEDesc),
            ctypes.c_void_p,
            ctypes.c_int,
        ]
    carbon.FSRefMakePath.restype = ctypes.c_int
    carbon.FSRefMakePath.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint]
    return carbon
def _run_argvemulator(timeout = 60):
    """Pump Carbon AppleEvents and translate 'open documents' / 'get URL'
    events into entries appended to sys.argv.

    Runs until an open/URL event has been handled or *timeout* seconds
    elapse. Intended only for py2app bundles launched via LaunchServices.
    """

    # Configure ctypes
    carbon = _ctypes_setup()

    # Is the emulator running?
    # (single-element lists so the nested callbacks can rebind the values)
    running = [True]
    timeout = [timeout]

    # Configure AppleEvent handlers
    ae_callback = carbon.AEInstallEventHandler.argtypes[2]

    # Four-character codes packed as big-endian ints, as the Carbon APIs expect.
    kAEInternetSuite, = struct.unpack('>i', b'GURL')
    kAEISGetURL, = struct.unpack('>i', b'GURL')
    kCoreEventClass, = struct.unpack('>i', b'aevt')
    kAEOpenApplication, = struct.unpack('>i', b'oapp')
    kAEOpenDocuments, = struct.unpack('>i', b'odoc')
    keyDirectObject, = struct.unpack('>i', b'----')
    typeAEList, = struct.unpack('>i', b'list')
    typeChar, = struct.unpack('>i', b'TEXT')
    typeFSRef, = struct.unpack('>i', b'fsrf')
    FALSE = b'\0'
    TRUE = b'\1'
    eventLoopTimedOutErr = -9875

    kEventClassAppleEvent, = struct.unpack('>i', b'eppc')
    kEventAppleEvent = 1

    @ae_callback
    def open_app_handler(message, reply, refcon):
        # Got a kAEOpenApplication event, which means we can
        # start up. On some OSX versions this event is even
        # sent when an kAEOpenDocuments or kAEOpenURLs event
        # is sent later on.
        #
        # Therefore don't set running to false, but reduce the
        # timeout to at most two seconds beyond the current time.
        # ('start' is bound later in this function, before the event
        # loop that invokes this callback runs.)
        timeout[0] = min(timeout[0], time.time() - start + 2)
        #running[0] = False
        return 0
    carbon.AEInstallEventHandler(kCoreEventClass, kAEOpenApplication,
            open_app_handler, 0, FALSE)

    @ae_callback
    def open_file_handler(message, reply, refcon):
        # Unpack the list of FSRefs carried by the 'odoc' event and append
        # the corresponding POSIX paths to sys.argv.
        listdesc = AEDesc()
        sts = carbon.AEGetParamDesc(message, keyDirectObject, typeAEList,
                ctypes.byref(listdesc))
        if sts != 0:
            print("argvemulator warning: cannot unpack open document event")
            running[0] = False
            return

        item_count = ctypes.c_long()
        sts = carbon.AECountItems(ctypes.byref(listdesc), ctypes.byref(item_count))
        if sts != 0:
            print("argvemulator warning: cannot unpack open document event")
            running[0] = False
            return

        desc = AEDesc()
        for i in range(item_count.value):
            # AppleEvent lists are 1-indexed.
            sts = carbon.AEGetNthDesc(ctypes.byref(listdesc), i+1, typeFSRef, 0, ctypes.byref(desc))
            if sts != 0:
                print("argvemulator warning: cannot unpack open document event")
                running[0] = False
                return

            sz = carbon.AEGetDescDataSize(ctypes.byref(desc))
            buf = ctypes.create_string_buffer(sz)
            sts = carbon.AEGetDescData(ctypes.byref(desc), buf, sz)
            if sts != 0:
                print("argvemulator warning: cannot extract open document event")
                continue

            # Convert the raw FSRef into a POSIX path (max 1023 bytes + NUL).
            fsref = buf
            buf = ctypes.create_string_buffer(1024)
            sts = carbon.FSRefMakePath(ctypes.byref(fsref), buf, 1023)
            if sts != 0:
                print("argvemulator warning: cannot extract open document event")
                continue

            if sys.version_info[0] > 2:
                sys.argv.append(buf.value.decode('utf-8'))
            else:
                sys.argv.append(buf.value)

        running[0] = False
        return 0
    carbon.AEInstallEventHandler(kCoreEventClass, kAEOpenDocuments,
            open_file_handler, 0, FALSE)

    @ae_callback
    def open_url_handler(message, reply, refcon):
        # Unpack the list of URL strings carried by the 'GURL' event and
        # append them to sys.argv.
        listdesc = AEDesc()
        ok = carbon.AEGetParamDesc(message, keyDirectObject, typeAEList,
                ctypes.byref(listdesc))
        if ok != 0:
            print("argvemulator warning: cannot unpack open document event")
            running[0] = False
            return

        item_count = ctypes.c_long()
        sts = carbon.AECountItems(ctypes.byref(listdesc), ctypes.byref(item_count))
        if sts != 0:
            print("argvemulator warning: cannot unpack open url event")
            running[0] = False
            return

        desc = AEDesc()
        for i in range(item_count.value):
            sts = carbon.AEGetNthDesc(ctypes.byref(listdesc), i+1, typeChar, 0, ctypes.byref(desc))
            if sts != 0:
                print("argvemulator warning: cannot unpack open URL event")
                running[0] = False
                return

            sz = carbon.AEGetDescDataSize(ctypes.byref(desc))
            buf = ctypes.create_string_buffer(sz)
            sts = carbon.AEGetDescData(ctypes.byref(desc), buf, sz)
            if sts != 0:
                print("argvemulator warning: cannot extract open URL event")
            else:
                if sys.version_info[0] > 2:
                    sys.argv.append(buf.value.decode('utf-8'))
                else:
                    sys.argv.append(buf.value)

        running[0] = False
        return 0
    carbon.AEInstallEventHandler(kAEInternetSuite, kAEISGetURL,
            open_url_handler, 0, FALSE)

    # Remove the funny -psn_xxx_xxx argument
    if len(sys.argv) > 1 and sys.argv[1].startswith('-psn_'):
        del sys.argv[1]

    start = time.time()
    now = time.time()
    eventType = EventTypeSpec()
    eventType.eventClass = kEventClassAppleEvent
    eventType.eventKind = kEventAppleEvent

    # NOTE(review): 'now' is never refreshed inside the loop, so both the
    # loop condition and the ReceiveNextEvent timeout are computed from the
    # initial timestamp; the loop still terminates via the handlers setting
    # running[0] or via eventLoopTimedOutErr — confirm this is intended.
    while running[0] and now - start < timeout[0]:
        event = ctypes.c_void_p()

        sts = carbon.ReceiveNextEvent(1, ctypes.byref(eventType),
                start + timeout[0] - now, TRUE, ctypes.byref(event))

        if sts == eventLoopTimedOutErr:
            break

        elif sts != 0:
            print("argvemulator warning: fetching events failed")
            break

        sts = carbon.AEProcessEvent(event)
        if sts != 0:
            print("argvemulator warning: processing events failed")
            break

    # Uninstall the handlers before returning control to the script.
    carbon.AERemoveEventHandler(kCoreEventClass, kAEOpenApplication,
            open_app_handler, FALSE)
    carbon.AERemoveEventHandler(kCoreEventClass, kAEOpenDocuments,
            open_file_handler, FALSE)
    carbon.AERemoveEventHandler(kAEInternetSuite, kAEISGetURL,
            open_url_handler, FALSE)
def _argv_emulation():
import sys, os
# only use if started by LaunchServices
if os.environ.get('_PY2APP_LAUNCHED_'):
_run_argvemulator()
_argv_emulation()
def _chdir_resource():
    # py2app sets RESOURCEPATH to the app bundle's Resources directory; make
    # it the working directory so relative resource paths resolve.
    import os
    os.chdir(os.environ['RESOURCEPATH'])
_chdir_resource()
def _setup_ctypes():
    # Prepend the bundle's Frameworks directory to ctypes' dyld search
    # fallbacks so libraries shipped inside the .app are found first.
    from ctypes.macholib import dyld
    import os
    frameworks = os.path.join(os.environ['RESOURCEPATH'], '..', 'Frameworks')
    dyld.DEFAULT_FRAMEWORK_FALLBACK.insert(0, frameworks)
    dyld.DEFAULT_LIBRARY_FALLBACK.insert(0, frameworks)
_setup_ctypes()
def _path_inject(paths):
import sys
sys.path[:0] = paths
_path_inject(['/Users/Will/workspace/csvExport/src'])
import re
import sys

# PEP 263 coding cookie, e.g. "# -*- coding: utf-8 -*-".
# Raw bytes literal: the previous non-raw b"...\s..." relied on Python
# passing unknown escapes through, which raises SyntaxWarning (and will
# eventually be an error) on modern Python 3.
cookie_re = re.compile(br"coding[:=]\s*([-\w.]+)")

# Default source encoding when no cookie is present (PEP 3120 for Python 3).
if sys.version_info[0] == 2:
    default_encoding = 'ascii'
else:
    default_encoding = 'utf-8'

def guess_encoding(fp):
    """Return the encoding declared in the first two lines of the binary
    file object *fp*, or ``default_encoding`` if no coding cookie is found.

    *fp* is consumed up to two lines; callers reopen the file afterwards.
    """
    for i in range(2):
        ln = fp.readline()

        m = cookie_re.search(ln)
        if m is not None:
            return m.group(1).decode('ascii')

    return default_encoding
def _run():
    # Locate the bundled main script and execute it in this module's globals.
    global __file__
    import os, site
    # Marker used by code that wants to detect a py2app-frozen process.
    sys.frozen = 'macosx_app'
    argv0 = os.path.basename(os.environ['ARGVZERO'])
    script = SCRIPT_MAP.get(argv0, DEFAULT_SCRIPT)

    # Make the script look like it is __main__ being run directly.
    sys.argv[0] = __file__ = script
    if sys.version_info[0] == 2:
        with open(script, 'rU') as fp:
            source = fp.read() + "\n"
    else:
        # Read as bytes first to honour a PEP 263 coding cookie, then reopen
        # in text mode with the detected encoding.
        with open(script, 'rb') as fp:
            encoding = guess_encoding(fp)

        with open(script, 'r', encoding=encoding) as fp:
            source = fp.read() + '\n'

    exec(compile(source, script, 'exec'), globals(), globals())
# Entry point: run the bundled script. A KeyboardInterrupt during startup is
# treated as a normal, silent exit.
DEFAULT_SCRIPT='/Users/Will/workspace/csvExport/src/csvExport.py'
SCRIPT_MAP={}
try:
    _run()
except KeyboardInterrupt:
    pass
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.gather."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Dtypes exercised by the gather tests below.
_TEST_TYPES = (dtypes.int64, dtypes.float32,
               dtypes.complex64, dtypes.complex128)

# TODO(virimia): Add a benchmark for gather_v2, with batch_dims and axis set.
def _to_str_elements(values):
"""Converts the inner list elements to strings."""
if isinstance(values, list):
return [_to_str_elements(value) for value in values]
else:
return str(values).encode("utf-8")
class GatherTest(test.TestCase, parameterized.TestCase):
  """Exercises array_ops.gather across dtypes, ranks, axes and batch_dims."""

  def _buildParams(self, data, dtype):
    # Cast shared test data to the dtype under test.
    data = data.astype(dtype.as_numpy_dtype)
    # For complex types, add an index-dependent imaginary component so we can
    # tell we got the right value.
    if dtype.is_complex:
      return data + 10j * data
    return data

  def testScalar1D(self):
    with self.cached_session(use_gpu=True):
      data = np.array([0, 1, 2, 3, 7, 5])
      for dtype in _TEST_TYPES:
        for indices in 4, [1, 2, 2, 4, 5]:
          params_np = self._buildParams(data, dtype)
          params = constant_op.constant(params_np)
          indices_tf = constant_op.constant(indices)
          gather_t = array_ops.gather(params, indices_tf)
          gather_val = self.evaluate(gather_t)
          # Compare against plain numpy indexing of the same data.
          np_val = params_np[indices]
          self.assertAllEqual(np_val, gather_val)
          self.assertEqual(np_val.shape, gather_t.get_shape())

  def testScalar2D(self):
    with self.session(use_gpu=True):
      data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
                       [9, 10, 11], [12, 13, 14]])
      for dtype in _TEST_TYPES:
        for axis in range(data.ndim):
          params_np = self._buildParams(data, dtype)
          params = constant_op.constant(params_np)
          indices = constant_op.constant(2)
          gather_t = array_ops.gather(params, indices, axis=axis)
          gather_val = self.evaluate(gather_t)
          self.assertAllEqual(np.take(params_np, 2, axis=axis), gather_val)
          # A scalar index removes the gathered axis from the result shape.
          expected_shape = data.shape[:axis] + data.shape[axis + 1:]
          self.assertEqual(expected_shape, gather_t.get_shape())

  def testSimpleTwoD32(self):
    with self.session(use_gpu=True):
      data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
                       [9, 10, 11], [12, 13, 14]])
      for dtype in _TEST_TYPES:
        for axis in range(data.ndim):
          params_np = self._buildParams(data, dtype)
          params = constant_op.constant(params_np)
          # The indices must be in bounds for any axis.
          indices = constant_op.constant([0, 1, 0, 2])
          gather_t = array_ops.gather(params, indices, axis=axis)
          gather_val = self.evaluate(gather_t)
          self.assertAllEqual(np.take(params_np, [0, 1, 0, 2], axis=axis),
                              gather_val)
          expected_shape = data.shape[:axis] + (4,) + data.shape[axis + 1:]
          self.assertEqual(expected_shape, gather_t.get_shape())

  @test_util.run_deprecated_v1
  def testHigherRank(self):
    # We check that scalar and empty indices shapes work as well
    shape = (2, 1, 3, 2)
    for indices_shape in (), (0,), (2, 0), (2, 3):
      for dtype in _TEST_TYPES:
        for axis in range(len(shape)):
          params = self._buildParams(np.random.randn(*shape), dtype)
          indices = np.random.randint(shape[axis], size=indices_shape)
          with self.cached_session(use_gpu=True) as sess:
            tf_params = constant_op.constant(params)
            tf_indices = constant_op.constant(indices)
            # Check that both positive and negative indices for axis work.
            tf_axis = constant_op.constant(axis)
            tf_negative_axis = constant_op.constant(-len(shape) + axis)
            gather = array_ops.gather(tf_params, tf_indices, axis=tf_axis)
            gather_negative_axis = array_ops.gather(
                tf_params, tf_indices, axis=tf_negative_axis)
            gather_value, gather_negative_axis_value = sess.run(
                [gather, gather_negative_axis])
            gather_np = np.take(params, indices, axis)
            self.assertAllEqual(gather_np, gather_value)
            self.assertAllEqual(gather_np, gather_negative_axis_value)
            expected_shape = (params.shape[:axis] + indices.shape +
                              params.shape[axis + 1:])
            self.assertEqual(expected_shape, gather.shape)
            self.assertEqual(expected_shape, gather_negative_axis.shape)

            # Test gradients
            gather_grad = np.random.randn(
                *gather.get_shape().as_list()).astype(dtype.as_numpy_dtype)
            if dtype.is_complex:
              gather_grad -= 1j * gather_grad
            params_grad, indices_grad, axis_grad = gradients_impl.gradients(
                gather, [tf_params, tf_indices, tf_axis], gather_grad)
            # Only params is differentiable; indices/axis gradients are None.
            self.assertEqual(indices_grad, None)
            self.assertEqual(axis_grad, None)
            if dtype.is_integer:
              self.assertEqual(params_grad, None)
              continue
            # For axis 0, we are able to create an efficient IndexedSlices for
            # the gradient.
            if axis == 0:
              self.assertEqual(type(params_grad), ops.IndexedSlices)
              params_grad = ops.convert_to_tensor(params_grad)
            correct_params_grad = np.zeros(shape).astype(dtype.as_numpy_dtype)
            outer_dims = axis
            inner_dims = len(shape) - axis - 1
            gather_grad = gather_grad.reshape(
                shape[:axis] + (indices.size,) + shape[axis + 1:])
            # Scatter-add the incoming gradient back to the gathered rows to
            # build the expected params gradient.
            for source_index, dest_index in enumerate(indices.flat):
              dest_slice = ((slice(None),) * outer_dims + (dest_index,) +
                            (slice(None),) * inner_dims)
              source_slice = ((slice(None),) * outer_dims + (source_index,) +
                              (slice(None),) * inner_dims)
              correct_params_grad[dest_slice] += gather_grad[source_slice]
            self.assertAllClose(
                correct_params_grad,
                self.evaluate(params_grad),
                atol=2e-6,
                rtol=2e-6)

  @test_util.run_deprecated_v1
  def testString(self):
    params = np.array([[b"asdf", b"zxcv"], [b"qwer", b"uiop"]])
    with self.cached_session():
      self.assertAllEqual([b"qwer", b"uiop"],
                          array_ops.gather(params, 1, axis=0).eval())
      self.assertAllEqual([b"asdf", b"qwer"],
                          array_ops.gather(params, 0, axis=1).eval())

  @test_util.run_deprecated_v1
  def testUInt32AndUInt64(self):
    for unsigned_type in (dtypes.uint32, dtypes.uint64):
      params = self._buildParams(
          np.array([[1, 2, 3], [7, 8, 9]]), unsigned_type)
      with self.cached_session():
        self.assertAllEqual([7, 8, 9],
                            array_ops.gather(params, 1, axis=0).eval())
        self.assertAllEqual([1, 7], array_ops.gather(params, 0, axis=1).eval())

  @test_util.run_deprecated_v1
  def testUnknownIndices(self):
    # With fully-unknown indices the output shape is unknown too.
    params = constant_op.constant([[0, 1, 2]])
    indices = array_ops.placeholder(dtypes.int32)
    gather_t = array_ops.gather(params, indices)
    self.assertEqual(None, gather_t.get_shape())

  @test_util.run_deprecated_v1
  def testUnknownAxis(self):
    params = constant_op.constant([[0, 1, 2]])
    indices = constant_op.constant([[0, 0], [0, 0]])
    axis = array_ops.placeholder(dtypes.int32)
    gather_t = array_ops.gather(params, indices, axis=axis)
    # Rank 2 params with rank 2 indices results in a rank 3 shape.
    self.assertEqual([None, None, None], gather_t.shape.as_list())

    # If indices is also unknown the result rank is unknown.
    indices = array_ops.placeholder(dtypes.int32)
    gather_t = array_ops.gather(params, indices, axis=axis)
    self.assertEqual(None, gather_t.shape)

  @test_util.disable_xla(
      "Assertion inside an op is not supported in XLA. Instead XLA clamps the "
      "index to be in bounds and returns the indexed value there (Don't rely "
      "on this behavior).")
  def testBadIndicesCPU(self):
    with test_util.force_cpu():
      params = [[0, 1, 2], [3, 4, 5]]
      with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
        self.evaluate(array_ops.gather(params, [[7]], axis=0))
      with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 3\)"):
        self.evaluate(array_ops.gather(params, [[7]], axis=1))

  def _disabledTestBadIndicesGPU(self):
    # TODO disabled due to different behavior on GPU and CPU
    # On GPU the bad indices do not raise error but fetch 0 values
    if not test.is_gpu_available():
      return
    with self.session(use_gpu=True):
      params = [[0, 1, 2], [3, 4, 5]]
      with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
        array_ops.gather(params, [[7]], axis=0).eval()
      with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 3\)"):
        array_ops.gather(params, [[7]], axis=1).eval()

  @test_util.run_deprecated_v1
  def testBadAxis(self):
    with self.session(use_gpu=True):
      params = [0, 1, 2]
      params_ph = array_ops.placeholder(dtypes.int32)
      indices = 0
      for bad_axis in (1, 2, -2):
        # Shape inference can validate axis for known params rank.
        with self.assertRaisesWithPredicateMatch(
            ValueError, "Shape must be at least rank . but is rank 1"):
          array_ops.gather(params, indices, axis=bad_axis)
        # If params rank is unknown, an op error occurs.
        with self.assertRaisesOpError(
            r"Expected axis in the range \[-1, 1\), but got %s" % bad_axis):
          array_ops.gather(params_ph, indices, axis=bad_axis).eval(
              feed_dict={params_ph: params})

  @test_util.run_deprecated_v1
  def testEmptySlices(self):
    with self.session(use_gpu=True):
      for dtype in _TEST_TYPES:
        for itype in np.int32, np.int64:
          # Leading axis gather.
          params = np.zeros((7, 0, 0), dtype=dtype.as_numpy_dtype)
          indices = np.array([3, 4], dtype=itype)
          gather = array_ops.gather(params, indices, axis=0)
          self.assertAllEqual(gather.eval(), np.zeros((2, 0, 0)))

          # Middle axis gather.
          params = np.zeros((0, 7, 0), dtype=dtype.as_numpy_dtype)
          gather = array_ops.gather(params, indices, axis=1)
          self.assertAllEqual(gather.eval(), np.zeros((0, 2, 0)))

          # Trailing axis gather.
          params = np.zeros((0, 0, 7), dtype=dtype.as_numpy_dtype)
          gather = array_ops.gather(params, indices, axis=2)
          self.assertAllEqual(gather.eval(), np.zeros((0, 0, 2)))

  @parameterized.parameters([
      # batch_dims=0 (equivalent to tf.gather)
      dict(  # 2D indices
          batch_dims=0,
          params=[6, 7, 8, 9],
          indices=[[2, 1], [0, 3]],
          expected=[[8, 7], [6, 9]]),
      dict(  # 3D indices
          batch_dims=0,
          params=[6, 7, 8, 9],
          indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
          expected=[[[9, 7], [8, 6]], [[6, 9], [8, 8]]]),
      dict(  # 4D indices
          batch_dims=0,
          params=[8, 9],
          indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
                   [[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
          expected=[[[[8, 9], [9, 8]], [[8, 8], [9, 9]]],
                    [[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
      # batch_dims=indices.shape.ndims - 1
      # (equivalent to tf.compat.v1.batch_gather)
      dict(  # 2D indices (1 batch dim)
          batch_dims=1,
          params=[[10, 11, 12, 13], [20, 21, 22, 23]],
          indices=[[2, 1], [0, 3]],
          expected=[[12, 11], [20, 23]]),
      dict(  # 3D indices (2 batch dims)
          batch_dims=2,
          params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
          indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
          expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
      dict(  # 2D indices (1 batch dim)
          batch_dims=-1,
          params=[[10, 11, 12, 13], [20, 21, 22, 23]],
          indices=[[2, 1], [0, 3]],
          expected=[[12, 11], [20, 23]]),
      dict(  # 3D indices (2 batch dims)
          batch_dims=-1,
          params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
          indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
          expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
      # 0 < batch_dims < indices.shape.ndims - 1
      dict(  # 3D indices (1 batch dim)
          batch_dims=1,
          params=[[10, 11, 12, 13], [20, 21, 22, 23]],
          indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
          expected=[[[13, 11], [12, 10]], [[20, 23], [22, 22]]]),
      dict(  # 4D indices (1 batch dim)
          batch_dims=1,
          params=[[6, 7], [8, 9]],
          indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
                   [[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
          expected=[[[[6, 7], [7, 6]], [[6, 6], [7, 7]]],
                    [[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
      dict(  # 4D indices (2 batch dims)
          batch_dims=2,
          params=[[[2, 3], [4, 5]], [[6, 7], [8, 9]]],
          indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
                   [[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
          expected=[[[[2, 3], [3, 2]], [[4, 4], [5, 5]]],
                    [[[7, 7], [6, 6]], [[8, 9], [9, 8]]]]),

      # axis > 0
      dict(  # 3D indices, batch_dims=1, axis=2
          # params.shape  = [I1, J1, J2] = [2, 2, 3]
          # indices.shape = [I1, K1, K2] = [2, 1, 5]
          # result.shape  = [I1, J1, K1, K2] = [2, 2, 1, 5]
          batch_dims=1,
          axis=2,
          params=[[[10, 11, 12], [13, 14, 15]], [[20, 21, 22], [23, 24, 25]]],
          indices=[[[0, 1, 2, 1, 0]], [[0, 1, 2, 1, 0]]],
          expected=[[[[10, 11, 12, 11, 10]], [[13, 14, 15, 14, 13]]],
                    [[[20, 21, 22, 21, 20]], [[23, 24, 25, 24, 23]]]]),
      dict(  # 3D indices, batch_dims=None, axis=1
          batch_dims=None,
          axis=1,
          params=[[10, 11, 12], [13, 14, 15]],
          indices=[1, 0],
          expected=[[11, 10], [14, 13]]),
  ])
  @test_util.run_in_graph_and_eager_modes
  def testBatchDims(self, params, indices, batch_dims, expected=None,
                    axis=None):
    result = array_ops.gather(params, indices, axis=axis, batch_dims=batch_dims)
    self.assertAllEqual(expected, result)

    # Test the gradients shape.
    if context.executing_eagerly():
      with backprop.GradientTape() as tape:
        zeros = array_ops.zeros_like(params, dtype=dtypes.float32)
        tape.watch(zeros)
        values = zeros * 2 + zeros
        result = array_ops.gather(
            values, indices, axis=axis, batch_dims=batch_dims)
      gradients = tape.gradient(result, zeros)

    else:
      zeros = array_ops.zeros_like(params, dtype=dtypes.float32)
      values = zeros * 2 + zeros
      result = array_ops.gather(
          values, indices, axis=axis, batch_dims=batch_dims)
      gradients = gradients_impl.gradients(result, [zeros])[0]

    # The params gradient must have the same shape as params itself.
    self.assertAllEqual(array_ops.shape(params), array_ops.shape(gradients))

    # Run the same test for strings.
    params = _to_str_elements(params)
    expected = _to_str_elements(expected)
    result = array_ops.gather(
        params, indices, axis=axis, batch_dims=batch_dims)
    self.assertAllEqual(expected, result)

  @parameterized.parameters([
      dict(
          params_shape=[2, 3, 4, 5, 6, 7],
          indices_shape=[2, 3, 8, 9, 10],
          batch_dims=2,
          axis=2,
          output_shape=[2, 3, 8, 9, 10, 5, 6, 7]
          # = params.shape[:2] + indices.shape[2:] + params.shape[3:]
      ),
      dict(
          params_shape=[2, 3, 4, 5, 6, 7],
          indices_shape=[2, 3, 8, 9, 10],
          batch_dims=2,
          axis=3,
          output_shape=[2, 3, 4, 8, 9, 10, 6, 7]
          # = params.shape[:3] + indices.shape[2:] + params.shape[4:]
      ),
      dict(
          params_shape=[2, 3, 4, 5, 6, 7],
          indices_shape=[2, 3, 8, 9, 10],
          batch_dims=2,
          axis=4,
          output_shape=[2, 3, 4, 5, 8, 9, 10, 7]
          # = params.shape[:4] + indices.shape[2:] + params.shape[5:]
      ),
      dict(
          params_shape=[2, 3, 4, 5, 6, 7],
          indices_shape=[2, 3, 8, 9, 10],
          batch_dims=2,
          axis=5,
          output_shape=[2, 3, 4, 5, 6, 8, 9, 10]
          # = params.shape[:5] + indices.shape[2:] + params.shape[6:]
      ),
      dict(
          params_shape=[2, 3, 4, 5, 6, 7],
          indices_shape=[2, 3, 8, 9, 10],
          batch_dims=2,
          axis=-4,
          output_shape=[2, 3, 8, 9, 10, 5, 6, 7]
          # = params.shape[:2] + indices.shape[2:] + params.shape[3:]
      ),
      dict(
          params_shape=[2, 3, 4, 5, 6, 7],
          indices_shape=[2, 3, 8, 9, 10],
          batch_dims=2,
          axis=-3,
          output_shape=[2, 3, 4, 8, 9, 10, 6, 7]
          # = params.shape[:3] + indices.shape[2:] + params.shape[4:]
      ),
      dict(
          params_shape=[2, 3, 4, 5, 6, 7],
          indices_shape=[2, 3, 8, 9, 10],
          batch_dims=2,
          axis=-2,
          output_shape=[2, 3, 4, 5, 8, 9, 10, 7]
          # = params.shape[:4] + indices.shape[2:] + params.shape[5:]
      ),
      dict(
          params_shape=[2, 3, 4, 5, 6, 7],
          indices_shape=[2, 3, 8, 9, 10],
          batch_dims=2,
          axis=-1,
          output_shape=[2, 3, 4, 5, 6, 8, 9, 10]
          # = params.shape[:5] + indices.shape[2:] + params.shape[6:]
      ),
  ])
  @test_util.run_in_graph_and_eager_modes
  def testBatchDimsMatchesPythonBatching(self, params_shape, indices_shape,
                                         batch_dims, axis, output_shape):
    """Checks that batch_dims matches multiple calls to tf.gather()."""
    # Generate a `params` tensor with the indicated shape.
    params_size = np.prod(params_shape)
    params = np.reshape(np.arange(params_size), params_shape)

    # Generate an `indices` tensor with the indicated shape, where each index
    # is within the appropriate range.
    indices_size = np.prod(indices_shape)
    indices = np.reshape(np.arange(indices_size), indices_shape)
    indices = indices % params_shape[axis]

    # Perform repeated (batched) gather operations with numpy, to find the
    # expected result.
    expected = self._batchNumpyGather(params, indices, axis, batch_dims)

    # On Windows, we get an exception if we pass in the transformed numpy
    # arrays ("Failed to convert numpy ndarray to a Tensor (Unsupported
    # feed type)."); so convert them back to lists before calling tf.gather.
    params = params.tolist()
    indices = indices.tolist()

    result = array_ops.gather(params, indices, axis=axis, batch_dims=batch_dims)
    self.assertAllEqual(output_shape, result.shape.as_list())
    self.assertAllEqual(expected, result)

    # Run the same test for strings.
    params = _to_str_elements(params)
    expected = _to_str_elements(expected.tolist())
    result = array_ops.gather(
        params, indices, axis=axis, batch_dims=batch_dims)
    self.assertAllEqual(output_shape, result.shape.as_list())
    self.assertAllEqual(expected, result)

  def _batchNumpyGather(self, params, indices, axis, batch_dims):
    """Performs a batch gather by making recursive calls to np.take().

    This is used by testBatchDims() to construct the expected value.

    Args:
      params: A numpy array
      indices: A numpy array
      axis: An integer
      batch_dims: An integer

    Returns:
      A numpy array
    """
    if batch_dims == 0:
      return np.take(params, indices, axis=axis)
    self.assertEqual(params.shape[0], indices.shape[0])
    if axis > 0:
      # Peeling off a batch dim shifts the gather axis left by one.
      axis -= 1
    return np.stack([
        self._batchNumpyGather(params[i], indices[i], axis, batch_dims - 1)
        for i in range(params.shape[0])
    ])

  @test_util.run_v1_only("RefVariable is not supported in v2")
  def testGatherRefVariable(self):
    with self.cached_session():
      v = variables.RefVariable(constant_op.constant([[1, 2], [3, 4], [5, 6]]))
      self.evaluate(variables.global_variables_initializer())
      gather = array_ops.gather(v, [0, 2])
      if not context.executing_eagerly():  # .op doesn't make sense in Eager
        self.assertEqual("GatherV2", gather.op.name)
      self.assertAllEqual([[1, 2], [5, 6]], gather)

  @test_util.run_in_graph_and_eager_modes
  def testGatherResourceVariable(self):
    with self.cached_session():
      v = resource_variable_ops.ResourceVariable(
          constant_op.constant([[1, 2], [3, 4], [5, 6]]))
      self.evaluate(variables.global_variables_initializer())
      gather = array_ops.gather(v, [0, 2])
      if not context.executing_eagerly():  # .op doesn't make sense in Eager
        self.assertEqual("ResourceGather", gather.op.inputs[0].op.type)
      self.assertAllEqual([[1, 2], [5, 6]], gather)
if __name__ == "__main__":
  # Run the gather tests under the TensorFlow test runner.
  test.main()
|
|
# VMware vCloud Python SDK
# Copyright (c) 2014 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import time
import requests
from StringIO import StringIO
from schema.vcd.v1_5.schemas.vcloud import vAppType, vdcType, queryRecordViewType, taskType, vcloudType
from schema.vcd.v1_5.schemas.vcloud.taskType import TaskType
from schema.vcd.v1_5.schemas.vcloud.vAppType import VAppType, NetworkConnectionSectionType
from iptools import ipv4, IpRange
from pyvcloud.helper import CommonUtils
from pyvcloud import _get_logger, Http, Log
# Maps the numeric entity status codes returned by the vCloud API to
# human-readable descriptions.
VCLOUD_STATUS_MAP = {
    -1: "Could not be created",
    0: "Unresolved",
    1: "Resolved",
    2: "Deployed",
    3: "Suspended",
    4: "Powered on",
    5: "Waiting for user input",
    6: "Unknown state",
    7: "Unrecognized state",
    8: "Powered off",
    9: "Inconsistent state",
    10: "Children do not all have the same status",
    11: "Upload initiated, OVF descriptor pending",
    12: "Upload initiated, copying contents",
    13: "Upload initiated , disk contents pending",
    14: "Upload has been quarantined",
    15: "Upload quarantine period has expired"
}
class VAPP(object):
def __init__(self, vApp, headers, verify, log=False):
self.me = vApp
self.headers = headers
self.verify = verify
self.response = None
self.logger = _get_logger() if log else None
@property
def name(self):
return self.me.get_name()
def execute(self, operation, http, body=None, targetVM=None):
"""
Execute an operation against a VM as an Asychronous Task.
:param operation: (str): The command to execute
:param http: (str): The http operation.
:param body: (str, optional): a body for the http request
:param targetVM: (str, optional): The name of the VM that will be the target of the request.
:return: (TaskType or Bool) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request. \n
Or False if the request failed, error and debug level messages are logged.
"""
vApp = targetVM if targetVM else self.me
link = filter(lambda link: link.get_rel() == operation, vApp.get_Link())
if not link:
Log.error(self.logger, "link not found; rel=%s" % operation)
Log.debug(self.logger, "vApp href=%s, name=%s" % (vApp.get_href(), vApp.get_name()))
return False
else:
if http == "post":
headers = self.headers
if body and body.startswith('<DeployVAppParams '):
headers['Content-type'] = 'application/vnd.vmware.vcloud.deployVAppParams+xml'
elif body and body.startswith('<UndeployVAppParams '):
headers['Content-type'] = 'application/vnd.vmware.vcloud.undeployVAppParams+xml'
elif body and body.startswith('<CreateSnapshotParams '):
headers['Content-type'] = 'application/vnd.vmware.vcloud.createSnapshotParams+xml'
self.response = Http.post(link[0].get_href(), data=body, headers=headers, verify=self.verify, logger=self.logger)
elif http == "put":
self.response = Http.put(link[0].get_href(), data=body, headers=self.headers, verify=self.verify, logger=self.logger)
else:
self.response = Http.delete(link[0].get_href(), headers=self.headers, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.accepted:
return taskType.parseString(self.response.content, True)
else:
Log.debug(self.logger, "failed; response status=%d, content=%s" % (self.response.status_code, self.response.text))
return False
def deploy(self, powerOn=True):
"""
Deploy the vapp
:param powerOn: (bool, optional): Power on the vApp and its contained VMs after deployment.
:return: (bool): True if the user was vApp was successfully deployed, False otherwise.
"""
powerOnValue = 'true' if powerOn else 'false'
deployVAppParams = vcloudType.DeployVAppParamsType()
deployVAppParams.set_powerOn(powerOnValue)
body = CommonUtils.convertPythonObjToStr(deployVAppParams, name = "DeployVAppParams",
namespacedef = 'xmlns="http://www.vmware.com/vcloud/v1.5"')
return self.execute("deploy", "post", body=body)
def undeploy(self, action='powerOff'):
"""
Undeploy the vapp
:param action: (bool, optional): Power on the vApp and its contained VMs after deployment.
* The valid values of action are
- **powerOff** (Power off the VMs. This is the default action if
this attribute is missing or empty),
- **suspend** (Suspend the VMs), shutdown (Shut down the VMs),
- **force** (Attempt to power off the VMs. Failures in undeploying the VM or associated networks
are ignored. All references to the vApp and its VMs are removed from the database),
- **default** (Use the actions, order, and delay specified in the StartupSection).
:returns: (bool): True if the user was vApp was successfully deployed, False otherwise.
"""
undeployVAppParams = vcloudType.UndeployVAppParamsType()
undeployVAppParams.set_UndeployPowerAction(action)
body = CommonUtils.convertPythonObjToStr(undeployVAppParams, name = "UndeployVAppParams",
namespacedef = 'xmlns="http://www.vmware.com/vcloud/v1.5"')
return self.execute("undeploy", "post", body=body)
def reboot(self):
"""
Reboot the vApp
:returns: (None)
"""
self.execute("power:reboot", "post")
def poweron(self):
"""
Power on the vApp
:returns: (None)
"""
return self.execute("power:powerOn", "post")
def poweroff(self):
"""
Power off the vApp
:returns: (None)
"""
return self.execute("power:powerOff", "post")
def shutdown(self):
"""
Shutdown the vApp
:returns: (None)
"""
return self.execute("power:shutdown", "post")
def suspend(self):
"""
Suspend the vApp
:returns: (None)
"""
self.execute("power:suspend", "post")
def reset(self):
"""
Reset the vApp
:returns: (None)
"""
self.execute("power:reset", "post")
def delete(self):
"""
Delete the vApp
Note: The vApp must be undeployed and power it off before it is deleted.
:returns: (None)
"""
return self.execute("remove", "delete")
def create_snapshot(self):
"""
Create a new snapshot of the vApp state.
:return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request.
"""
snapshot_name = '{}_snapshot_{}'.format(self.name, int(round(time.time() * 1000)))
createSnapshotParams = vcloudType.CreateSnapshotParamsType()
createSnapshotParams.set_name(snapshot_name)
createSnapshotParams.set_Description(snapshot_name)
body = CommonUtils.convertPythonObjToStr(createSnapshotParams, name="CreateSnapshotParams",
namespacedef='xmlns="http://www.vmware.com/vcloud/v1.5"')
return self.execute("snapshot:create", "post", body)
def revert_snapshot(self):
"""
Revert to a previous vApp snapshot.
:return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request.
"""
return self.execute("snapshot:revertToCurrent", "post")
def delete_snapshot(self):
"""
Delete an existing snapshot.
:return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request.
"""
return self.execute("snapshot:removeAll", "post")
@staticmethod
def create_networkConfigSection(network_name, network_href, fence_mode):
parentNetwork = vcloudType.ReferenceType(href=network_href, name=network_name)
configuration = vcloudType.NetworkConfigurationType()
configuration.set_ParentNetwork(parentNetwork)
configuration.set_FenceMode(fence_mode)
networkConfig = vcloudType.VAppNetworkConfigurationType()
networkConfig.set_networkName(network_name)
networkConfig.set_Configuration(configuration)
info = vcloudType.Msg_Type()
info.set_valueOf_("Configuration parameters for logical networks")
networkConfigSection = vcloudType.NetworkConfigSectionType()
networkConfigSection.add_NetworkConfig(networkConfig)
networkConfigSection.set_Info(vAppType.cimString(valueOf_="Network config"))
return networkConfigSection
    def connect_vms(self, network_name, connection_index,
                    connections_primary_index=None, ip_allocation_mode='DHCP',
                    mac_address=None, ip_address=None):
        """
        Attach the vApp's VMs to a vApp virtual network.

        :param network_name: (str): The network name to connect the VM to.
        :param connection_index: (str): Virtual slot number associated with this NIC. First slot number is 0.
        :param connections_primary_index: (str): Virtual slot number associated with the NIC that should be considered this \n
            virtual machine's primary network connection. Defaults to slot 0.
        :param ip_allocation_mode: (str, optional): IP address allocation mode for this connection.
            * One of:
                - POOL (A static IP address is allocated automatically from a pool of addresses.)
                - DHCP (The IP address is obtained from a DHCP service.)
                - MANUAL (The IP address is assigned manually in the IpAddress element.)
                - NONE (No IP addressing mode specified.)
        :param mac_address: (str): the MAC address associated with the NIC.
        :param ip_address: (str): the IP address assigned to this NIC.
        :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request.
        """
        children = self.me.get_Children()
        if children:
            vms = children.get_Vm()
            for vm in vms:
                # Build the new NIC entry and splice it into the VM's existing
                # NetworkConnectionSection in place.
                new_connection = self._create_networkConnection(
                    network_name, connection_index, ip_allocation_mode,
                    mac_address, ip_address)
                networkConnectionSection = [section for section in vm.get_Section() if isinstance(section, NetworkConnectionSectionType)][0]
                self._modify_networkConnectionSection(
                    networkConnectionSection,
                    new_connection,
                    connections_primary_index)
                # Serialize the section to XML for the PUT request body.
                output = StringIO()
                networkConnectionSection.export(output,
                    0,
                    name_ = 'NetworkConnectionSection',
                    namespacedef_ = 'xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:vmw="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"',
                    pretty_print = True)
                # vCD requires the Info element in the ovf namespace; the
                # generated exporter emits it as vmw:Info.
                body=output.getvalue().replace("vmw:Info", "ovf:Info")
                self.response = Http.put(vm.get_href() + "/networkConnectionSection/", data=body, headers=self.headers, verify=self.verify, logger=self.logger)
                if self.response.status_code == requests.codes.accepted:
                    # NOTE(review): this returns after the FIRST VM whose update
                    # is accepted, so only one VM per call is actually
                    # connected — confirm that is the intended behavior.
                    return taskType.parseString(self.response.content, True)
    def disconnect_vms(self, network_name=None):
        """
        Disconnect the vApp's VMs from a vApp network.

        :param network_name: (string): The name of the vApp network. If None, then disconnect from all the networks.
        :return: (TaskType) the vCD task when a server-side update was issued
            and accepted, otherwise a synthetic already-successful task.
        """
        children = self.me.get_Children()
        if children:
            vms = children.get_Vm()
            for vm in vms:
                Log.debug(self.logger, "child VM name=%s" % vm.get_name())
                networkConnectionSection = [section for section in vm.get_Section() if isinstance(section, NetworkConnectionSectionType)][0]
                # 'found' doubles as both a flag and, in the named-network
                # case, the index of the connection to remove.
                found = -1
                if network_name is None:
                    # Drop every NIC connection from this VM's section.
                    networkConnectionSection.set_NetworkConnection([])
                    found = 1
                else:
                    for index, networkConnection in enumerate(networkConnectionSection.get_NetworkConnection()):
                        if networkConnection.get_network() == network_name:
                            found = index
                            break
                    if found != -1:
                        networkConnectionSection.NetworkConnection.pop(found)
                if found != -1:
                    # Push the modified section back to vCD for this VM.
                    output = StringIO()
                    networkConnectionSection.export(output,
                        0,
                        name_ = 'NetworkConnectionSection',
                        namespacedef_ = 'xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:vmw="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"',
                        pretty_print = True)
                    # vCD requires the Info element in the ovf namespace.
                    body=output.getvalue().replace("vmw:Info", "ovf:Info")
                    self.response = Http.put(vm.get_href() + "/networkConnectionSection/", data=body, headers=self.headers, verify=self.verify, logger=self.logger)
                    if self.response.status_code == requests.codes.accepted:
                        # NOTE(review): returns after the first VM whose update
                        # is accepted; remaining VMs stay connected.
                        return taskType.parseString(self.response.content, True)
        # Nothing needed changing (or no update was accepted): report success
        # with a synthetic completed task so callers can treat it uniformly.
        task = TaskType()
        task.set_status("success")
        task.set_Progress("100")
        return task
    def connect_to_network(self, network_name, network_href, fence_mode='bridged'):
        """
        Connect the vApp to an existing virtual network in the VDC.

        :param network_name: (str): The name of the virtual network.
        :param network_href: (str): A uri that points to the network resource.
        :param fence_mode: (str, optional): fencing mode, defaults to 'bridged'.
        :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request.
        """
        vApp_NetworkConfigSection = [section for section in self.me.get_Section() if section.__class__.__name__ == "NetworkConfigSectionType"][0]
        link = [link for link in vApp_NetworkConfigSection.get_Link() if link.get_type() == "application/vnd.vmware.vcloud.networkConfigSection+xml"][0]
        # Start a fresh section containing the new network, then copy the
        # existing configs into it.
        networkConfigSection = VAPP.create_networkConfigSection(network_name, network_href, fence_mode)
        for networkConfig in vApp_NetworkConfigSection.get_NetworkConfig():
            if networkConfig.get_networkName() == network_name:
                # Already connected: short-circuit with a synthetic success
                # task instead of issuing a redundant PUT.
                task = TaskType()
                task.set_status("success")
                task.set_Progress("100")
                return task
            networkConfigSection.add_NetworkConfig(networkConfig)
        output = StringIO()
        networkConfigSection.export(output,
            0,
            name_ = 'NetworkConfigSection',
            namespacedef_ = 'xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"',
            pretty_print = True)
        # Post-process the generated XML into the namespace layout vCD
        # expects; the replace chain fixes Info/ovf:Info and strips the vmw
        # prefix while protecting RetainNetInfoAcrossDeployments.
        body = output.getvalue().\
            replace('Info msgid=""', "ovf:Info").replace("Info", "ovf:Info").replace(":vmw", "").replace("vmw:","")\
            .replace("RetainNetovf", "ovf").replace("ovf:InfoAcrossDeployments","RetainNetInfoAcrossDeployments")
        self.response = Http.put(link.get_href(), data=body, headers=self.headers, verify=self.verify, logger=self.logger)
        if self.response.status_code == requests.codes.accepted:
            return taskType.parseString(self.response.content, True)
    def disconnect_from_networks(self):
        """
        Disconnect the vApp from all currently connected virtual networks.

        :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request.
        """
        networkConfigSection = [section for section in self.me.get_Section() if section.__class__.__name__ == "NetworkConfigSectionType"][0]
        link = [link for link in networkConfigSection.get_Link() if link.get_type() == "application/vnd.vmware.vcloud.networkConfigSection+xml"][0]
        # Empty the NetworkConfig list in place, then PUT the now-empty
        # section back to vCD.
        networkConfigSection.NetworkConfig[:] = []
        output = StringIO()
        networkConfigSection.export(output,
            0,
            name_ = 'NetworkConfigSection',
            namespacedef_ = 'xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"',
            pretty_print = True)
        # Rewrite the generated namespaces into the form vCD expects
        # (ovf:Info, no vmw prefix).
        body = output.getvalue().\
            replace("vmw:", "").replace('Info xmlns:vmw="http://www.vmware.com/vcloud/v1.5" msgid=""', "ovf:Info").\
            replace("/Info", "/ovf:Info")
        self.response = Http.put(link.get_href(), data=body, headers=self.headers, verify=self.verify, logger=self.logger)
        if self.response.status_code == requests.codes.accepted:
            return taskType.parseString(self.response.content, True)
    def disconnect_from_network(self, network_name):
        """
        Disconnect the vApp from an existing virtual network in the VDC.

        :param network_name: (str): The name of the virtual network.
        :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request.
        """
        networkConfigSection = [section for section in self.me.get_Section() if section.__class__.__name__ == "NetworkConfigSectionType"][0]
        link = [link for link in networkConfigSection.get_Link() if link.get_type() == "application/vnd.vmware.vcloud.networkConfigSection+xml"][0]
        found = -1
        # NOTE(review): no break — when duplicate network names exist this
        # remembers the LAST matching index; only one entry is removed.
        for index, networkConfig in enumerate(networkConfigSection.get_NetworkConfig()):
            if networkConfig.get_networkName() == network_name:
                found = index
        if found != -1:
            networkConfigSection.NetworkConfig.pop(found)
            output = StringIO()
            networkConfigSection.export(output,
                0,
                name_ = 'NetworkConfigSection',
                namespacedef_ = 'xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"',
                pretty_print = True)
            # Rewrite the generated namespaces into the form vCD expects.
            body = output.getvalue().\
                replace("vmw:", "").replace('Info xmlns:vmw="http://www.vmware.com/vcloud/v1.5" msgid=""', "ovf:Info").\
                replace("/Info", "/ovf:Info")
            self.response = Http.put(link.get_href(), data=body, headers=self.headers, verify=self.verify, logger=self.logger)
            if self.response.status_code == requests.codes.accepted:
                return taskType.parseString(self.response.content, True)
def attach_disk_to_vm(self, vm_name, disk_ref):
"""
Attach a disk volume to a VM.
The volume must have been previously added to the VDC.
:param vm_name: (str): The name of the vm that the disk will be attached to.
:param disk_ref: (str): The url of a disk resource.
:return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request.
*Note:* A list of disk references for the vdc can be obtained using the VCA get_diskRefs() method
"""
children = self.me.get_Children()
if children:
vms = [vm for vm in children.get_Vm() if vm.name == vm_name]
if len(vms) ==1:
body = """
<DiskAttachOrDetachParams xmlns="http://www.vmware.com/vcloud/v1.5">
<Disk type="application/vnd.vmware.vcloud.disk+xml"
href="%s" />
</DiskAttachOrDetachParams>
""" % disk_ref.href
return self.execute("disk:attach", "post", body=body, targetVM=vms[0])
def detach_disk_from_vm(self, vm_name, disk_ref):
"""
Detach a disk volume from a VM.
The volume must have been previously attached to the VM.
:param vm_name: (str): The name of the vm that the disk will be attached to.
:param disk_ref: (str): The url of a disk resource.
:return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request.
*Note:* A list of disk references for the vdc can be obtained using the VCA get_diskRefs() method
"""
children = self.me.get_Children()
if children:
vms = [vm for vm in children.get_Vm() if vm.name == vm_name]
if len(vms) ==1:
body = """
<DiskAttachOrDetachParams xmlns="http://www.vmware.com/vcloud/v1.5">
<Disk type="application/vnd.vmware.vcloud.disk+xml"
href="%s" />
</DiskAttachOrDetachParams>
""" % disk_ref.href
return self.execute("disk:detach", "post", body=body, targetVM=vms[0])
    def vm_media(self, vm_name, media, operation):
        """
        Insert or eject a media device (e.g. ISO) on a named VM.

        :param vm_name: (str): The name of the vm.
        :param media: (dict): media descriptor; keys 'name', 'id' and 'href' are read.
        :param operation: (str): action verb interpolated into the link name
            "media:<operation>Media" (e.g. 'insert' or 'eject').
        :return: (TaskType) task to monitor the request, or None when the vApp
            has no children or the VM name is not unique.
        """
        children = self.me.get_Children()
        if children:
            vms = [vm for vm in children.get_Vm() if vm.name == vm_name]
            if len(vms) ==1:
                # NOTE(review): 'type' is filled from media['name'] and 'name'
                # from media['id'] — this looks swapped relative to the XML
                # attribute names; confirm the expected dict keys against
                # callers before changing.
                body = """
                 <MediaInsertOrEjectParams xmlns="http://www.vmware.com/vcloud/v1.5">
                     <Media
                       type="%s"
                       name="%s"
                       href="%s" />
                 </MediaInsertOrEjectParams>
                """ % (media.get('name'), media.get('id'), media.get('href'))
                return self.execute("media:%sMedia" % operation, "post", body=body, targetVM=vms[0])
    def customize_guest_os(self, vm_name, customization_script=None,
                           computer_name=None, admin_password=None,
                           reset_password_required=False):
        """
        Associate a customization script with a guest OS and execute the script.

        The VMware tools must be installed in the Guest OS.

        :param vm_name: (str): The name of the vm to be customized.
        :param customization_script: (str, Optional): The path to a file on the local file system containing the customization script.
        :param computer_name: (str, Optional): A new value for the the computer name. A default value for the template is used if a value is not set.
        :param admin_password: (str, Optional): A password value for the admin/root user. A password is autogenerated if a value is not supplied.
        :param reset_password_required: (bool): Force the user to reset the password on first login.
        :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request. \n
            if the task cannot be created a debug level log message is generated detailing the reason.
        """
        children = self.me.get_Children()
        if children:
            vms = [vm for vm in children.get_Vm() if vm.name == vm_name]
            if len(vms) == 1:
                sections = vms[0].get_Section()
                # Locate the VM's GuestCustomizationSection and mutate it in
                # place before re-uploading it.
                customization_section = [section for section in sections
                         if (section.__class__.__name__ ==
                             "GuestCustomizationSectionType")
                        ][0]
                customization_section.set_Enabled(True)
                customization_section.set_ResetPasswordRequired(
                    reset_password_required)
                # Auto-logon is explicitly disabled regardless of arguments.
                customization_section.set_AdminAutoLogonEnabled(False)
                customization_section.set_AdminAutoLogonCount(0)
                if customization_script:
                    customization_section.set_CustomizationScript(
                        customization_script)
                if computer_name:
                    customization_section.set_ComputerName(computer_name)
                if admin_password:
                    # Supplying a password disables auto-generation.
                    customization_section.set_AdminPasswordEnabled(True)
                    customization_section.set_AdminPasswordAuto(False)
                    customization_section.set_AdminPassword(admin_password)
                output = StringIO()
                customization_section.export(output,
                    0,
                    name_ = 'GuestCustomizationSection',
                    namespacedef_ = 'xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"',
                    pretty_print = True)
                # Rewrite the generated namespaces into the form vCD expects.
                body = output.getvalue().\
                    replace("vmw:", "").replace('Info xmlns:vmw="http://www.vmware.com/vcloud/v1.5" msgid=""', "ovf:Info").\
                    replace("/Info", "/ovf:Info")
                # NOTE(review): 'headers = self.headers' aliases (not copies)
                # the shared dict, so the Content-type set here persists on
                # self.headers for later requests — confirm this is intended.
                headers = self.headers
                headers['Content-type'] = 'application/vnd.vmware.vcloud.guestcustomizationsection+xml'
                self.response = Http.put(customization_section.Link[0].href, data=body, headers=headers, verify=self.verify, logger=self.logger)
                if self.response.status_code == requests.codes.accepted:
                    return taskType.parseString(self.response.content, True)
                else:
                    Log.debug(self.logger, "failed; response status=%d, content=%s" % (self.response.status_code, self.response.text))
def force_customization(self, vm_name, power_on=True):
"""
Force the guest OS customization script to be run for a specific vm in the vApp.
A customization script must have been previously associated with the VM
using the pyvcloud customize_guest_os method or using the vCD console
The VMware tools must be installed in the Guest OS.
:param vm_name: (str): The name of the vm to be customized.
:param power_on (bool): Wether to power the vm on after customization or not
:return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request.b\n
if the task cannot be created a debug level log message is generated detailing the reason.
"""
children = self.me.get_Children()
if children:
vms = [vm for vm in children.get_Vm() if vm.name == vm_name]
if len(vms) == 1:
sections = vms[0].get_Section()
links = filter(lambda link: link.rel== "deploy", vms[0].Link)
if len(links) == 1:
forceCustomizationValue = 'true'
deployVAppParams = vcloudType.DeployVAppParamsType()
if power_on:
deployVAppParams.set_powerOn('true')
else:
deployVAppParams.set_powerOn('false')
deployVAppParams.set_deploymentLeaseSeconds(0)
deployVAppParams.set_forceCustomization('true')
body = CommonUtils.convertPythonObjToStr(deployVAppParams, name = "DeployVAppParams",
namespacedef = 'xmlns="http://www.vmware.com/vcloud/v1.5"')
headers = self.headers
headers['Content-type'] = 'application/vnd.vmware.vcloud.deployVAppParams+xml'
self.response = Http.post(links[0].href, data=body, headers=headers, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.accepted:
return taskType.parseString(self.response.content, True)
else:
Log.debug(self.logger, "response status=%d, content=%s" % (self.response.status_code, self.response.text))
def get_vms_network_info(self):
"""
List details of the networks associated with each of the vms in the vApp
:return: (list) a list, one entry per vm, each vm entry contains a list, one entry per network, \n
each network entry contains a dictionary of properties for the network. \n
Dictionary keys 'network_name', 'ip', 'mac', 'is_connected', 'is_primary', 'allocation_mode'
"""
result = []
vms = self._get_vms()
for vm in vms:
nw_connections = []
sections = vm.get_Section()
networkConnectionSection = filter(lambda section: section.__class__.__name__ == "NetworkConnectionSectionType", sections)[0]
primary_index = networkConnectionSection.get_PrimaryNetworkConnectionIndex()
connections = networkConnectionSection.get_NetworkConnection()
for connection in connections:
nw_connections.append(
{'network_name': connection.get_network(),
'ip': connection.get_IpAddress(),
'mac': connection.get_MACAddress(),
'is_connected': connection.get_IsConnected(),
'is_primary': connection.get_NetworkConnectionIndex() == primary_index,
'allocation_mode': connection.get_IpAddressAllocationMode()
})
result.append(nw_connections)
return result
def customize_on_next_poweron(self):
"""
Force the guest OS customization script to be run for the first VM in the vApp.
A customization script must have been previously associated with the VM
using the pyvcloud customize_guest_os method or using the vCD console
The VMware tools must be installed in the Guest OS.
:return: (bool) True if the request was accepted, False otherwise. If False an error level log message is generated.
"""
vm = self._get_vms()[0]
link = filter(lambda link: link.get_rel() == "customizeAtNextPowerOn",
vm.get_Link())
if link:
self.response = Http.post(link[0].get_href(), data=None,
headers=self.headers, logger=self.logger)
if self.response.status_code == requests.codes.no_content:
return True
Log.error(self.logger, "link not found")
return False
    def get_vms_details(self):
        """
        Return a list of the details for all VMs contained in the vApp.

        :return: (list) one dict per VM with keys 'name', 'status', 'cpus',
            'memory' (GB, integer-divided), 'memory_mb', 'os', 'owner',
            'admin_password', 'reset_password_required'.
        """
        result = []
        children = self.me.get_Children()
        if children:
            vms = children.get_Vm()
            for vm in vms:
                name = vm.get_name()
                # Map the numeric vCD status code to its symbolic name.
                status = VCLOUD_STATUS_MAP[vm.get_status()]
                owner = self.me.get_Owner().get_User().get_name()
                sections = vm.get_Section()
                virtualHardwareSection = filter(lambda section: section.__class__.__name__== "VirtualHardwareSection_Type", sections)[0]
                items = virtualHardwareSection.get_Item()
                # CPU and memory counts are parsed out of the human-readable
                # ElementName strings ("N virtual CPU(s)", "N MB of memory").
                cpu = filter(lambda item: item.get_Description().get_valueOf_() == "Number of Virtual CPUs", items)[0]
                cpu_capacity = int(cpu.get_ElementName().get_valueOf_().split(" virtual CPU(s)")[0])
                memory = filter(lambda item: item.get_Description().get_valueOf_() == "Memory Size", items)[0]
                memory_capacity_mb = int(memory.get_ElementName().get_valueOf_().split(" MB of memory")[0])
                # Integer division: 'memory' is whole GB (Python 2 semantics).
                memory_capacity = memory_capacity_mb / 1024
                operatingSystemSection = filter(lambda section: section.__class__.__name__== "OperatingSystemSection_Type", sections)[0]
                os = operatingSystemSection.get_Description().get_valueOf_()
                customization_section = filter(lambda section: section.__class__.__name__== "GuestCustomizationSectionType", sections)[0]
                result.append(
                    {'name': name,
                     'status': status,
                     'cpus': cpu_capacity,
                     'memory': memory_capacity,
                     'memory_mb': memory_capacity_mb,
                     'os': os,
                     'owner': owner,
                     'admin_password': customization_section.get_AdminPassword(),
                     'reset_password_required': customization_section.get_ResetPasswordRequired()
                    }
                )
        Log.debug(self.logger, "details of VMs: %s" % result)
        return result
    def modify_vm_name(self, vm_index, vm_name):
        """
        Modify the name of a VM in a vApp.

        :param vm_index: (int): The index of the VM in the vApp; 1 == first VM.
        :param vm_name: (str): The new name of the VM.
        :return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request. \n
            if the task cannot be created a debug level log message is generated detailing the reason.
        :raises: Exception: If the named VM cannot be located or another error occured.
        """
        children = self.me.get_Children()
        if children:
            assert len(children.get_Vm()) >= vm_index
            # vm_index is 1-based per the docstring.
            vm = children.get_Vm()[vm_index-1]
            assert vm
            href = vm.get_href()
            vm_name_old = vm.get_name()
            Log.debug(self.logger, "VM name change (%s) %s -> %s" % (vm_index, vm_name_old, vm_name))
            vm.set_name(vm_name)
            # Strip all sections so the reconfigureVm request only carries the
            # name change.
            vm.set_Section([])
            output = StringIO()
            vm.export(output,
                0,
                name_ = 'Vm',
                namespacedef_ = 'xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:vmw="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"',
                pretty_print = True)
            body = output.getvalue()
            # NOTE(review): aliases self.headers; the Content-type set here
            # persists on the shared dict.
            headers = self.headers
            headers['Content-type'] = 'application/vnd.vmware.vcloud.vm+xml'
            self.response = Http.post(href+'/action/reconfigureVm', data=body, headers=headers, verify=self.verify, logger=self.logger)
            if self.response.status_code == requests.codes.accepted:
                return taskType.parseString(self.response.content, True)
            else:
                raise Exception(self.response.status_code)
        raise Exception('can\'t find vm')
def modify_vm_memory(self, vm_name, new_size):
"""
Modify the virtual Memory allocation for VM.
:param vm_name: (str): The name of the vm to be customized.
:param new_size: (int): The new memory allocation in MB.
:return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request. \n
if the task cannot be created a debug level log message is generated detailing the reason.
:raises: Exception: If the named VM cannot be located or another error occured.
"""
children = self.me.get_Children()
if children:
vms = [vm for vm in children.get_Vm() if vm.name == vm_name]
if len(vms) == 1:
sections = vm.get_Section()
virtualHardwareSection = filter(lambda section: section.__class__.__name__== "VirtualHardwareSection_Type", sections)[0]
items = virtualHardwareSection.get_Item()
memory = filter(lambda item: item.get_Description().get_valueOf_() == "Memory Size", items)[0]
href = memory.get_anyAttributes_().get('{http://www.vmware.com/vcloud/v1.5}href')
en = memory.get_ElementName()
en.set_valueOf_('%s MB of memory' % new_size)
memory.set_ElementName(en)
vq = memory.get_VirtualQuantity()
vq.set_valueOf_(new_size)
memory.set_VirtualQuantity(vq)
weight = memory.get_Weight()
weight.set_valueOf_(str(int(new_size)*10))
memory.set_Weight(weight)
memory_string = CommonUtils.convertPythonObjToStr(memory, 'Memory')
Log.debug(self.logger, "memory: \n%s" % memory_string)
output = StringIO()
memory.export(output,
0,
name_ = 'Item',
namespacedef_ = 'xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"',
pretty_print = True)
body = output.getvalue().\
replace('Info msgid=""', "ovf:Info").replace("/Info", "/ovf:Info").\
replace("vmw:", "").replace("class:", "rasd:").replace("ResourceType", "rasd:ResourceType")
headers = self.headers
headers['Content-type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
self.response = Http.put(href, data=body, headers=headers, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.accepted:
return taskType.parseString(self.response.content, True)
else:
raise Exception(self.response.status_code)
raise Exception('can\'t find vm')
def modify_vm_cpu(self, vm_name, cpus):
"""
Modify the virtual CPU allocation for VM.
:param vm_name: (str): The name of the vm to be customized.
:param cpus: (int): The number of virtual CPUs allocated to the VM.
:return: (TaskType) a :class:`pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities.TaskType` object that can be used to monitor the request. \n
if the task cannot be created a debug level log message is generated detailing the reason.
:raises: Exception: If the named VM cannot be located or another error occured.
"""
children = self.me.get_Children()
if children:
vms = [vm for vm in children.get_Vm() if vm.name == vm_name]
if len(vms) == 1:
sections = vm.get_Section()
virtualHardwareSection = filter(lambda section: section.__class__.__name__== "VirtualHardwareSection_Type", sections)[0]
items = virtualHardwareSection.get_Item()
cpu = filter(lambda item: (item.get_anyAttributes_().get('{http://www.vmware.com/vcloud/v1.5}href') != None and item.get_anyAttributes_().get('{http://www.vmware.com/vcloud/v1.5}href').endswith('/virtualHardwareSection/cpu')), items)[0]
href = cpu.get_anyAttributes_().get('{http://www.vmware.com/vcloud/v1.5}href')
en = cpu.get_ElementName()
en.set_valueOf_('%s virtual CPU(s)' % cpus)
cpu.set_ElementName(en)
vq = cpu.get_VirtualQuantity()
vq.set_valueOf_(cpus)
cpu.set_VirtualQuantity(vq)
cpu_string = CommonUtils.convertPythonObjToStr(cpu, 'CPU')
output = StringIO()
cpu.export(output,
0,
name_ = 'Item',
namespacedef_ = 'xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"',
pretty_print = True)
body = output.getvalue().\
replace('Info msgid=""', "ovf:Info").replace("/Info", "/ovf:Info").\
replace("vmw:", "").replace("class:", "rasd:").replace("ResourceType", "rasd:ResourceType")
headers = self.headers
headers['Content-type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
self.response = Http.put(href, data=body, headers=headers, verify=self.verify, logger=self.logger)
if self.response.status_code == requests.codes.accepted:
return taskType.parseString(self.response.content, True)
else:
raise Exception(self.response.status_code)
raise Exception('can\'t find vm')
def _get_vms(self):
children = self.me.get_Children()
if children:
return children.get_Vm()
else:
return []
def _modify_networkConnectionSection(self, section, new_connection,
primary_index=None):
#Need to add same interface more than once for a VM , so commenting out below lines
# for networkConnection in section.get_NetworkConnection():
# if (networkConnection.get_network().lower() ==
# new_connection.get_network().lower()):
# return (False,
# "VApp {0} is already connected to org vdc network {1}"
# .format(self.name, networkConnection.get_network()))
section.add_NetworkConnection(new_connection)
if section.get_Info() is None:
info = vcloudType.Msg_Type()
info.set_valueOf_("Network connection")
section.set_Info(info)
if primary_index is not None:
section.set_PrimaryNetworkConnectionIndex(primary_index)
def _create_networkConnection(self, network_name, index, ip_allocation_mode,
mac_address=None, ip_address=None):
networkConnection = vcloudType.NetworkConnectionType()
networkConnection.set_network(network_name)
networkConnection.set_NetworkConnectionIndex(index)
networkConnection.set_IpAddressAllocationMode(ip_allocation_mode)
networkConnection.set_IsConnected(True)
if ip_address and ip_allocation_mode == 'MANUAL':
networkConnection.set_IpAddress(ip_address)
if mac_address:
networkConnection.set_MACAddress(mac_address)
return networkConnection
|
|
from __future__ import print_function
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="1"
import argparse
import csv
import os
import os.path
import shutil
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data as data
import torchvision.datasets as datasets
#cudnn.enabled = False
cudnn.benchmark = False
#import torchvision.models as models
#import torchvision.transforms as transforms
#from PIL import Image
#from models.single_modality.word_embeddings import load_data as w_load_data
#from models.single_modality.word_embeddings import seq_model as w_seq_model
from models.multimodal import load_data as m_load_data
from models.multimodal import seq_model as m_seq_model
import torchvision.transforms as transforms
torch.multiprocessing.set_sharing_strategy('file_system')
import pickle
# Only one architecture is currently registered; kept as a list so the
# argparse 'choices' and the help string stay in sync if more are added.
model_names = []
model_names.append('words_embeddings')
# Command-line interface.
# NOTE(review): the description still says "Cats vs Dogs" but the imports
# above reference a multimodal job-screening dataset — likely copied from a
# template; confirm before relying on the help text.
parser = argparse.ArgumentParser(description='PyTorch Cats vs Dogs fine-tuning example')
parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument(
    '--arch',
    metavar='ARCH',
    default='words_embeddings',
    choices=model_names,
    help='model architecture: ' + ' | '.join(model_names) + ' (default: words_embeddings)')
parser.add_argument('--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch_size', default=16, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=1e-4, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--weight-decay', default=1e-4, type=float, metavar='W', help='weight decay')
parser.add_argument('--print-freq', default=1, type=int, metavar='N', help='print frequency')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint')
parser.add_argument('--resume2', default='', type=str, metavar='PATH', help='path to latest checkpoint')
parser.add_argument('--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
parser.add_argument('--test', dest='test', action='store_true', help='evaluate model on test set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model')
parser.add_argument('--cweights', default='', type=str, metavar='PATH', help='path to the file containing the list of labels')
parser.add_argument('--softbatch', default=16, type=int, metavar='N', help='optimize parameters afer softbatch/batch_size samples')
parser.add_argument('--save-path', default='./', type=str, metavar='PATH', help='path to save checkpoints')
# Best validation precision seen so far; rebound inside main() via
# 'global best_prec1'.
best_prec1 = 0
# Global CUDA toggle — presumably consulted by the training code below;
# main() is not fully visible here, so confirm its effect before changing.
USE_CUDA=True
def main():
    """Program entry point.

    Parses CLI arguments, loads the pickled multimodal readers
    (train / validation / test), builds the data loaders and the
    word-embedding sequence model, then runs exactly one of:
    test-set prediction (--test), validation only (--evaluate), or the
    full train/validate loop with checkpointing.
    """
    global args, best_prec1
    args = parser.parse_args()
    # Data loading. The disabled branch below is a one-off preprocessing path:
    # it builds the readers from the raw corpus and pickles them to disk so
    # that normal runs can simply deserialize them (the `else` branch).
    # Flip the condition to True to regenerate the serialized readers.
    if False:
        base_path_gremlin='/mnt/3T-NAS/Databases/jobScreening_cvpr17/'
        base_path_croissant='/disks/md0-4T/users/csp/jobScreening_cvpr17_partial/'
        base_path = base_path_croissant
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        reader = m_load_data.MultimodalReader(
            base_path_gremlin + '/train/annotation_training.pkl',
            base_path_gremlin + '/train/transcripts/ctms',
            base_path_gremlin + '/train/audios/fbank',
            base_path_croissant + '/train/faces/vgg_features',
            preload_path = base_path_croissant + '/train/preload/',
            transform=transforms.Compose([
                transforms.Scale(240),
                transforms.RandomSizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]))
        # Validation and test readers reuse the training vocabulary so that
        # word ids stay consistent across splits.
        reader_validation = m_load_data.MultimodalReader(
            base_path_gremlin + '/validation/annotation_validation.pkl',
            base_path_gremlin + '/validation/transcripts/ctms',
            base_path_gremlin + '/validation/audios/fbank',
            base_path_croissant + '/validation/faces/vgg_features',
            preload_path = base_path_croissant + '/validation/preload/',
            transform=transforms.Compose([
                transforms.Scale(240),
                transforms.RandomSizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]),
            word2id=reader.word2id,
            id2word=reader.id2word
        )
        reader_test = m_load_data.MultimodalReader(
            base_path_gremlin + '/test/annotation_test.pkl',
            base_path_gremlin + '/test/transcripts/ctms',
            base_path_gremlin + '/test/audios/fbank',
            base_path_croissant + '/test/faces/vgg_features',
            preload_path = base_path_croissant + '/test/preload/',
            transform=transforms.Compose([
                transforms.Scale(240),
                transforms.RandomSizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]),
            word2id=reader.word2id,
            id2word=reader.id2word
        )
        with open('serialized_reader.pickle','wb') as stream:
            pickle.dump(reader,stream)
        with open('serialized_reader_validation.pickle','wb') as stream:
            pickle.dump(reader_validation,stream)
        with open('serialized_reader_test.pickle','wb') as stream:
            pickle.dump(reader_test,stream)
        return
    else:
        print("Loading serialized_reader")
        with open('serialized_reader.pickle','rb') as stream:
            reader = pickle.load(stream)
        print("Loaded serialized_reader")
        print("Loading serialized_reader_validation")
        with open('serialized_reader_validation.pickle','rb') as stream:
            reader_validation = pickle.load(stream)
        print("Loaded serialized_reader_validation")
        print("Loading serialized_reader_test")
        with open('serialized_reader_test.pickle','rb') as stream:
            reader_test = pickle.load(stream)
        print("Loaded serialized_reader_test")
    train_loader = data.DataLoader(
        reader,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=4,
        pin_memory=False, collate_fn=m_load_data.my_collate)
    val_loader = data.DataLoader(
        reader_validation,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=4,
        pin_memory=False, collate_fn=m_load_data.my_collate)
    # The test loader uses batch_size=1 and no shuffling so per-sample
    # predictions can be emitted in a stable order.
    test_loader = data.DataLoader(
        reader_test,
        batch_size=1,
        shuffle=False,
        num_workers=4,
        pin_memory=False, collate_fn=m_load_data.my_collate)
    if args.arch == "words_embeddings":
        model = m_seq_model.Word_Embeddings_sequence_model(vocab_size=len(train_loader.dataset.word2id),embedding_size=16,
            nhid=256, nlayers=2)
    if USE_CUDA:
        model = model.cuda()
    # Optionally resume from a checkpoint.
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            # Bug fix: report the checkpoint that was actually loaded;
            # this used to print args.evaluate instead of args.resume.
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    if args.test:
        print("Testing the model and generating a output csv for submission")
        test(test_loader, train_loader.dataset.class_to_idx, model)
        return
    # Define loss function (criterion) and optimizer.
    if args.cweights != '':
        # Derive balanced class weights from a file with one label per line.
        from sklearn.utils import class_weight
        clabels = []
        with open(args.cweights, 'r') as stream:
            for line in stream:
                line=line.strip()
                clabels.append(line)
        class_weight = class_weight.compute_class_weight('balanced', np.unique(clabels),clabels)
        class_weight = torch.from_numpy(class_weight).float()
    else:
        class_weight = None
    # Trait scores are regressed, hence MSE rather than cross-entropy.
    criterion = nn.MSELoss()
    if USE_CUDA:
        criterion.cuda()
    # Only optimize parameters that require gradients (frozen layers excluded).
    optimizer = optim.Adam( filter(lambda p: p.requires_grad, model.parameters()) , args.lr, weight_decay=args.weight_decay)
    if args.evaluate:
        validate(val_loader, model, criterion)
        return
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        # Train for one epoch.
        train(train_loader, model, criterion, optimizer, epoch)
        # Evaluate on the validation set.
        prec1 = validate(val_loader, model, criterion)
        # Remember the best accuracy and save a checkpoint.
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best)
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch.

    Each batch randomly trains exactly one modality (text / audio / video)
    and gradients are accumulated over ``args.softbatch`` samples before an
    optimizer step ("soft batching").
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    # One accuracy meter per personality trait.
    accs = {}
    for key in train_loader.dataset.traits:
        accs[key] = AverageMeter()
    #import tracemalloc
    #tracemalloc.start()
    # switch to train mode
    model.train()
    # Number of loader batches that make up one gradient-accumulation step.
    soft_batch = int(np.ceil(args.softbatch/args.batch_size))
    #acc_loss = 0
    optimizer.zero_grad()
    end = time.time()
    for i, (transcripts, faces, filterbanks, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if USE_CUDA:
            target = target.cuda(async=True)
        # Fresh recurrent state, sized to the current batch.
        hidden = model.init_hidden(transcripts.size()[0])
        if USE_CUDA:
            hidden = (hidden[0].cuda(), hidden[1].cuda())
        # Pick exactly one modality to train this batch, uniformly at random.
        train_text=False
        train_audio=False
        train_video=False
        urand = np.random.uniform()
        if urand < 1.0/3.0:
            train_text = True
        elif urand > 2.0/3.0:
            train_audio = True
        else:
            train_video = True
        y_pred = model(transcripts, faces, filterbanks, hidden, train_text = train_text, train_audio=train_audio, train_video=train_video)
        ##print(images)
        ##image_var = torch.autograd.Variable(images)
        #images=faces[0]
        ##images=images[0:4,:,:,:,:]
        #image_var = images
        ##print(images.size()[0])
        label_var = torch.autograd.Variable(target)
        #hidden = model.init_hidden(images.size()[0])
        #if USE_CUDA:
        #hidden = (hidden[0].cuda(), hidden[1].cuda())
        ##print(label_var)
        #y_pred = model(image_var, hidden)
        ##print(y_pred)
        loss = criterion(y_pred, label_var)
        # measure accuracy and record loss
        #prec1, prec1 = accuracy(y_pred.data, target, topk=(1, 1))
        losses.update(loss.data[0], transcripts.size(0))
        #acc.update(prec1[0], transcripts.size(0))
        # Per-trait accuracy is 1 - mean absolute error (see regression_accuracy).
        # NOTE(review): the trailing [0] assumes the reduced dim is kept —
        # confirm against the installed torch version.
        curr_accuracy = regression_accuracy(y_pred.data, target).cpu().numpy()[0]
        for j, key in enumerate(train_loader.dataset.traits):
            accs[key].update(curr_accuracy[j], transcripts.size(0))
        # Gradients accumulate across iterations until a soft-batch boundary.
        #optimizer.zero_grad()
        loss.backward()
        #optimizer.step()
        if (i+1) % soft_batch == 0:
            print("Optimizing parameters")
            optimizer.step()
            acc_loss = 0
            optimizer.zero_grad()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        #snapshot = tracemalloc.take_snapshot()
        #stats_file = open('stats.txt','w')
        #top_stats = snapshot.statistics('lineno')
        #for ts in top_stats:
        #stats_file.write(str(ts))
        #stats_file.write('\n')
        #stats_file.close()
        #return
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Interview Acc {interview.val:.3f} ({interview.avg:.3f})\t'
                  'agreeableness {agreeableness.val:.3f} ({agreeableness.avg:.3f})\t'
                  'conscientiousness {conscientiousness.val:.3f} ({conscientiousness.avg:.3f})\t'
                  'extraversion {extraversion.val:.3f} ({extraversion.avg:.3f})\t'
                  'neuroticism {neuroticism.val:.3f} ({neuroticism.avg:.3f})\t'
                  'openness {openness.val:.3f} ({openness.avg:.3f})'
                  .format(
                      epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, interview=accs['interview'], agreeableness=accs['agreeableness'], conscientiousness=accs['conscientiousness'], extraversion=accs['extraversion'], neuroticism=accs['neuroticism'], openness=accs['openness'] ))
def validate(val_loader, model, criterion):
    """Evaluate the model on the validation set.

    Returns the mean of the per-trait running accuracies.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    #acc = AverageMeter()
    # One accuracy meter per personality trait.
    accs = {}
    for key in val_loader.dataset.traits:
        accs[key] = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    for i, (transcripts, faces, filterbanks, target) in enumerate(val_loader):
        #data_time.update(time.time() - end)
        if USE_CUDA:
            target = target.cuda(async=True)
        hidden = model.init_hidden(transcripts.size()[0])
        if USE_CUDA:
            hidden = (hidden[0].cuda(), hidden[1].cuda())
        y_pred = model(transcripts, faces, filterbanks, hidden)
        # NOTE(review): volatile=True is applied to the target only, after the
        # forward pass already ran — it does not save inference memory here;
        # confirm intent.
        label_var = torch.autograd.Variable(target, volatile=True) # compute y_pred
        loss = criterion(y_pred, label_var)
        # measure accuracy and record loss
        #prec1, temp_var = accuracy(y_pred.data, labels, topk=(1, 1))
        losses.update(loss.data[0], transcripts.size(0))
        #acc.update(prec1[0], transcripts.size(0))
        #acc.update( 0.5, transcripts.size(0))
        # Mean of the running per-trait accuracies, recomputed each batch.
        acc_mean=0
        curr_accuracy = regression_accuracy(y_pred.data, target).cpu().numpy()[0]
        for j, key in enumerate(val_loader.dataset.traits):
            accs[key].update(curr_accuracy[j], transcripts.size(0))
            acc_mean += accs[key].avg
        # NOTE(review): hard-coded 6.0 assumes exactly six traits; keep in
        # sync with val_loader.dataset.traits.
        acc_mean = acc_mean / 6.0
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('TrainVal: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Interview Acc {interview.val:.3f} ({interview.avg:.3f})\t'
                  'agreeableness {agreeableness.val:.3f} ({agreeableness.avg:.3f})\t'
                  'conscientiousness {conscientiousness.val:.3f} ({conscientiousness.avg:.3f})\t'
                  'extraversion {extraversion.val:.3f} ({extraversion.avg:.3f})\t'
                  'neuroticism {neuroticism.val:.3f} ({neuroticism.avg:.3f})\t'
                  'openness {openness.val:.3f} ({openness.avg:.3f})'
                  .format(
                      i, len(val_loader), batch_time=batch_time, loss=losses, interview=accs['interview'], agreeableness=accs['agreeableness'], conscientiousness=accs['conscientiousness'], extraversion=accs['extraversion'], neuroticism=accs['neuroticism'], openness=accs['openness'] ))
    print(' * Accuracy {acc_mean:.3f}'.format(acc_mean=acc_mean))
    return acc_mean
def test(test_loader, class_to_idx, model):
    """Generate per-sample and per-subject probability CSVs for submission.

    NOTE(review): this path is known-broken (see the inline "THIS IS STILL
    BAD" marker): it references names never defined in this scope
    (``labels``, ``filepath``) and calls the model without the filterbank
    input used everywhere else. As written it raises NameError on the first
    batch — do not rely on it without fixing those references.
    """
    csv_map = {}
    csv2_map = {}
    # switch to evaluate mode
    model.eval()
    old_sbj_id = ""
    acc_probs = np.zeros(7)
    ncount = 0
    for i, (transcripts, faces, filterbanks, target) in enumerate(test_loader):
        hidden = model.init_hidden(transcripts.size()[0])
        if USE_CUDA:
            hidden = (hidden[0].cuda(), hidden[1].cuda())
        # NOTE(review): train()/validate() pass filterbanks to the model;
        # this call omits it — confirm the intended model signature.
        y_pred = model(transcripts, faces, hidden)
        # NOTE(review): ``labels`` is undefined in this scope (NameError).
        label_var = torch.autograd.Variable(labels, volatile=True) # compute y_pred
        ### THIS IS STILL BAD
        scores = y_pred.data.cpu().numpy()
        # NOTE(review): this inner loop shadows the outer batch index ``i``.
        for i, key in enumerate(test_loader.dataset.traits):
            out = ' ' + key + ' ' + str(scores[i])
            sys.stdout.write(out)
            #print(key, scores[i])
        print("")
        # Softmax over the emotion logits; look up each class probability.
        smax = nn.Softmax()
        smax_out = smax(y_pred)[0]
        angry_prob = smax_out.data[class_to_idx['Angry']]
        disgust_prob = smax_out.data[class_to_idx['Disgust']]
        fear_prob = smax_out.data[class_to_idx['Fear']]
        happy_prob = smax_out.data[class_to_idx['Happy']]
        neutral_prob = smax_out.data[class_to_idx['Neutral']]
        sad_prob = smax_out.data[class_to_idx['Sad']]
        surprise_prob = smax_out.data[class_to_idx['Surprise']]
        #cat_prob = smax_out.data[0]
        #dog_prob = smax_out.data[1]
        #prob = dog_prob
        #if cat_prob > dog_prob:
        #prob = 1 - cat_prob
        #prob = np.around(prob, decimals=4)
        #prob = np.clip(prob, .0001, .999)
        # NOTE(review): ``filepath`` is never assigned in this function.
        csv_map[filepath] = [angry_prob, disgust_prob, fear_prob, happy_prob, neutral_prob, sad_prob, surprise_prob]
        #print(filepath, {"Angry" : angry_prob, "Disgust" : disgust_prob, "Fear" : fear_prob, "Happy": happy_prob, "Neutral" : neutral_prob, "Sad" : sad_prob, "Surprise" : surprise_prob})
        # Average probabilities per subject (file names share a subject prefix).
        sbj_id = str(filepath).strip().split('/')[-1].split('_')[0]
        if sbj_id != old_sbj_id:
            if old_sbj_id != "":
                acc_probs = acc_probs / ncount
                csv2_map[old_sbj_id] = acc_probs.tolist()
                print(old_sbj_id, acc_probs.tolist())
            acc_probs = np.array([angry_prob, disgust_prob, fear_prob, happy_prob, neutral_prob, sad_prob, surprise_prob])
            ncount = 1
        else:
            acc_probs = acc_probs + np.array([angry_prob, disgust_prob, fear_prob, happy_prob, neutral_prob, sad_prob, surprise_prob])
            ncount = ncount +1
        old_sbj_id = sbj_id
    # NOTE(review): the last subject's accumulated probabilities are never
    # flushed into csv2_map after the loop ends.
    # Per-subject averages. ``args.data`` must be defined by an argument
    # declared earlier in the file — TODO confirm.
    with open(os.path.join(args.data, 'entry2.csv'), 'w') as csvfile:
        csv_w = csv.writer(csvfile)
        csv_w.writerow(('id', 'Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise'))
        for row in sorted(csv2_map.items()):
            csv_w.writerow( tuple((str(row[0])+','+str(','.join([str(a) for a in row[1]]))).split(',')) )
    # Per-sample probabilities.
    with open(os.path.join(args.data, 'entry.csv'), 'w') as csvfile:
        csv_w = csv.writer(csvfile)
        csv_w.writerow(('id', 'Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise'))
        for row in sorted(csv_map.items()):
            csv_w.writerow( tuple((str(row[0])+','+str(','.join([str(a) for a in row[1]]))).split(',')) )
    return
def save_checkpoint(state, is_best):
    """Write the latest checkpoint under args.save_path; mirror it to
    model_best.pth.tar when this epoch is the best so far."""
    ckpt_path = args.save_path + 'checkpoint.pth.tar'
    torch.save(state, ckpt_path)
    if not is_best:
        return
    shutil.copyfile(ckpt_path, args.save_path + 'model_best.pth.tar')
class AverageMeter(object):
    """Tracks the latest value plus running sum, count, and mean of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, base_lr=None):
    """Set the learning rate to the initial LR decayed by 10 every 30 epochs.

    Args:
        optimizer: torch optimizer whose param groups are updated in place.
        epoch: current (0-based) epoch number.
        base_lr: initial learning rate; defaults to the global ``args.lr``
            (kept optional for backward compatibility with existing callers).
    """
    if base_lr is None:
        base_lr = args.lr
    lr = base_lr * (0.1 ** (epoch // 30))
    # Bug fix: mutate the live param groups. ``optimizer.state_dict()``
    # returns packed copies of the param groups, so writing the LR there
    # (as the old code did) never reached the optimizer.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def accuracy(y_pred, y_actual, topk=(1, )):
    """Compute precision@k, as a percentage, for each k in ``topk``."""
    maxk = max(topk)
    batch_size = y_actual.size(0)

    # Indices of the top-k predictions, transposed to (k, batch).
    _, top_idx = y_pred.topk(maxk, 1, True, True)
    top_idx = top_idx.t()
    hits = top_idx.eq(y_actual.view(1, -1).expand_as(top_idx))

    results = []
    for k in topk:
        n_correct_k = hits[:k].view(-1).float().sum(0)
        results.append(n_correct_k.mul_(100.0 / batch_size))
    return results
def regression_accuracy(y_pred, y_actual):
    """Per-column accuracy: 1 minus the mean absolute error over dim 0."""
    abs_err = torch.abs(y_pred - y_actual)
    return (1.0 - abs_err).mean(dim=0)
class TestImageFolder(data.Dataset):
    """Flat dataset over every ``*jpg`` file found under the class
    subdirectories of ``root``.

    ``__getitem__`` yields ``(image, path)`` pairs; images are converted to
    RGB and run through ``transform`` when one is given.
    """

    def __init__(self, root, transform=None):
        found = []
        # Walk each class directory in sorted order so indexing is stable.
        for entry in sorted(os.listdir(root)):
            class_dir = os.path.join(root, entry)
            if not os.path.isdir(class_dir):
                continue
            for dirpath, _, filenames in sorted(os.walk(class_dir)):
                for fname in sorted(filenames):
                    if fname.endswith('jpg'):
                        found.append('{}'.format(os.path.join(dirpath, fname)))
        self.root = root
        self.imgs = found
        self.transform = transform

    def __getitem__(self, index):
        path = self.imgs[index]
        img = Image.open(path).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, path

    def __len__(self):
        return len(self.imgs)
if __name__ == '__main__':
    # Entry point when invoked as a script (not on import).
    main()
|
|
#!/usr/local/bin/python3
"""
containers
Python 3 version
This is a simple implementation of Stack, Queue, and Deque
Under the covers these are all derived from a doubly
linked list class
Clive Darke QA
"""
class _Node:
def __init__(self, Data):
self.NextNode = None
self.PrevNode = None
self.Data = Data
def SetNext(self, Next):
self.NextNode = Next
def SetPrev(self, Prev):
self.PrevNode = Prev
def __str__(self):
return str(self. Data)
def GetNext(self, current):
return current.NextNode
def GetPrev(self, current):
return current.PreNode
################################################################################
class _DLinkList:
__delim=','
@classmethod
def SetDelim(cls,delim):
_DLinkList.__delim = delim
def __init__(self):
self.__head = None
self.__tail = None
def __str__(self):
StrList = []
node = self.__head
while node:
StrList.append(str(node))
node = node.NextNode
return _DLinkList.__delim.join(StrList)
def __len__(self):
# Consider instead maintaining a class
# variable holding the current length (like Perl)
count = 0
node = self.__head
while node:
count += 1
node = node.NextNode
return count
def _AddToLeft(self, Data):
NewNode = _Node(Data)
if self.__head:
NewNode.SetNext(self.__head)
self.__head.SetPrev(NewNode)
else:
self.__tail = NewNode
self.__head = NewNode
NewNode.SetPrev(None)
def _AddToRight(self, Data):
NewNode = _Node(Data)
if self.__tail:
NewNode.SetPrev(self.__tail)
self.__tail.SetNext(NewNode)
else:
self.__head = NewNode
self.__tail = NewNode
self.__tail.SetNext(None)
def _GetNextFromLeft(self, from_node):
if from_node is None:
from_node = self.__head
return from_node.NextNode
def _GetNextFromRight(self, from_node):
if from_node is None:
from_node = self.__tail
return from_node.PrevNode
def _GetHead(self):
return self.__head
def _GetTail(self):
return self.__tail
def _RemoveFromLeft(self):
if self.__head:
OldHead = self.__head
self.__head = OldHead.NextNode
return OldHead.Data
else:
raise IndexError("Attempt to remove from an empty container")
def _RemoveFromRight(self):
if self.__tail:
OldTail = self.__tail
if self.__tail is self.__head:
self.__head = None
self.__tail = None
else:
self.__tail = OldTail.PrevNode
if self.__tail:
self.__tail.SetNext(None)
return OldTail.Data
else:
raise IndexError("Attempt to remove from an empty container")
################################################################################
class Stack(_DLinkList):
    """
    Implementation of a stack.
    Items are placed on the stack and accessed First-In Last-Out (FILO).
    """

    def __init__(self):
        super(Stack, self).__init__()

    def push(self, Data):
        "Push an item onto the stack"
        self._AddToLeft(Data)

    def pop(self):
        "Pop an item from the stack"
        return self._RemoveFromLeft()

    def __iter__(self):
        # Walk head -> tail, i.e. most recently pushed item first.
        node = self._GetHead()
        while node is not None:
            yield node.Data
            node = self._GetNextFromLeft(node)
###############################################################################
class Queue(_DLinkList):
    """
    Implementation of a queue.
    Items are placed on the queue and accessed First-In First-Out (FIFO).
    """

    def __init__(self):
        super(Queue, self).__init__()

    def enqueue(self, Data):
        "Places an item in the queue"
        self._AddToLeft(Data)

    def dequeue(self):
        "Remove and return the next item from the queue"
        return self._RemoveFromRight()

    def __iter__(self):
        # Walk tail -> head so items come out in dequeue (FIFO) order.
        node = self._GetTail()
        while node is not None:
            yield node.Data
            node = self._GetNextFromRight(node)
###############################################################################
class Deque(_DLinkList):
    """
    Implementation of a double-ended queue.
    Items may be added to and removed from either end.
    """

    def __init__(self):
        super(Deque, self).__init__()

    def unshift(self, Data):
        "Places an item on the left"
        self._AddToLeft(Data)

    def shift(self):
        "Remove and return the item on the left"
        return self._RemoveFromLeft()

    def push(self, Data):
        "Places an item on the right"
        self._AddToRight(Data)

    def pop(self):
        "Remove and return the item on the right"
        return self._RemoveFromRight()

    # A deque has no single natural direction, so it offers one explicit
    # generator per direction instead of __iter__.
    def from_right(self):
        """Yield items tail-to-head."""
        node = self._GetTail()
        while node is not None:
            yield node.Data
            node = self._GetNextFromRight(node)

    def from_left(self):
        """Yield items head-to-tail."""
        node = self._GetHead()
        while node is not None:
            yield node.Data
            node = self._GetNextFromLeft(node)
###############################################################################
if __name__ == "__main__":
    # Self-test / demo: exercise each container and its iterators.
    import sys

    print("STACK:")
    astack = Stack()
    astack.push(42)
    astack.push(37)
    astack.push(99)
    astack.push(12)
    print("Number of items in stack:", len(astack))
    print("stack iterator")
    print("Should print 12, 99, 37, 42")
    try:
        for item in astack:
            print(item)
    except TypeError as err:
        print("****", err, file = sys.stderr)

    print("\nQUEUE:")
    aqueue = Queue()
    aqueue.enqueue(12)
    aqueue.enqueue(99)
    aqueue.enqueue(37)
    aqueue.enqueue(42)
    print("Number of items in queue:", len(aqueue))
    print("queue iterator")
    print("Should print 12, 99, 37, 42")
    try:
        for item in aqueue:
            print(item)
    except TypeError as err:
        print("****", err, file = sys.stderr)

    print("\nDEQUE:")
    adeque = Deque()
    adeque.push(42)
    adeque.push(37)
    adeque.push(99)
    adeque.push(12)
    # Bug fix: this used to report len(aqueue) (copy-paste error), not the
    # deque's own size.
    print("Number of items in deque:", len(adeque))
    try:
        print("deque from_right iterator")
        for item in adeque.from_right():
            print(item)
    except AttributeError as err:
        print("****", err, file = sys.stderr)
    try:
        print("deque from_left iterator")
        for item in adeque.from_left():
            print(item)
    except AttributeError as err:
        print("****", err, file = sys.stderr)

    # List comprehensions over the container iterators.
    slist = [item for item in astack]
    print(slist)
    qlist = [item for item in aqueue]
    print(qlist)
    ldeque = [item for item in adeque.from_left()]
    print(ldeque)
    rdeque = [item for item in adeque.from_right()]
    print(rdeque)
|
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, test_util, model_helper, brew, build
@unittest.skipIf(build.CAFFE2_NO_OPERATOR_SCHEMA,
'Built with CAFFE2_NO_OPERATOR_SCHEMA')
class TestShapeInference(test_util.TestCase):
def testShapeInferenceSimpleFC(self):
m = model_helper.ModelHelper(name="test_model")
brew.fc(m, "data", "fc1", dim_in=96, dim_out=32)
brew.fc(m, "fc1", "fc2", dim_in=32, dim_out=55)
(shapes, types) = workspace.InferShapesAndTypes(
[m.param_init_net, m.net],
{'data': [64, 96]}
)
self.assertEquals(shapes['data'], [64, 96])
self.assertEquals(shapes['fc1_w'], [32, 96])
self.assertEquals(shapes['fc1_b'], [32])
self.assertEquals(shapes['fc1'], [64, 32])
self.assertEquals(shapes['fc2_w'], [55, 32])
self.assertEquals(shapes['fc2_b'], [55])
self.assertEquals(shapes['fc2'], [64, 55])
    def testFCAxis2(self):
        """FC applied with axis=2 on a 3-D input blob."""
        model = model_helper.ModelHelper(name="test_model")
        model.net.FC(["x", "w", "b"], ["y"], axis=2)
        workspace.FeedBlob("x", np.random.rand(4, 20, 36).astype(np.float32))
        workspace.FeedBlob("w", np.random.rand(36, 36).astype(np.float32))
        workspace.FeedBlob("b", np.random.rand(36,).astype(np.float32))
        self.InferTensorRunAndCompare(model)
    def testFCTransposed(self):
        """FCTransposed, whose weight blob is stored as (dim_in, dim_out)."""
        model = model_helper.ModelHelper(name="test_model")
        model.net.FCTransposed(["x", "wt", "b"], ["y"])
        workspace.FeedBlob("x", np.random.rand(20, 36).astype(np.float32))
        workspace.FeedBlob("wt", np.random.rand(36, 48).astype(np.float32))
        workspace.FeedBlob("b", np.random.rand(48,).astype(np.float32))
        self.InferTensorRunAndCompare(model)
    def testShapeInferenceSlice(self):
        """Slice with static start/end arguments and with runtime start/end tensors."""
        model = model_helper.ModelHelper(name="test_model")
        # Static bounds supplied as operator arguments.
        model.net.Slice(["x"], ["y"], starts=[0, 0, 0, 0], ends=[-1, -1, -3, -1])
        workspace.FeedBlob("x", np.random.rand(64, 1, 255, 384).astype(np.float32))
        # Same bounds, but supplied as input tensors instead of arguments.
        slice_starts = np.array([0, 0, 0, 0]).astype(np.int32)
        slice_ends = np.array([-1, -1, -3, -1]).astype(np.int32)
        slice_starts = model.net.GivenTensorIntFill(
            [], shape=[4], values=slice_starts)
        slice_ends = model.net.GivenTensorIntFill(
            [], shape=[4], values=slice_ends)
        model.net.Slice(["x2", slice_starts, slice_ends], ["y2"])
        workspace.FeedBlob("x2", np.random.rand(64, 1, 255, 384).astype(np.float32))
        # NOTE(review): "y2" is singled out to InferTensorRunAndCompare —
        # presumably because its bounds are runtime tensors and its shape
        # cannot be statically inferred; confirm against test_util.TestCase.
        self.InferTensorRunAndCompare(model, ["y2"])
    def testShapeInferenceDistances(self):
        """Distance/similarity ops on both 1-D and 2-D inputs."""
        model = model_helper.ModelHelper(name="test_model")
        # 1-D operands.
        model.net.L1Distance(["x1", "y1"], "dl1_D1")
        model.net.SquaredL2Distance(["x1", "y1"], "dl2_D1")
        model.net.CosineSimilarity(["x1", "y1"], "dcos_D1")
        model.net.DotProduct(["x1", "y1"], "ddot_D1")
        model.net.DotProductWithPadding(["x1", "y1"], "ddotpad_D1")
        # 2-D operands; the padded dot product takes differing inner dims.
        model.net.L1Distance(["x2", "y2"], "dl1_D2")
        model.net.SquaredL2Distance(["x2", "y2"], "dl2_D2")
        model.net.CosineSimilarity(["x2", "y2"], "dcos_D2")
        model.net.DotProduct(["x2", "y2"], "ddot_D2")
        model.net.DotProductWithPadding(["x2", "z2"], "ddotpad_D2")
        workspace.FeedBlob("x1", np.random.rand(10).astype(np.float32))
        workspace.FeedBlob("y1", np.random.rand(10).astype(np.float32))
        workspace.FeedBlob("x2", np.random.rand(10, 5).astype(np.float32))
        workspace.FeedBlob("y2", np.random.rand(10, 5).astype(np.float32))
        workspace.FeedBlob("z2", np.random.rand(10, 4).astype(np.float32))
        self.InferTensorRunAndCompare(model)
    def testShapeInferenceReduceBackFrontX(self):
        """ReduceBack*/ReduceFront* reductions on a 3-D input."""
        model = model_helper.ModelHelper(name="test_model")
        model.net.ReduceBackSum(["x"], ["x_back_sum"])
        model.net.ReduceBackMean(["x"], ["x_back_mean"])
        model.net.ReduceFrontSum(["x"], ["x_front_sum"])
        model.net.ReduceFrontMean(["x"], ["x_front_mean"])
        model.net.ReduceFrontMax(["x"], ["x_front_max"])
        workspace.FeedBlob("x", np.random.rand(10, 12, 18).astype(np.float32))
        self.InferTensorRunAndCompare(model)
    def testGather(self):
        """Gather with a 2-D int index blob into a 3-D data blob."""
        model = model_helper.ModelHelper(name="test_model")
        model.net.Gather(["X", "idx"], "Y")
        workspace.FeedBlob("X", np.random.rand(100, 4, 5).astype(np.float32))
        workspace.FeedBlob("idx", np.array([[3, 18], [99, 4], [2, 5]]).astype(np.int32))
        self.InferTensorRunAndCompare(model)
def testShapeInferenceConvNet(self):
model = model_helper.ModelHelper(name="convtest")
model.NHWC2NCHW("data", "data_nchw")
brew.conv(model, "data_nchw", 'conv1', 3, 64,
weight_init=("MSRAFill", {}), kernel=7,
stride=2, pad=3, no_bias=0)
brew.spatial_bn(model, 'conv1', 'conv1_spatbn_relu', 64, epsilon=1e-3, is_test=False)
brew.relu(model, 'conv1_spatbn_relu', 'conv1_spatbn_relu')
brew.max_pool(model, 'conv1_spatbn_relu', 'pool1', kernel=3, stride=2)
brew.fc(model, 'pool1', 'fc', dim_in=(64 * 56 * 56), dim_out=100)
brew.dropout(model, 'fc', 'fc_drop', is_test=False)
model.Sigmoid('fc_drop', 'fc_sigm')
brew.softmax(model, 'fc_sigm', 'softmax')
model.LabelCrossEntropy(['softmax', 'label'], 'xent')
loss = model.AveragedLoss('xent', 'loss')
model.AddGradientOperators([loss])
LR = model.param_init_net.ConstantFill(
[], 'LR', shape=[1], value=0.1
)
for param in model.GetParams():
param_grad = model.param_to_grad[param]
param_momentum = model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
)
model.net.MomentumSGDUpdate(
[param_grad, param_momentum, LR, param],
[param_grad, param_momentum, param],
)
workspace.FeedBlob(
"data",
np.random.rand(16, 227, 227, 3).astype(np.float32),
)
workspace.FeedBlob(
"label",
(100 * np.random.rand(16)).astype(np.int32),
)
workspace.FeedBlob(
"label",
(100 * np.random.rand(16)).astype(np.int32),
)
# Then do automatic comparison test: run the next once to
# initialize everything
self.InferTensorRunAndCompare(model)
    def testShapeInferenceTranspose(self):
        """Transpose of a 5-D tensor, with and without an explicit axes argument."""
        model = model_helper.ModelHelper(name="test_model")
        workspace.FeedBlob(
            "tensor",
            np.random.rand(4, 2, 3, 3, 5).astype(np.float32)
        )
        # Testing with axes undefined
        brew.transpose(
            model,
            ["tensor"],
            "transpose",
        )
        self.InferTensorRunAndCompare(model)
        # Testing with axes defined (a random permutation of the 5 dims)
        brew.transpose(
            model,
            ["tensor"],
            "transpose",
            axes=np.random.permutation(5)
        )
        return self.InferTensorRunAndCompare(model)
    def testShapeInferencePad(self):
        """PadImage with distinct per-side (t/l/b/r) padding in NCHW order."""
        model = model_helper.ModelHelper(name="padtest")
        model.PadImage("data", 'padded', pad_t=100, pad_l=37, pad_b=28,
                       pad_r=20, mode="constant", order="NCHW")
        workspace.FeedBlob(
            "data",
            np.random.rand(16, 3, 228, 228).astype(np.float32),
        )
        self.InferTensorRunAndCompare(model)
    def testShapeInferenceTwoClass(self):
        """MakeTwoClass on a 1-D probability vector."""
        model = model_helper.ModelHelper(name="twoclass")
        model.MakeTwoClass("v", "v2")
        workspace.FeedBlob("v", np.random.rand(32).astype(np.float32))
        self.InferTensorRunAndCompare(model)
    def testShapeInferencePadZero(self):
        """PadImage with pad=0 (uniform zero-width border)."""
        model = model_helper.ModelHelper(name="padtest")
        model.PadImage("data", 'padded', pad=0, mode="constant",
                       order="NCHW")
        workspace.FeedBlob(
            "data",
            np.random.rand(16, 3, 228, 228).astype(np.float32),
        )
        self.InferTensorRunAndCompare(model)
    def testShapeInferenceMatMul(self):
        """MatMul of a (10, 5) blob with a (5, 10) blob."""
        model = model_helper.ModelHelper(name="test_model")
        model.MatMul(["x", "y"], "MatMul")
        workspace.FeedBlob("x", np.random.rand(10, 5).astype(np.float32))
        workspace.FeedBlob("y", np.random.rand(5, 10).astype(np.float32))
        self.InferTensorRunAndCompare(model)
    def testShapeInferenceSoftmaxWithLoss(self):
        """SoftmaxWithLoss with 2-D labels, 1-D labels, a weight tensor, and
        the spatial variant."""
        model = model_helper.ModelHelper(name="test_model")
        model.SoftmaxWithLoss(
            ["logits", "labels"],
            ["softmax", "loss"],
        )
        # 2D Shape of [batch_size, num_classes]
        workspace.FeedBlob(
            "logits",
            np.random.rand(4, 3).astype(np.float32),
        )
        # Shape of size batch_size with all values [0, num_classes)
        workspace.FeedBlob(
            "labels",
            np.random.randint(low=0, high=3, size=(4, 1)).astype(np.int32),
        )
        self.InferTensorRunAndCompare(model)
        # Testing with 1D labels arg
        workspace.FeedBlob(
            "logits",
            np.random.rand(4, 3).astype(np.float32),
        )
        workspace.FeedBlob(
            "labels",
            np.random.randint(low=0, high=3, size=4).astype(np.int32),
        )
        self.InferTensorRunAndCompare(model)
        # Testing with weight_tensor (per-sample loss weights)
        model.SoftmaxWithLoss(
            ["logits", "labels", "weight_tensor"],
            ["softmax", "loss"],
        )
        workspace.FeedBlob(
            "logits",
            np.random.rand(4, 3).astype(np.float32),
        )
        workspace.FeedBlob(
            "labels",
            np.random.randint(low=0, high=3, size=4).astype(np.int32),
        )
        workspace.FeedBlob(
            "weight_tensor",
            np.random.rand(4).astype(np.float32),
        )
        self.InferTensorRunAndCompare(model)
        # Test spatial model: per-pixel labels over an NCHW image batch.
        model = model_helper.ModelHelper(name="test_model")
        workspace.FeedBlob(
            "img",
            np.random.rand(32, 19, 33, 28).astype(np.float32)
        )
        workspace.FeedBlob(
            "img_labels",
            (np.random.rand(32, 33, 28) * 19).astype(np.int32)
        )
        model.SpatialSoftmaxWithLoss(
            ["img", "img_labels"],
            ["softmax_img", "loss"],
        )
        self.InferTensorRunAndCompare(model)
def testShapeInferenceIm2Col(self):
    """Im2Col shape inference for NCHW, NHWC and a rectangular kernel."""
    # NCHW layout
    m = model_helper.ModelHelper(name="test_model")
    m.Im2Col("X", "Y", pad=1, kernel=4, dilation=2, stride=2,
             order="NCHW")
    workspace.FeedBlob(
        "X", np.random.rand(16, 3, 228, 228).astype(np.float32))
    self.InferTensorRunAndCompare(m)
    # NHWC layout
    m = model_helper.ModelHelper(name="test_model")
    m.Im2Col("X", "Y", pad=1, kernel=4, dilation=2, stride=2,
             order="NHWC")
    workspace.FeedBlob(
        "X", np.random.rand(16, 228, 228, 3).astype(np.float32))
    self.InferTensorRunAndCompare(m)
    # Rectangular kernel (kernel_h != kernel_w), default order
    m = model_helper.ModelHelper(name="test_model")
    m.Im2Col("X", "Y", pad=1, kernel_h=8, kernel_w=4,
             dilation=2, stride=2)
    workspace.FeedBlob(
        "X", np.random.rand(16, 3, 228, 114).astype(np.float32))
    self.InferTensorRunAndCompare(m)
def testShapeInferenceTile(self):
    """Tile along each of the first four axes of a 5-D tensor."""
    m = model_helper.ModelHelper(name="test_model")
    workspace.FeedBlob(
        "tensor", np.random.rand(4, 2, 3, 3, 5).astype(np.float32))
    # one Tile op per axis; axes beyond the fourth are not exercised
    for axis in (0, 1, 2, 3):
        m.net.Tile(
            "tensor", "tiled_tensor_{}".format(axis), tiles=5, axis=axis)
    self.InferTensorRunAndCompare(m)
def testShapeInferenceFlatten(self):
    """Shape inference for FlattenToVec and Flatten, incl. empty inputs."""
    m = model_helper.ModelHelper(name="test_model")
    m.FlattenToVec("X", "FlatVec")
    m.FlattenToVec("empty", "EmptyFlatVec")
    workspace.FeedBlob("X", np.random.rand(17, 5, 13).astype(np.float32))
    workspace.FeedBlob("empty", np.random.rand(0, 2, 3).astype(np.float32))
    self.InferTensorRunAndCompare(m)
    # Flatten with its default axis (=1)
    m = model_helper.ModelHelper(name="test_model")
    m.Flatten("X", "Flat")
    m.Flatten("empty", "EmptyFlat")
    workspace.FeedBlob("X", np.random.rand(17, 5, 13).astype(np.float32))
    workspace.FeedBlob("empty", np.random.rand(0, 2, 3).astype(np.float32))
    self.InferTensorRunAndCompare(m)
    # Flatten at every legal axis, for a regular then an empty tensor
    m = model_helper.ModelHelper(name="test_model")
    for blob_name, data in (("x", np.random.randn(17, 5, 13)),
                            ("empty", np.random.randn(0, 5, 13))):
        for axis in range(data.ndim + 1):
            m.Flatten(blob_name, "Flat", axis=axis)
        workspace.FeedBlob(blob_name, data)
        self.InferTensorRunAndCompare(m)
def testShapeInferenceReshape(self):
    """Reshape using 0 (copy input dim) and -1 (inferred dim)."""
    m = model_helper.ModelHelper(name="test_model")
    m.Reshape("X", ["Reshaped", "Old_Shape"], shape=[8, 0, -1, 2])
    blob = np.random.rand(4, 26, 32).astype(np.float32)
    workspace.FeedBlob("X", blob)
    self.InferTensorRunAndCompare(m)
def testShapeInferenceUnique(self):
    """Unique with and without the remap output, for empty and 1-elem input."""
    for num_elems in (0, 1):
        m = model_helper.ModelHelper(name="test_model")
        m.Unique("X", ["Y"])
        m.Unique("X", ["Z", "remap"])
        workspace.FeedBlob("X", np.random.rand(num_elems).astype(np.int64))
        self.InferTensorRunAndCompare(m)
def testLengthsSum(self):
    """LengthsSum over 6 rows segmented into lengths 1, 2 and 3."""
    m = model_helper.ModelHelper(name="test_model")
    m.LengthsSum(["X", "length"], ["sum"])
    workspace.FeedBlob("X", np.random.rand(6, 32).astype(np.float32))
    workspace.FeedBlob("length", np.array([1, 2, 3], dtype=np.int32))
    self.InferTensorRunAndCompare(m)
def testConcat(self):
    """Static shape inference for Concat with axis, order and add_axis."""
    net = core.Net("concat")
    net.Concat(["A", "B"], ["C", "splits"], axis=1)
    net.Concat(["C", "D"], ["E"], order="NCHW")
    net.Concat(["E", "F"], ["G"], add_axis=1, order="NHWC")
    input_shapes = {
        'A': [10, 12, 9, 10],
        'B': [10, 9, 9, 10],
        'D': [10, 2, 9, 10],
        'F': [10, 23, 9, 10],
    }
    shapes, types = workspace.InferShapesAndTypes([net], input_shapes)
    # 12 + 9 channels concatenated along axis 1
    self.assertEqual(shapes['C'], [10, 21, 9, 10])
    self.assertEqual(shapes['splits'], [2])
    self.assertEqual(shapes['E'], [10, 23, 9, 10])
    # add_axis inserts a new dimension at the NHWC channel position
    self.assertEqual(shapes['G'], [10, 23, 9, 2, 10])
def testSqueeze(self):
    """Squeeze removes the singleton dims 1 and 3 from the inferred shape."""
    net = core.Net("sq")
    net.Squeeze(["data"], ["data_squeezed"], dims=[3, 1])
    shapes, _ = workspace.InferShapesAndTypes(
        [net], {'data': [64, 1, 96, 1, 4]})
    self.assertEqual(shapes['data_squeezed'], [64, 96, 4])
def testCast(self):
    """Shape/type inference for Cast across every supported dtype pair.

    Fix: use the builtin ``bool`` instead of ``np.bool`` -- ``np.bool``
    was a deprecated alias for the builtin and was removed in NumPy 1.24,
    so this test crashes on modern NumPy. ``astype(bool)`` is identical.
    """
    model = model_helper.ModelHelper(name="test_model")
    # (name suffix, numpy dtype to feed, TensorProto target enum)
    types = [
        ('bool', bool, caffe2_pb2.TensorProto.BOOL),
        #('byte', None, caffe2_pb2.TensorProto.BYTE),
        ('int8', np.int8, caffe2_pb2.TensorProto.INT8),
        ('uint8', np.uint8, caffe2_pb2.TensorProto.UINT8),
        ('int16', np.int16, caffe2_pb2.TensorProto.INT16),
        ('uint16', np.uint16, caffe2_pb2.TensorProto.UINT16),
        #('float16', np.float16, caffe2_pb2.TensorProto.FLOAT16),
        ('int32', np.int32, caffe2_pb2.TensorProto.INT32),
        ('float', np.float32, caffe2_pb2.TensorProto.FLOAT),
        ('int64', np.int64, caffe2_pb2.TensorProto.INT64),
        ('double', np.float64, caffe2_pb2.TensorProto.DOUBLE),
        #('string', None, caffe2_pb2.TensorProto.STRING),
    ]
    # Feed one blob per source type, then Cast it to every target type.
    for (xstr, xnp, _) in types:
        xname = 'X%s' % xstr
        workspace.FeedBlob(xname, np.random.rand(1).astype(xnp))
        for (ystr, _, yc2) in types:
            yname = 'Y%s_to_%s' % (xstr, ystr)
            model.Cast(xname, yname, to=yc2)
    self.InferTensorRunAndCompare(model)
def testShapeInferenceRoiPool(self):
    """RoIPool emits the argmaxes output only outside test mode."""
    for is_test in (True, False):
        m = model_helper.ModelHelper(name="test_model")
        out_blobs = ['Y'] if is_test else ['Y', 'argmaxes']
        m.net.RoIPool(
            ['X', 'R'], out_blobs, pooled_h=4, pooled_w=5, is_test=is_test)
        workspace.FeedBlob(
            "X", np.random.rand(100, 3, 4, 5).astype(np.float32))
        workspace.FeedBlob(
            "R", np.random.rand(2, 5).astype(np.float32))
        self.InferTensorRunAndCompare(m)
def InferTensorRunAndCompare(self, model, expected_uninferred_blobs=None):
    '''
    Runs shape inference, and then the model to check
    that the inferred shapes agree with the actual ones

    'expected_uninferred_blobs' is the list of blobs for which type and
    shape cannot be inferred.
    '''
    (shapes, types) = workspace.InferShapesAndTypes(
        [model.param_init_net, model.net],
    )
    # .. Create net
    workspace.RunNetOnce(model.param_init_net)
    workspace.CreateNet(model.net, True)
    workspace.RunNet(model.Proto().name)
    # numpy dtype -> TensorProto data type.  Replaces the long elif chain;
    # BYTE and STRING have no numpy dtype and so are not listed.
    dtype_to_proto = {
        np.dtype('float32'): caffe2_pb2.TensorProto.FLOAT,
        np.dtype('int32'): caffe2_pb2.TensorProto.INT32,
        np.dtype('bool'): caffe2_pb2.TensorProto.BOOL,
        np.dtype('uint8'): caffe2_pb2.TensorProto.UINT8,
        np.dtype('int8'): caffe2_pb2.TensorProto.INT8,
        np.dtype('uint16'): caffe2_pb2.TensorProto.UINT16,
        np.dtype('int16'): caffe2_pb2.TensorProto.INT16,
        np.dtype('int64'): caffe2_pb2.TensorProto.INT64,
        np.dtype('float16'): caffe2_pb2.TensorProto.FLOAT16,
        np.dtype('float64'): caffe2_pb2.TensorProto.DOUBLE,
    }
    # Collect the actual shapes/types by fetching every blob post-run.
    correct_shapes = {}
    correct_types = {}
    for b in workspace.Blobs():
        arr = workspace.FetchBlob(b)
        correct_shapes[b] = arr.shape
        if type(arr) is np.ndarray:
            correct_types[b] = dtype_to_proto.get(
                arr.dtype, "unknown {}".format(arr.dtype))
        else:
            # non-tensor blob (e.g. a string blob); record its Python type
            correct_types[b] = str(type(arr))
    if expected_uninferred_blobs is None:
        expected_uninferred_blobs = []
    for b in correct_shapes:
        # skip blobs for which shape couldn't be inferred
        if b in expected_uninferred_blobs:
            continue
        self.assertTrue(
            np.array_equal(
                np.array(shapes[b]).astype(np.int32),
                np.array(correct_shapes[b]).astype(np.int32)
            ),
            "Shape {} mismatch: {} vs. correct {}".format(
                b, shapes[b], correct_shapes[b]
            )
        )
        self.assertFalse(
            b not in types and b in correct_types,
            "Type for {} not defined".format(b),
        )
        self.assertEqual(
            types[b],
            correct_types[b],
            "Type {} mismatch: {} vs. {}".format(
                b, types[b], correct_types[b],
            )
        )
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
|
|
# ext/declarative/clsregistry.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to handle the string class registry used by declarative.
This system allows specification of classes and expressions used in
:func:`.relationship` using strings.
"""
import weakref
from ... import exc
from ... import inspection
from ... import util
from ...orm import class_mapper
from ...orm import ColumnProperty
from ...orm import interfaces
from ...orm import RelationshipProperty
from ...orm import SynonymProperty
from ...schema import _get_table_key
# strong references to registries which we place in
# the _decl_class_registry, which is usually weak referencing.
# the internal registries here link to classes with weakrefs and remove
# themselves when all references to contained classes are removed.
# Module-global: shared by every declarative base in the process.
_registries = set()
def add_class(classname, cls):
    """Add a class to the _decl_class_registry associated with the
    given declarative class.

    Also maintains the "_sa_module_registry" tree so the class can be
    looked up by partially or fully qualified module path.
    """
    if classname in cls._decl_class_registry:
        # class already exists.
        existing = cls._decl_class_registry[classname]
        if not isinstance(existing, _MultipleClassMarker):
            # upgrade the single-class entry to a marker holding both.
            # NOTE(review): when the entry is already a marker the new class
            # is not added here -- presumably module.add_class below covers
            # it; confirm against upstream before changing.
            existing = cls._decl_class_registry[
                classname
            ] = _MultipleClassMarker([cls, existing])
    else:
        cls._decl_class_registry[classname] = cls
    try:
        root_module = cls._decl_class_registry["_sa_module_registry"]
    except KeyError:
        # first class ever registered on this base: create the module tree
        cls._decl_class_registry[
            "_sa_module_registry"
        ] = root_module = _ModuleMarker("_sa_module_registry", None)
    tokens = cls.__module__.split(".")
    # build up a tree like this:
    # modulename: myapp.snacks.nuts
    #
    # myapp->snack->nuts->(classes)
    # snack->nuts->(classes)
    # nuts->(classes)
    #
    # this allows partial token paths to be used.
    while tokens:
        token = tokens.pop(0)
        module = root_module.get_module(token)
        for token in tokens:
            module = module.get_module(token)
        module.add_class(classname, cls)
class _MultipleClassMarker(object):
    """refers to multiple classes of the same name
    within _decl_class_registry.

    Holds weak references to each class; tears itself down (and fires
    ``on_remove``) once every referenced class has been garbage collected.
    """

    __slots__ = "on_remove", "contents", "__weakref__"

    def __init__(self, classes, on_remove=None):
        # optional callback invoked when the last class reference dies
        self.on_remove = on_remove
        # weakrefs so registration does not keep mapped classes alive
        self.contents = set(
            [weakref.ref(item, self._remove_item) for item in classes]
        )
        _registries.add(self)

    def __iter__(self):
        # note: may yield None for a ref whose target was just collected
        return (ref() for ref in self.contents)

    def attempt_get(self, path, key):
        # Return the single class registered under ``key``; raise
        # InvalidRequestError when the short name is ambiguous, NameError
        # when the sole referenced class has been collected.
        if len(self.contents) > 1:
            raise exc.InvalidRequestError(
                'Multiple classes found for path "%s" '
                "in the registry of this declarative "
                "base. Please use a fully module-qualified path."
                % (".".join(path + [key]))
            )
        else:
            ref = list(self.contents)[0]
            cls = ref()
            if cls is None:
                raise NameError(key)
            return cls

    def _remove_item(self, ref):
        # weakref callback: drop the dead ref; tear down when empty
        self.contents.remove(ref)
        if not self.contents:
            _registries.discard(self)
            if self.on_remove:
                self.on_remove()

    def add_item(self, item):
        # protect against class registration race condition against
        # asynchronous garbage collection calling _remove_item,
        # [ticket:3208]
        modules = set(
            [
                cls.__module__
                for cls in [ref() for ref in self.contents]
                if cls is not None
            ]
        )
        if item.__module__ in modules:
            util.warn(
                "This declarative base already contains a class with the "
                "same class name and module name as %s.%s, and will "
                "be replaced in the string-lookup table."
                % (item.__module__, item.__name__)
            )
        self.contents.add(weakref.ref(item, self._remove_item))
class _ModuleMarker(object):
    """refers to a module name within
    _decl_class_registry.

    Nodes form a tree keyed by package-name tokens; each node stores
    either child :class:`._ModuleMarker` nodes or
    :class:`._MultipleClassMarker` leaves.
    """

    __slots__ = "parent", "name", "contents", "mod_ns", "path", "__weakref__"

    def __init__(self, name, parent):
        self.parent = parent
        self.name = name
        self.contents = {}
        # namespace object used for attribute-style traversal
        self.mod_ns = _ModNS(self)
        if self.parent:
            self.path = self.parent.path + [self.name]
        else:
            self.path = []
        _registries.add(self)

    def __contains__(self, name):
        return name in self.contents

    def __getitem__(self, name):
        return self.contents[name]

    def _remove_item(self, name):
        # prune empty nodes upwards through the tree
        self.contents.pop(name, None)
        if not self.contents and self.parent is not None:
            self.parent._remove_item(self.name)
            _registries.discard(self)

    def resolve_attr(self, key):
        return getattr(self.mod_ns, key)

    def get_module(self, name):
        # Return the child module marker for ``name``, creating on demand.
        if name not in self.contents:
            marker = _ModuleMarker(name, self)
            self.contents[name] = marker
        else:
            marker = self.contents[name]
        return marker

    def add_class(self, name, cls):
        # Register ``cls`` under ``name`` at this node; a marker leaf is
        # created on first registration and appended to afterwards.
        if name in self.contents:
            existing = self.contents[name]
            existing.add_item(cls)
        else:
            existing = self.contents[name] = _MultipleClassMarker(
                [cls], on_remove=lambda: self._remove_item(name)
            )
class _ModNS(object):
    """Attribute-access namespace over a :class:`._ModuleMarker` node."""

    # name-mangled to _ModNS__parent; avoids clashing with looked-up names
    __slots__ = ("__parent",)

    def __init__(self, parent):
        self.__parent = parent

    def __getattr__(self, key):
        try:
            value = self.__parent.contents[key]
        except KeyError:
            pass
        else:
            if value is not None:
                if isinstance(value, _ModuleMarker):
                    # descend into the sub-module namespace
                    return value.mod_ns
                else:
                    assert isinstance(value, _MultipleClassMarker)
                    return value.attempt_get(self.__parent.path, key)
        raise AttributeError(
            "Module %r has no mapped classes "
            "registered under the name %r" % (self.__parent.name, key)
        )
class _GetColumns(object):
    """Proxy attribute access on a mapped class during string evaluation.

    Follows synonyms to their underlying attribute and rejects properties
    that do not correspond directly to a Column.
    """

    __slots__ = ("cls",)

    def __init__(self, cls):
        self.cls = cls

    def __getattr__(self, key):
        # configure=False: mappers may not be fully configured yet
        mp = class_mapper(self.cls, configure=False)
        if mp:
            if key not in mp.all_orm_descriptors:
                raise AttributeError(
                    "Class %r does not have a mapped column named %r"
                    % (self.cls, key)
                )
            desc = mp.all_orm_descriptors[key]
            if desc.extension_type is interfaces.NOT_EXTENSION:
                prop = desc.property
                if isinstance(prop, SynonymProperty):
                    # follow the synonym to the underlying attribute name
                    key = prop.name
                elif not isinstance(prop, ColumnProperty):
                    raise exc.InvalidRequestError(
                        "Property %r is not an instance of"
                        " ColumnProperty (i.e. does not correspond"
                        " directly to a Column)." % key
                    )
        return getattr(self.cls, key)
# Make _GetColumns transparent to sqlalchemy.inspect() by delegating
# inspection to the wrapped class.
inspection._inspects(_GetColumns)(
    lambda target: inspection.inspect(target.cls)
)
class _GetTable(object):
__slots__ = "key", "metadata"
def __init__(self, key, metadata):
self.key = key
self.metadata = metadata
def __getattr__(self, key):
return self.metadata.tables[_get_table_key(key, self.key)]
def _determine_container(key, value):
    # Unwrap an ambiguity marker (raises if the short name is ambiguous),
    # then wrap the class for column-level attribute access.
    if isinstance(value, _MultipleClassMarker):
        value = value.attempt_get([], key)
    return _GetColumns(value)
class _class_resolver(object):
    """Lazily resolve a string argument given to :func:`.relationship`.

    Names resolve against the declarative class registry, MetaData tables
    and schemas, the module registry, any extra resolvers, then the
    ``fallback`` namespace (typically the ``sqlalchemy`` package dict).
    """

    def __init__(self, cls, prop, fallback, arg):
        self.cls = cls
        self.prop = prop
        self.arg = self._declarative_arg = arg
        self.fallback = fallback
        # memoizing dict: missing keys are filled via _access_cls
        self._dict = util.PopulateDict(self._access_cls)
        self._resolvers = ()

    def _access_cls(self, key):
        # Resolution order: registry class, table, schema, module registry,
        # extra resolvers, then the fallback namespace (may raise KeyError).
        cls = self.cls
        if key in cls._decl_class_registry:
            return _determine_container(key, cls._decl_class_registry[key])
        elif key in cls.metadata.tables:
            return cls.metadata.tables[key]
        elif key in cls.metadata._schemas:
            return _GetTable(key, cls.metadata)
        elif (
            "_sa_module_registry" in cls._decl_class_registry
            and key in cls._decl_class_registry["_sa_module_registry"]
        ):
            registry = cls._decl_class_registry["_sa_module_registry"]
            return registry.resolve_attr(key)
        elif self._resolvers:
            for resolv in self._resolvers:
                value = resolv(key)
                if value is not None:
                    return value
        return self.fallback[key]

    def _raise_for_name(self, name, err):
        # re-raise lookup failures as a descriptive InvalidRequestError
        util.raise_(
            exc.InvalidRequestError(
                "When initializing mapper %s, expression %r failed to "
                "locate a name (%r). If this is a class name, consider "
                "adding this relationship() to the %r class after "
                "both dependent classes have been defined."
                % (self.prop.parent, self.arg, name, self.cls)
            ),
            from_=err,
        )

    def _resolve_name(self):
        # Resolve a dotted path token-by-token, without eval().
        name = self.arg
        d = self._dict
        rval = None
        try:
            for token in name.split("."):
                if rval is None:
                    rval = d[token]
                else:
                    rval = getattr(rval, token)
        except KeyError as err:
            self._raise_for_name(name, err)
        except NameError as n:
            self._raise_for_name(n.args[0], n)
        else:
            if isinstance(rval, _GetColumns):
                return rval.cls
            else:
                return rval

    def __call__(self):
        # NOTE: eval() is intentional here -- the string is part of the
        # application's own mapping configuration, never untrusted input.
        try:
            x = eval(self.arg, globals(), self._dict)
            if isinstance(x, _GetColumns):
                return x.cls
            else:
                return x
        except NameError as n:
            self._raise_for_name(n.args[0], n)
def _resolver(cls, prop):
    """Return ``(resolve_name, resolve_arg)`` factories for string args.

    ``resolve_arg`` builds a :class:`._class_resolver` for a string;
    ``resolve_name`` returns the resolver's eval-free dotted-name variant.
    """
    import sqlalchemy
    from sqlalchemy.orm import foreign, remote

    # Evaluation namespace: everything in the sqlalchemy package plus the
    # relationship annotation helpers.
    namespace = dict(sqlalchemy.__dict__)
    namespace.update(foreign=foreign, remote=remote)

    def resolve_arg(arg):
        return _class_resolver(cls, prop, namespace, arg)

    def resolve_name(arg):
        return resolve_arg(arg)._resolve_name

    return resolve_name, resolve_arg
def _deferred_relationship(cls, prop):
    """Wrap string-valued arguments of a RelationshipProperty in resolvers.

    Each string attribute (primaryjoin, order_by, ...) is replaced with a
    callable that resolves the string lazily at mapper configuration time;
    non-relationship properties pass through unchanged.
    """
    if isinstance(prop, RelationshipProperty):
        resolve_name, resolve_arg = _resolver(cls, prop)
        # attributes evaluated as SQL expressions
        for attr in (
            "order_by",
            "primaryjoin",
            "secondaryjoin",
            "secondary",
            "_user_defined_foreign_keys",
            "remote_side",
        ):
            v = getattr(prop, attr)
            if isinstance(v, util.string_types):
                setattr(prop, attr, resolve_arg(v))
        # the target class argument resolves by dotted name, without eval
        for attr in ("argument",):
            v = getattr(prop, attr)
            if isinstance(v, util.string_types):
                setattr(prop, attr, resolve_name(v))
        # strings inside a not-yet-constructed backref also need wrapping
        if prop.backref and isinstance(prop.backref, tuple):
            key, kwargs = prop.backref
            for attr in (
                "primaryjoin",
                "secondaryjoin",
                "secondary",
                "foreign_keys",
                "remote_side",
                "order_by",
            ):
                if attr in kwargs and isinstance(
                    kwargs[attr], util.string_types
                ):
                    kwargs[attr] = resolve_arg(kwargs[attr])
    return prop
|
|
import json
import numpy
from heroes import HEROES
__author__ = 'yanbo'
import io
import os
from smoke.io.wrap import demo as io_wrap_demo
from smoke.replay import demo as replay_demo
from smoke.replay.const import Data
import pylab
import matplotlib
import matplotlib.image
import scipy.ndimage
import cPickle
from mapping import CoordinateMapper, HIRES_MAP_REF
# Source-engine world coordinate extent -- presumably the map half-width
# in engine units; TODO confirm against the engine constants.
MAX_COORD_INTEGER = 16384
here = os.path.dirname(os.path.abspath(__file__))
# .dem replay files are looked up relative to this script
REPLAY_FOLDER = os.path.join(here, 'replay_files')
"""
entity - something in-game that has networked information
DT - a string that identifies the kind of entity at hand
cls - an integer that identifies the kind of entity at hand
recv table - list of properties for entity of certain DT/cls; a template
"""
def get_hero_position(user_hero, position_x_index, position_y_index, position_vector_index, cell_width):
    """Convert cell indices plus the intra-cell origin vector to coordinates.

    The x/y cell index is scaled by ``cell_width``; the origin vector
    components are in 1/128 units within the cell.
    """
    origin = user_hero.get(position_vector_index)
    x = user_hero.get(position_x_index) * cell_width + origin[0] / 128.
    y = user_hero.get(position_y_index) * cell_width + origin[1] / 128.
    return x, y
def get_current_replay():
replay_list = []
for replay_name in os.listdir(REPLAY_FOLDER):
if replay_name.endswith(".dem"):
replay_list.append(replay_name)
print "Select replay which you wish to parse"
for index, replay_name in enumerate(replay_list):
print "{}) {}".format(index, replay_name)
user_selection = None
while True:
user_input = raw_input('>')
try:
user_selection = int(user_input)
except ValueError:
print "Invalid input - Please enter a valid number 0-{}".format(len(replay_list))
if user_selection is not None:
if 0 <= user_selection < len(replay_list):
break
else:
print "Invalid input - Please enter a valid number 0-{}".format(len(replay_list))
return os.path.join(REPLAY_FOLDER, replay_list[user_selection])
def gather_data_for_heatmap():
    """Parse a user-chosen replay, record the chosen hero's position each
    tick once the game clock starts, pickle the positions and draw the
    heatmap.

    Blocks on console input twice (replay choice, then hero choice).
    """
    replay_path = get_current_replay()
    with io.open(replay_path, 'rb') as replay_file:
        demo_io = io_wrap_demo.Wrap(replay_file)
        demo_io.bootstrap()
        # we can seek on the raw underlying IO instead of parsing everything
        parse_mask = Data.All
        demo = replay_demo.Demo(demo_io, parse=parse_mask)
        # skipping to the start of the match
        demo.bootstrap()
        received_tables = demo.match.recv_tables
        class_info = demo.match.class_info
        # recv-table property indices we read on every tick
        game_meta_tables = received_tables.by_dt['DT_DOTAGamerulesProxy']
        game_status_index = game_meta_tables.by_name['dota_gamerules_data.m_nGameState']
        npc_info_table = received_tables.by_dt['DT_DOTA_BaseNPC']
        position_x_index = npc_info_table.by_name['m_cellX']
        position_y_index = npc_info_table.by_name['m_cellY']
        position_vector_index = npc_info_table.by_name['m_vecOrigin']
        # we need to calculate dimensions of the cell used on map, so we can determine coordinates
        base_entity_table = received_tables.by_dt['DT_BaseEntity']
        cell_info_index = base_entity_table.by_name['m_cellbits']
        user_hero_ehandle = None
        hero_positions = []
        cell_width = None
        mapper = None
        for match in demo.play():
            # first we need to wait for game to start
            game_meta = match.entities.by_cls[class_info['DT_DOTAGamerulesProxy']][0].state
            current_game_status = game_meta.get(game_status_index)
            if mapper is None:
                # tower entities anchor the world -> image coordinate mapping
                towers = match.entities.by_cls[class_info['DT_DOTA_BaseNPC_Tower']]
                mapper = CoordinateMapper(HIRES_MAP_REF, towers, received_tables)
            """
            m_nGameState
            1: Players loading in
            2: Pick/ban in CM (not sure about other modes)
            4: Pre-game (heroes selected but no creeps)
            5: Game clock hits 0:00 (creeps spawn)
            6: Game has ended (scoreboard)
            """
            if cell_width is None:
                base_game_info = match.entities.by_cls[class_info['DT_BaseEntity']][0].state
                cell_width = 1 << base_game_info.get(cell_info_index)
            if user_hero_ehandle is None and current_game_status == 5:
                # creeps just spawned: hero picks are final, ask which to track
                match_heroes = get_heroes_names_and_ehandles(match.entities, class_info, received_tables)
                user_hero_ehandle = get_user_hero_ehandle(match_heroes)
            elif user_hero_ehandle is not None:
                user_hero_data = match.entities.by_ehandle[user_hero_ehandle].state
                hero_positions.append(get_hero_position(user_hero_data,
                                                        position_x_index,
                                                        position_y_index,
                                                        position_vector_index, cell_width))
        # cache positions so draw_heatmap() can re-run without re-parsing
        with open('data_position.pkl', 'wb') as output_file:
            cPickle.dump(hero_positions, output_file, protocol=-1)
        draw_heatmap(hero_positions, mapper)
        demo.finish()
def get_user_hero_ehandle(match_heroes):
if len(match_heroes) != 10:
return
print "Select hero for which you wish to generate heatmap"
for index, hero in enumerate(match_heroes):
print "{}) {}".format(index, hero[0])
user_selection = None
while True:
user_input = raw_input('>')
try:
user_selection = int(user_input)
except ValueError:
print "Invalid input - Enter a number from 0 to 9"
if user_selection is not None and 0 <= user_selection <= 9:
break
print "Please wait while we generate heatmap"
return match_heroes[user_selection][1]
def get_heroes_names_and_ehandles(entities, class_info, received_tables):
    """Return [(localized_name, ehandle)] for the ten picked heroes.

    Reads the PlayerResource entity's per-slot selected-hero properties.
    """
    world_data = entities.by_cls[class_info['DT_DOTA_PlayerResource']]
    rt = received_tables.by_dt['DT_DOTA_PlayerResource']
    current_data = world_data[0].state
    hero_data = []
    for i in range(10):
        # property names carry zero-padded slot indices, e.g. '...0003'
        hero_ehandle_index = rt.by_name['m_hSelectedHero.{:04d}'.format(i)]
        hero_id_index = rt.by_name['m_nSelectedHeroID.{:04d}'.format(i)]
        hero_id = current_data.get(hero_id_index)
        hero_ehandle = current_data.get(hero_ehandle_index)
        # hero ids appear to be offset by 2 in HEROES -- TODO confirm
        localized_hero_name = HEROES[hero_id - 2]['localized_name']
        hero_data.append((localized_hero_name, hero_ehandle))
    return hero_data
def get_overview_data(replay_path):
    """Parse only the end-of-match overview section of a replay file."""
    with io.open(replay_path, 'rb') as replay_file:
        wrapped = io_wrap_demo.Wrap(replay_file)
        # bootstrap() returns the byte offset of the overview section,
        # letting us seek straight there instead of parsing every tick
        overview_offset = wrapped.bootstrap()
        replay_file.seek(overview_offset)
        demo = replay_demo.Demo(wrapped)
        demo.finish()
        return demo.match.overview
def rgb2gray(rgb):
    """Convert an RGB image array (..., 3) to grayscale luminance.

    Uses the ITU-R BT.601 luma weights. Fix: the blue coefficient was
    0.144; the correct value is 0.114 (the weights must sum to 1.0, so a
    pure-white image previously mapped to 1.03 instead of 1.0).
    """
    return numpy.dot(rgb[..., :3], (0.299, 0.587, 0.114))
def draw_heatmap(hero_positions=None, mapper=None):
    """Scatter-plot the recorded hero positions over the minimap image.

    NOTE(review): the function currently returns early after the debug
    scatter plots, so the contour-heatmap code below the ``return`` is
    unreachable; it also references an undefined ``dire_H`` -- finish or
    remove that section before relying on it.
    """
    if hero_positions is None:
        # fall back to the positions pickled by gather_data_for_heatmap()
        with open('data_position.pkl', 'rb') as output_pickle:
            hero_positions = cPickle.load(output_pickle)
    background_map = matplotlib.image.imread('dota_map_high_res.jpg')
    # Grayscale the image so the contourf shows up more clearly
    background_map = rgb2gray(background_map)
    mapped_xs = []
    mapped_ys = []
    import matplotlib.pyplot as plt
    """"
    debug code start
    """
    raw_x = [x[0] for x in hero_positions]
    raw_y = [x[1] for x in hero_positions]
    plt.clf()
    plt.scatter(raw_x, raw_y)
    plt.show()
    """
    Debug code end
    """
    # map world coordinates onto minimap image pixels
    for x, y in hero_positions:
        mx, my = mapper.to_mapped(x, y)
        mapped_xs.append(mx)
        mapped_ys.append(my)
    plt.clf()
    plt.scatter(mapped_xs, mapped_ys)
    plt.show()
    plt.clf()
    pylab.imshow(background_map[::-1, :], origin='lower', cmap=pylab.cm.gray)
    #pylab.xlim(0, background_map.shape[1])
    #pylab.ylim(0, background_map.shape[0])
    pylab.scatter(mapped_xs, mapped_ys, color='blue')
    plt.show()
    # NOTE(review): early return -- everything below is dead code.
    return
    """
    End debug
    """
    blue_alpha = matplotlib.colors.LinearSegmentedColormap('BlueAlpha', {'red': ((0.0, 0.42, 0.42), (1.0, 0.03, 0.03)),
                                                                         'green': ((0.0, 0.68, 0.68), (1.0, 0.19, 0.19)),
                                                                         'blue': ((0.0, 0.84, 0.84), (1.0, 0.42, 0.42)),
                                                                         'alpha': ((0.0, 0.0, 0.0), (0.05, 0.0, 0.0), (0.10, 0.5, 0.5), (1.0, 1.0, 1.0))})
    orange_alpha = matplotlib.colors.LinearSegmentedColormap('OrangeAlpha', {'red': ((0.0, 1.0, 1.0), (1.0, 0.5, 0.5)),
                                                                             'green': ((0.0, 0.55, 0.55), (1.0, 0.15, 0.15)),
                                                                             'blue': ((0.0, 0.23, 0.23), (1.0, 0.0, 0.0)),
                                                                             'alpha': ((0.0, 0.0, 0.0), (0.05, 0.0, 0.0), (0.10, 0.5, 0.5), (1.0, 1.0, 1.0))})
    # Do a pixel-wide histogram followed by a strong Gaussian blur
    xedges = numpy.arange(0, background_map.shape[0], 1)
    yedges = numpy.arange(0, background_map.shape[1], 1)
    radiant_H, xedges, yedges = numpy.histogram2d(mapped_xs, mapped_ys, bins=(xedges, yedges))
    radiant_H = scipy.ndimage.gaussian_filter(radiant_H, sigma=50)
    X, Y = 0.5*(xedges[1:]+xedges[:-1]), 0.5*(yedges[1:]+yedges[:-1])
    # Re-orient so the (0,0) is in the radiant corner
    pylab.imshow(background_map[::-1, :], origin='lower', cmap=pylab.cm.gray)
    pylab.contourf(X, Y, numpy.log10(radiant_H.transpose()+1), 10, cmap=blue_alpha)
    # NOTE(review): dire_H is never defined in this function; this would
    # raise NameError if the early return above were removed.
    pylab.contourf(X, Y, numpy.log10(dire_H.transpose()+1), 10, cmap=orange_alpha)
    pylab.xlim(0, background_map.shape[1])
    pylab.ylim(0, background_map.shape[0])
    pylab.gca().get_xaxis().set_visible(False)
    pylab.gca().get_yaxis().set_visible(False)
    pylab.tight_layout(0)
    pylab.savefig('radiant_dire_heatmap.png')
    pylab.close()
# Entry point: interactively parse a replay and produce the heatmap.
if __name__ == '__main__':
    # print json.dumps(get_overview_data(), indent=4)
    gather_data_for_heatmap()
    #draw_heatmap()
|
|
"""abd automates the creation and landing of reviews from branches."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdi_processrepo
#
# Public Functions:
# create_review
# create_differential_review
# update_review
# update_in_review
# land
# create_failed_review
# try_create_review
# process_updated_branch
# process_abandoned_branch
# process_branches
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
import phlcon_differential
import abdcmnt_commenter
import abdt_branch
import abdt_conduitgit
import abdt_exception
import abdt_git
import abdt_logging
import abdt_userwarning
# Fallback test plan substituted when a commit message omits one; the
# review then gets a warning comment instead of being rejected outright.
_DEFAULT_TEST_PLAN = "I DIDNT TEST"
def create_review(conduit, branch, plugin_manager):
    """Create a Differential review from a review branch.

    Parses the tip commit message, downgrades recoverable parse problems
    (missing test plan, unknown reviewers, self-review) to user warnings,
    creates the revision and comments any warnings onto it.

    Raises abdt_exception.* subtypes on unrecoverable problems.
    """
    plugin_manager.hook(
        "before_create_review",
        {"conduit": conduit, "branch": branch})
    branch.verify_review_branch_base()
    # TODO: we should also cc other users on the branch
    # TODO: if there are emails that don't match up to users then we should
    # note that on the review and perhaps use the mailer to notify them
    name, email, user, phid = abdt_conduitgit.getPrimaryUserDetailsFromBranch(
        conduit, branch)
    print "- author: " + user
    user_warnings = []
    message = branch.get_commit_message_from_tip()
    parsed = conduit.parse_commit_message(message)
    d = phlcon_differential
    if parsed.errors:
        error_list = phlcon_differential.parse_commit_message_errors(
            parsed.errors)
        for error in error_list:
            if isinstance(error, d.ParseCommitMessageNoTestPlanFail):
                # substitute a default test plan rather than failing
                parsed.fields["testPlan"] = _DEFAULT_TEST_PLAN
                user_warnings.append(
                    abdt_userwarning.UsedDefaultTestPlan(_DEFAULT_TEST_PLAN))
            elif isinstance(error, d.ParseCommitMessageUnknownReviewerFail):
                user_warnings.append(
                    abdt_userwarning.UnknownReviewers(
                        error.user_list, message))
            else:
                # anything else is fatal for review creation
                raise abdt_exception.CommitMessageParseException(
                    errors=parsed.errors,
                    fields=parsed.fields,
                    digest=branch.make_message_digest())
    # remove the author from reviewer list if present
    reviewer_phids_key = phlcon_differential.MessageFields.reviewer_phids
    if reviewer_phids_key in parsed.fields:
        reviewer_phids = parsed.fields[reviewer_phids_key]
        if phid in reviewer_phids:
            reviewer_phids.remove(phid)
            user_warnings.append(abdt_userwarning.SelfReviewer(user, message))
    diff_result = branch.make_raw_diff()
    raw_diff = diff_result.diff
    if not raw_diff:
        raise abdt_exception.AbdUserException("no difference to review")
    if diff_result.reduction_list:
        # the diff had to be shrunk to fit size limits; warn the author
        user_warnings.append(abdt_userwarning.LargeDiff(diff_result))
    revisionid = create_differential_review(
        conduit, user, parsed, branch, raw_diff)
    commenter = abdcmnt_commenter.Commenter(conduit, revisionid)
    if user_warnings:
        commenter.userWarnings(user_warnings)
    plugin_manager.hook(
        "after_create_review",
        {"parsed": parsed, "conduit": conduit, "branch": branch,
         "rawDiff": raw_diff, "commenter": commenter}
    )
    abdt_logging.on_review_event(
        'createrev', '{} created {} from {}'.format(
            user, revisionid, branch.review_branch_name()))
def create_differential_review(conduit, user, parsed, branch, raw_diff):
    """Create the Differential revision and comment its summary.

    Returns the id of the newly created revision.
    """
    print "- creating revision"
    revision_id = conduit.create_revision_as_user(
        raw_diff, parsed.fields, user)
    print "- created " + str(revision_id)
    # record the review id on the branch before commenting, so a comment
    # failure doesn't leave the branch without its review association
    branch.mark_ok_new_review(revision_id)
    print "- commenting on " + str(revision_id)
    commenter = abdcmnt_commenter.Commenter(conduit, revision_id)
    commenter.createdReview(
        branch.get_repo_name(),
        branch.review_branch_hash(),
        branch.review_branch_name(),
        branch.base_branch_name(),
        branch.get_browse_url())
    return revision_id
def update_review(conduit, branch):
    """Bring the review for 'branch' in line with the branch's state.

    Dispatches on branch/review status: update on new commits, handle
    abandoned reviews and branches, retry bad branches, and land the
    branch once the review is accepted.
    """
    revision_id = branch.review_id_or_none()
    if branch.has_new_commits():
        print "changes on branch"
        branch.verify_review_branch_base()
        update_in_review(conduit, branch)
    elif branch.is_status_bad_abandoned():
        if not conduit.is_review_abandoned(revision_id):
            # update the review as the branch may have been bad previously
            # and we'll want to re-assess it's status
            update_in_review(conduit, branch)
        elif not conduit.is_review_recently_updated(revision_id):
            # both branch and review are stale: retire the branch entirely
            review_name = branch.review_branch_name()
            review_hash = branch.review_branch_hash()
            branch.remove()
            commenter = abdcmnt_commenter.Commenter(conduit, revision_id)
            commenter.abandonedForUser(
                review_name,
                review_hash,
                abdt_git.ARCYD_ABANDONED_REF)
            return
    elif conduit.is_review_abandoned(revision_id):
        raise abdt_exception.ReviewAbandonedException()
    elif branch.is_status_bad() and not branch.is_status_bad_land():
        try:
            print "try updating bad branch"
            branch.verify_review_branch_base()
            update_in_review(conduit, branch)
        except abdt_exception.AbdUserException:
            # still broken; leave the review marked bad
            print "still bad"
    # a successful update above may have cleared the bad status
    if not branch.is_status_bad():
        if conduit.is_review_accepted(revision_id):
            branch.verify_review_branch_base()
            land(conduit, branch)
            # TODO: we probably want to do a better job of cleaning up locally
        else:
            print "do nothing"
def update_in_review(conduit, branch):
    """Update the existing revision with a fresh diff of the branch.

    Raises abdt_exception.AbdUserException when the diff is empty.
    """
    print "update_in_review"
    print "- creating diff"
    diff_result = branch.make_raw_diff()
    if not diff_result.diff:
        raise abdt_exception.AbdUserException("no difference to review")
    user_warnings = []
    if diff_result.reduction_list:
        # the diff had to be shrunk to fit size limits; warn the author
        user_warnings.append(abdt_userwarning.LargeDiff(diff_result))
    review_id = branch.review_id_or_none()
    review_id_str = str(review_id)
    print "- updating revision " + review_id_str
    conduit.update_revision(
        review_id,
        diff_result.diff,
        'update\n\n``` lang=text\n' + branch.describe_new_commits() + '```')
    branch.mark_ok_in_review()
    print "- commenting on revision " + review_id_str
    commenter = abdcmnt_commenter.Commenter(conduit, review_id)
    commenter.updatedReview(
        branch.review_branch_hash(),
        branch.review_branch_name())
    if user_warnings:
        commenter.userWarnings(user_warnings)
    abdt_logging.on_review_event(
        'updaterev', '{} updated {}'.format(
            branch.review_branch_name(), review_id))
def land(conduit, branch):
    """Land an accepted review branch onto its base and close the review.

    Raises abdt_exception.LandingException when the branch has no commits.
    """
    print "landing " + branch.review_branch_name()
    review_branch_name = branch.review_branch_name()
    base_branch_name = branch.base_branch_name()
    names_emails = branch.get_author_names_emails()
    if not names_emails:
        raise abdt_exception.LandingException(
            "no commits on branch", review_branch_name, base_branch_name)
    # pick the last author as the author for landing
    name, email = names_emails[-1]
    review_id = branch.review_id_or_none()
    # store the branch hash now, the branch will be invalid after landing
    review_branch_hash = branch.review_branch_hash()
    # compose the commit message
    message = conduit.get_commit_message(review_id)
    land_message = branch.land(name, email, message)
    print "- commenting on revision " + str(review_id)
    commenter = abdcmnt_commenter.Commenter(conduit, review_id)
    commenter.landedReview(
        review_branch_hash,
        review_branch_name,
        base_branch_name,
        land_message)
    conduit.close_revision(review_id)
    abdt_logging.on_review_event(
        'landrev', '{} landed {}, {}'.format(
            name, review_id, review_branch_name))
def create_failed_review(conduit, branch, exception):
    """Create a placeholder revision that records why review creation failed.

    The branch is then marked as bad, associated with the new revision id.
    """
    owner = abdt_conduitgit.getAnyUserFromBranch(conduit, branch)
    revision_id = conduit.create_empty_revision_as_user(owner)
    commenter = abdcmnt_commenter.Commenter(conduit, revision_id)
    commenter.failedCreateReview(
        branch.get_repo_name(),
        branch.review_branch_hash(),
        branch.review_branch_name(),
        branch.get_browse_url(),
        exception)
    branch.mark_new_bad_in_review(revision_id)
def try_create_review(
        mailer, conduit, branch, plugin_manager, reporter, mail_on_fail):
    """Attempt to create a review; on user error, record a 'failed' review.

    If even the failed-review placeholder cannot be created because the
    branch has no recognisable users, mark the branch bad pre-review,
    report it, and optionally email the authors.
    """
    try:
        create_review(conduit, branch, plugin_manager)
    except abdt_exception.AbdUserException as e:
        print "failed to create:"
        print e
        try:
            create_failed_review(conduit, branch, e)
        except abdt_exception.NoUsersOnBranchException as e:
            print "failed to create failed review:"
            print e
            branch.mark_bad_pre_review()
            reporter.no_users_on_branch(e.emails)
            # only nag by email when the caller asked for it
            if mail_on_fail:
                mailer.noUsersOnBranch(
                    e.review_branch_name, e.base_name, e.emails)
def process_updated_branch(mailer, conduit, branch, plugin_manager, reporter):
    """Create or update the review for a branch that has changed.

    New branches get a fresh review; branches that previously failed
    pre-review are retried; everything else is updated in place, with
    exceptions mapped onto branch status and review comments.
    """
    abdte = abdt_exception
    review_branch_name = branch.review_branch_name()
    if branch.is_new():
        print "create review for " + review_branch_name
        try_create_review(
            mailer,
            conduit,
            branch,
            plugin_manager,
            reporter,
            mail_on_fail=True)
    else:
        review_id = branch.review_id_or_none()
        commenter = abdcmnt_commenter.Commenter(conduit, review_id)
        if branch.is_status_bad_pre_review():
            print "try again to create review for " + review_branch_name
            # only mail the authors again if something new has been pushed
            has_new_commits = branch.has_new_commits()
            try_create_review(
                mailer,
                conduit,
                branch,
                plugin_manager,
                reporter,
                mail_on_fail=has_new_commits)
        else:
            print "update review for " + review_branch_name
            # NOTE: except clauses are ordered most-specific first; the
            # final AbdUserException clause is the catch-all for user errors.
            try:
                update_review(conduit, branch)
            except abdte.ReviewAbandonedException as e:
                branch.mark_bad_abandoned()
                commenter.exception(e)
            except abdte.LandingException as e:
                print "landing exception"
                branch.mark_bad_land()
                commenter.exception(e)
                conduit.set_requires_revision(review_id)
            except abdte.LandingPushBaseException as e:
                print "landing push base exception"
                # we don't need to set bad_land here, requiring revision is ok
                commenter.exception(e)
                conduit.set_requires_revision(review_id)
            except abdte.AbdUserException as e:
                print "user exception"
                branch.mark_bad_in_review()
                commenter.exception(e)
def process_abandoned_branch(conduit, branch):
print "untracking abandoned branch: " + branch.review_branch_name()
review_id = branch.review_id_or_none()
if review_id is not None:
commenter = abdcmnt_commenter.Commenter(conduit, review_id)
commenter.abandonedBranch(branch.review_branch_name())
# TODO: abandon the associated revision if not already
branch.abandon()
def process_branches(branches, conduit, mailer, plugin_manager, reporter):
for branch in branches:
if branch.is_abandoned():
process_abandoned_branch(conduit, branch)
elif branch.is_null():
pass # TODO: should handle these
else:
reporter.start_branch(branch.review_branch_name())
print "pending:", branch.review_branch_name()
process_updated_branch(
mailer, conduit, branch, plugin_manager, reporter)
reporter.finish_branch(
abdt_branch.calc_is_ok(branch),
branch.review_id_or_none())
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
|
import re
import logging
from decimal import Decimal
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoader
class FirstincoffeeComSpider(BaseSpider):
    """Spider for 1stincoffee.com that yields Product items.

    `parse` scrapes the site-wide search listing (one row per product),
    expanding per-size <select> options into one item each.  `parse_old`
    and `parse_product` handle a legacy category-based page layout.
    """

    name = '1stincoffee.com'
    allowed_domains = ['1stincoffee.com']
    # Site-wide search with empty terms: returns every product, paginated.
    start_urls = ('http://www.1stincoffee.com/search_results.asp?txtsearchParamTxt=&txtsearchParamCat=ALL&btnSearch.x=6&btnSearch.y=8&txtsearchParamType=ALL&iLevel=1&txtsearchParamMan=ALL&txtsearchParamVen=ALL&txtFromSearch=fromSearch',)

    def parse(self, response):
        """Parse a search-results page: follow pagination, yield products."""
        URL_BASE = get_base_url(response)
        hxs = HtmlXPathSelector(response)
        # pages
        page_urls = hxs.select("//td[@class='tdContent2']/table//a/@href").extract()
        for url in page_urls:
            url = urljoin_rfc(URL_BASE, url)
            yield Request(url)
        products = hxs.select("//td[@class='tdContent2']/form/table/tr[1]")
        if not products:
            print "ERROR!! NO PRODUCTS!! %s " % response.url
            logging.error("ERROR!! NO PRODUCTS!! %s" % response.url)
        for product_el in products:
            name = product_el.select("td/b[1]/font/text()").extract()
            if not name:
                logging.error("ERROR!! NO NAME!! %s " % response.url)
                continue
            name = name[0]
            url = product_el.select("td/a/@href").extract()
            if not url:
                # no product link in the row; fall back to the listing page
                url = response.url
            else:
                url = url[0]
                url = urljoin_rfc(URL_BASE, url)
            # the price text can appear in several places within the row
            price = product_el.select("td/text() |\
                td/font/b/text() |\
                td/p/text()").extract()
            if not price:
                logging.error("ERROR!! NO PRICE!! %s, %s " % (response.url, name))
                continue
            price = "".join(price)
            m = re.search("\$([\d\.,]*)", price)
            if not m:
                logging.error("ERROR!! NO PRICE!! %s, %s " % (response.url, name))
                continue
            price = m.group(1)
            options = product_el.select(".//select/option/text()").extract()
            if options:
                # one item per size/pack option, e.g. "12 oz (add $2.00)"
                for option in options:
                    m = re.search("([^(]*)(\([^)]*\$([\d\.,]*)[^)]*\))?", option)
                    if not m:
                        logging.error("ERROR!! NO PRICE!! %s, %s " % (response.url, name))
                        continue
                    name2, add_text, price2 = m.groups()
                    product_name = "%s, %s" % (name, name2)
                    product_price = Decimal(price.replace(",", ""))
                    if price2:
                        # NOTE(review): price2 may still contain thousands
                        # commas, which Decimal() rejects -- confirm and
                        # strip like the base price if so.
                        product_price += Decimal(price2)
                    product = Product()
                    loader = ProductLoader(item=product, response=response)
                    loader.add_value('url', url)
                    loader.add_value('name', product_name)
                    loader.add_value('price', product_price)
                    loader.add_value('sku', '')
                    yield loader.load_item()
            else:
                product = Product()
                loader = ProductLoader(item=product, response=response)
                loader.add_value('url', url)
                loader.add_value('name', name)
                loader.add_value('price', price)
                loader.add_value('sku', '')
                yield loader.load_item()

    def parse_old(self, response):
        """Legacy parser for the old category-based site layout."""
        URL_BASE = get_base_url(response)
        hxs = HtmlXPathSelector(response)
        # categories
        category_urls = hxs.select("//div[@id='leftnav']/ul[position()<last()]/li/a/@href").extract()
        for url in category_urls:
            url = urljoin_rfc(URL_BASE, url)
            yield Request(url)
        # products list
        products = hxs.select("//div[@class='productdiv']/table[@class='threecell']/tr/td")
        if not products:
            products = hxs.select("//div[@id='content']/table[@class='threecell']/tr/td")
        if not products:
            print "ERROR!! NO PRODUCTS!! %s " % response.url
            logging.error("ERROR!! NO PRODUCTS!! %s" % response.url)
        for product_el in products:
            name = product_el.select("h3/text()").extract()
            if name:
                name = [x.strip() for x in name]
                name = " ".join(name)
                url = product_el.select("a[1]/@href").extract()
                if not url:
                    print "ERROR!! NO URL!! %s" % response.url
                    logging.error("ERROR!! NO URL!! %s, %s" % (name, response.url))
                    continue
                url = url[0]
                url = urljoin_rfc(URL_BASE, url)
                price = product_el.select("span/text()").extract()
                if not price:
                    # no inline price; fetch it from the product page instead
                    yield Request(url, callback=self.parse_product)
                    # print "ERROR!! NO PRICE!! %s" % response.url
                    # logging.error("ERROR!! NO PRICE!! %s, %s" % (name, response.url))
                    continue
                price = [x.strip() for x in price]
                price = "".join(price)
                m = re.search("(starting|available|sizes)", price, re.IGNORECASE)
                if m:
                    # multiple sizes/prices; details live on the product page
                    yield Request(url, callback=self.parse_product)
                    continue
                # m1: "<qualifier> for $X" suffix; m2: "$X <qualifier>" prefix;
                # m: plain trailing price with no qualifier text
                m1 = re.search("(.*?(can|pound|oz|bag|brick|pack|jar|filter)+.*?)(for)?[\s]*(\$?[\d,\.]+)[\s]*$", price, re.IGNORECASE)
                m2 = re.search("^[\s]*(\$?[\d,\.]+)(.*?(can|oz|bag|brick|pound|kilo|pack|jar|filter)+.*?)", price, re.IGNORECASE)
                m = re.search("(\$?[\d]+[\d,\.]+)$", price)
                if m1:
                    name += ", %s" % m1.group(1).strip()
                    price = m1.group(4)
                elif m2:
                    name += ", %s" % m2.group(2).strip()
                    price = m2.group(1)
                elif m:
                    price = m.group(1)
                else:
                    yield Request(url, callback=self.parse_product)
                    # print "ERROR!! NO PRICE!! %s" % response.url
                    # logging.error("ERROR!! NO PRICE!! %s, %s" % (name, response.url))
                    continue
                product = Product()
                loader = ProductLoader(item=product, response=response)
                loader.add_value('url', url)
                loader.add_value('name', name)
                loader.add_value('price', price)
                loader.add_value('sku', '')
                yield loader.load_item()
            else:
                # if refurbished products
                name = product_el.select("a[1]/text()").extract()
                if not name:
                    print "ERROR!! NO NAME 2!! %s" % response.url
                    logging.error("ERROR!! NO NAME 2!! %s" % response.url)
                    continue
                name = [x.strip() for x in name]
                name = name[0].strip().strip("-")
                url = product_el.select("a[1]/@href").extract()
                if not url:
                    print "ERROR!! NO URL 2!! %s" % response.url
                    logging.error("ERROR!! NO URL 2!! %s, %s" % (name, response.url))
                    continue
                url = url[0]
                url = urljoin_rfc(URL_BASE, url)
                price = product_el.select("a/strong/text()").extract()
                if not price:
                    yield Request(url, callback=self.parse_product)
                    # print "ERROR!! NO PRICE 2!! %s" % response.url
                    # logging.error("ERROR!! NO PRICE 2!! %s, %s" % (name, response.url))
                    continue
                price = price[0]
                m = re.search("(\$?[0-9\.]*)", price)
                if m:
                    price = m.group(1)
                else:
                    yield Request(url, callback=self.parse_product)
                    # print "ERROR!! NO PRICE 2!! %s" % response.url
                    # logging.error("ERROR!! NO PRICE 2!! %s, %s" % (name, response.url))
                    continue
                product = Product()
                loader = ProductLoader(item=product, response=response)
                loader.add_value('url', url)
                loader.add_value('name', name)
                loader.add_value('price', price)
                loader.add_value('sku', '')
                yield loader.load_item()

    def parse_product(self, response):
        """Parse a single product page, yielding one item per price variant."""
        hxs = HtmlXPathSelector(response)
        name = hxs.select("//div[@id='pricetext']/b/text()").extract()
        if not name:
            print "ERROR!! NO NAME!! %s" % response.url
            logging.error("ERROR!! NO NAME!! %s, %s" % (name, response.url))
            return
        name = [x.strip() for x in name]
        name = name[0]
        url = response.url
        # variant labels (e.g. sizes) rendered as span.style1
        names2 = hxs.select("//div[@id='pricetext']/span[@class='style1']/text()").extract()
        price_position = 1
        if names2:
            prices = hxs.select("//div[@id='pricetext']/text()").extract()
            for name2 in names2:
                # NOTE(review): `name` accumulates across iterations, so each
                # later variant repeats the earlier suffixes -- confirm intended.
                name += " %s" % name2.strip()
                price = prices[price_position]
                product = Product()
                loader = ProductLoader(item=product, response=response)
                loader.add_value('url', url)
                loader.add_value('name', name)
                loader.add_value('price', price)
                loader.add_value('sku', '')
                yield loader.load_item()
                price_position+=1
        else:
            prices = hxs.select("//div[@id='pricetext']/text()").extract()
            if len(prices) > (price_position+1):
                # several "<label>: $X" text nodes -- one item per match
                for i in range(price_position, len(prices)-1):
                    m = re.search("(.*?):[\s]*(\$?[\d,.]+)$", prices[i])
                    if m:
                        name += m.group(1).strip()
                        price = m.group(2)
                        product = Product()
                        loader = ProductLoader(item=product, response=response)
                        loader.add_value('url', url)
                        loader.add_value('name', name)
                        loader.add_value('price', price)
                        loader.add_value('sku', '')
                        yield loader.load_item()
            else:
                # single-price page
                price = prices[price_position]
                product = Product()
                loader = ProductLoader(item=product, response=response)
                loader.add_value('url', url)
                loader.add_value('name', name)
                loader.add_value('price', price)
                loader.add_value('sku', '')
                yield loader.load_item()
|
|
# -*- coding: utf-8 -*-
from flexmock import flexmock, flexmock_teardown
from .. import OratorTestCase, mock
from ..utils import MockModel, MockQueryBuilder, MockConnection, MockProcessor
from orator.query.grammars.grammar import QueryGrammar
from orator.query.builder import QueryBuilder
from orator.orm.builder import Builder
from orator.orm.model import Model
from orator.orm import belongs_to, has_many, scope
from orator.exceptions.orm import ModelNotFound
from orator.orm.collection import Collection
from orator.connections import Connection
from orator.query.processors import QueryProcessor
class BuilderTestCase(OratorTestCase):
    """Unit tests for the ORM `Builder`, using mock query builders/models.

    Each test wires a `Builder` to a mocked `QueryBuilder` (see
    `get_mock_query_builder`) so no database is touched; assertions check
    that the ORM layer forwards the right calls to the query layer.
    """

    def tearDown(self):
        # release flexmock expectations set up by individual tests
        flexmock_teardown()

    def test_find_method(self):
        """find() constrains on the qualified primary key and calls first()."""
        builder = Builder(self.get_mock_query_builder())
        builder.set_model(self.get_mock_model())
        builder.get_query().where = mock.MagicMock()
        builder.first = mock.MagicMock(return_value='baz')

        result = builder.find('bar', ['column'])
        builder.get_query().where.assert_called_once_with(
            'foo_table.foo', '=', 'bar'
        )
        self.assertEqual('baz', result)

    def test_find_by_method(self):
        """find_by() constrains on the given column and calls first()."""
        builder = Builder(self.get_mock_query_builder())
        builder.set_model(self.get_mock_model())
        builder.get_query().where = mock.MagicMock()
        builder.first = mock.MagicMock(return_value='baz')

        result = builder.find_by('foo', 'bar', ['column'])
        builder.get_query().where.assert_called_once_with(
            'foo', '=', 'bar'
        )
        self.assertEqual('baz', result)

    def test_find_by_or_fail_raises_model_not_found_exception(self):
        """find_by_or_fail() raises ModelNotFound when first() returns None."""
        model = self.get_mock_model()
        builder = Builder(self.get_mock_query_builder())
        builder.set_model(model)
        builder.get_query().where = mock.MagicMock()
        builder.first = mock.MagicMock(return_value=None)

        self.assertRaises(
            ModelNotFound,
            builder.find_by_or_fail,
            'foo',
            'bar',
            ['column']
        )
        builder.get_query().where.assert_called_once_with(
            'foo', '=', 'bar'
        )
        builder.first.assert_called_once_with(
            ['column']
        )

    def test_find_or_new_model_found(self):
        """find_or_new() returns the found model when one exists."""
        model = self.get_mock_model()
        model.find_or_new = mock.MagicMock(return_value='baz')
        builder = Builder(self.get_mock_query_builder())
        builder.set_model(model)
        builder.get_query().where = mock.MagicMock()
        builder.first = mock.MagicMock(return_value='baz')

        expected = model.find_or_new('bar', ['column'])
        result = builder.find('bar', ['column'])
        builder.get_query().where.assert_called_once_with(
            'foo_table.foo', '=', 'bar'
        )
        self.assertEqual(expected, result)

    def test_find_or_new_model_not_found(self):
        """find_or_new() returns a fresh model instance when none is found."""
        model = self.get_mock_model()
        model.find_or_new = mock.MagicMock(return_value=self.get_mock_model())
        builder = Builder(self.get_mock_query_builder())
        builder.set_model(model)
        builder.get_query().where = mock.MagicMock()
        builder.first = mock.MagicMock(return_value=None)

        result = model.find_or_new('bar', ['column'])
        find_result = builder.find('bar', ['column'])
        builder.get_query().where.assert_called_once_with(
            'foo_table.foo', '=', 'bar'
        )
        self.assertIsNone(find_result)
        self.assertIsInstance(result, Model)

    def test_find_or_fail_raises_model_not_found_exception(self):
        """find_or_fail() raises ModelNotFound for a missing single id."""
        model = self.get_mock_model()
        builder = Builder(self.get_mock_query_builder())
        builder.set_model(model)
        builder.get_query().where = mock.MagicMock()
        builder.first = mock.MagicMock(return_value=None)

        self.assertRaises(
            ModelNotFound,
            builder.find_or_fail,
            'bar',
            ['column']
        )
        builder.get_query().where.assert_called_once_with(
            'foo_table.foo', '=', 'bar'
        )
        builder.first.assert_called_once_with(
            ['column']
        )

    def test_find_or_fail_with_many_raises_model_not_found_exception(self):
        """find_or_fail() with an id list raises if not all ids are found."""
        model = self.get_mock_model()
        builder = Builder(self.get_mock_query_builder())
        builder.set_model(model)
        builder.get_query().where_in = mock.MagicMock()
        # only one of the two requested ids comes back
        builder.get = mock.MagicMock(return_value=Collection([1]))

        self.assertRaises(
            ModelNotFound,
            builder.find_or_fail,
            [1, 2],
            ['column']
        )
        builder.get_query().where_in.assert_called_once_with(
            'foo_table.foo', [1, 2]
        )
        builder.get.assert_called_once_with(
            ['column']
        )

    def test_first_or_fail_raises_model_not_found_exception(self):
        """first_or_fail() raises ModelNotFound when there is no row."""
        model = self.get_mock_model()
        builder = Builder(self.get_mock_query_builder())
        builder.set_model(model)
        builder.first = mock.MagicMock(return_value=None)

        self.assertRaises(
            ModelNotFound,
            builder.first_or_fail,
            ['column']
        )
        builder.first.assert_called_once_with(
            ['column']
        )

    def test_find_with_many(self):
        """find() with an id list uses where_in() and get()."""
        model = self.get_mock_model()
        builder = Builder(self.get_mock_query_builder())
        builder.set_model(model)
        builder.get_query().where_in = mock.MagicMock()
        builder.get = mock.MagicMock(return_value='baz')

        result = builder.find([1, 2], ['column'])
        self.assertEqual('baz', result)
        builder.get_query().where_in.assert_called_once_with(
            'foo_table.foo', [1, 2]
        )
        builder.get.assert_called_once_with(
            ['column']
        )

    def test_first(self):
        """first() takes one row and returns the first collection element."""
        model = self.get_mock_model()
        builder = Builder(self.get_mock_query_builder())
        builder.set_model(model)
        builder.take = mock.MagicMock(return_value=builder)
        builder.get = mock.MagicMock(return_value=Collection(['bar']))

        result = builder.first()
        self.assertEqual('bar', result)
        builder.take.assert_called_once_with(
            1
        )
        builder.get.assert_called_once_with(
            ['*']
        )

    def test_get_loads_models_and_hydrates_eager_relations(self):
        """get() hydrates models and eager-loads their relations."""
        flexmock(Builder)
        builder = Builder(self.get_mock_query_builder())
        builder.should_receive('get_models').with_args(['foo']).and_return(['bar'])
        builder.should_receive('eager_load_relations').with_args(['bar']).and_return(['bar', 'baz'])
        builder.set_model(self.get_mock_model())
        builder.get_model().new_collection = mock.MagicMock(return_value=Collection(['bar', 'baz']))

        results = builder.get(['foo'])
        self.assertEqual(['bar', 'baz'], results.all())
        builder.get_model().new_collection.assert_called_with(['bar', 'baz'])

    def test_get_does_not_eager_relations_when_no_results_are_returned(self):
        """get() with no rows still yields an (empty) collection."""
        flexmock(Builder)
        builder = Builder(self.get_mock_query_builder())
        builder.should_receive('get_models').with_args(['foo']).and_return(['bar'])
        builder.should_receive('eager_load_relations').with_args(['bar']).and_return([])
        builder.set_model(self.get_mock_model())
        builder.get_model().new_collection = mock.MagicMock(return_value=Collection([]))

        results = builder.get(['foo'])
        self.assertEqual([], results.all())
        builder.get_model().new_collection.assert_called_with([])

    def test_pluck_with_model_found(self):
        """pluck() returns the named attribute of the first model."""
        builder = Builder(self.get_mock_query_builder())
        model = {'name': 'foo'}
        builder.first = mock.MagicMock(return_value=model)

        self.assertEqual('foo', builder.pluck('name'))
        builder.first.assert_called_once_with(
            ['name']
        )

    def test_pluck_with_model_not_found(self):
        """pluck() returns None when no model is found."""
        builder = Builder(self.get_mock_query_builder())
        builder.first = mock.MagicMock(return_value=None)

        self.assertIsNone(builder.pluck('name'))

    def test_chunk(self):
        """chunk() hydrates each underlying chunk into a collection."""
        query_builder = self.get_mock_query_builder()
        query_results = [['foo1', 'foo2'], ['foo3']]
        query_builder.chunk = mock.MagicMock(return_value=query_results)
        builder = Builder(query_builder)
        model = self.get_mock_model()
        builder.set_model(model)
        results = [Collection(['foo1', 'foo2']), Collection(['foo3'])]
        model.hydrate = mock.MagicMock(return_value=[])
        model.new_collection = mock.MagicMock(side_effect=results)
        model.get_connection_name = mock.MagicMock(return_value='foo')

        i = 0
        for result in builder.chunk(2):
            self.assertEqual(result, results[i])
            i += 1
        self.assertEqual(i, 2)
        query_builder.chunk.assert_has_calls([
            mock.call(2)
        ])
        model.hydrate.assert_has_calls([
            mock.call(['foo1', 'foo2'], 'foo'),
            mock.call(['foo3'], 'foo')
        ])
        model.new_collection.assert_has_calls([
            mock.call([]),
            mock.call([])
        ])

    # TODO: lists with get mutators
    def test_lists_without_model_getters(self):
        """lists() extracts the column values without applying mutators."""
        builder = self.get_builder()
        builder.get_query().get = mock.MagicMock(return_value=[{'name': 'bar'}, {'name': 'baz'}])
        builder.set_model(self.get_mock_model())
        builder.get_model().has_get_mutator = mock.MagicMock(return_value=False)

        result = builder.lists('name')
        self.assertEqual(['bar', 'baz'], result)
        builder.get_query().get.assert_called_once_with(['name'])

    def test_get_models_hydrates_models(self):
        """get_models() feeds raw records into Model.hydrate()."""
        builder = Builder(self.get_mock_query_builder())
        records = Collection([{
            'name': 'john', 'age': 26
        }, {
            'name': 'jane', 'age': 28
        }])
        builder.get_query().get = mock.MagicMock(return_value=records)
        model = self.get_mock_model()
        builder.set_model(model)
        model.get_connection_name = mock.MagicMock(return_value='foo_connection')
        model.hydrate = mock.MagicMock(return_value=Collection(['hydrated']))

        models = builder.get_models(['foo'])
        self.assertEqual(models.all(), ['hydrated'])
        model.get_table.assert_called_once_with()
        model.get_connection_name.assert_called_once_with()
        model.hydrate.assert_called_once_with(
            records, 'foo_connection'
        )

    def test_macros_are_called_on_builder(self):
        """macro() registers a callable invokable as a builder method."""
        builder = Builder(QueryBuilder(
            flexmock(Connection),
            flexmock(QueryGrammar),
            flexmock(QueryProcessor)
        ))

        def foo_bar(builder):
            builder.foobar = True

            return builder

        builder.macro('foo_bar', foo_bar)
        result = builder.foo_bar()

        self.assertEqual(result, builder)
        self.assertTrue(builder.foobar)

    def test_eager_load_relations_load_top_level_relationships(self):
        """Only top-level relations are loaded directly; nested ones defer."""
        flexmock(Builder)
        builder = Builder(flexmock(QueryBuilder(None, None, None)))
        nop1 = lambda: None
        nop2 = lambda: None
        builder.set_eager_loads({'foo': nop1, 'foo.bar': nop2})
        builder.should_receive('_load_relation').with_args(['models'], 'foo', nop1).and_return(['foo'])

        results = builder.eager_load_relations(['models'])
        self.assertEqual(['foo'], results)

    def test_eager_load_accept_queries(self):
        """A Builder passed as an eager-load constraint is merged in."""
        model = OrmBuilderTestModelCloseRelated()
        flexmock(Builder)
        builder = Builder(flexmock(QueryBuilder(None, None, None)))
        nop1 = OrmBuilderTestModelFarRelatedStub.where('id', 5)
        builder.set_eager_loads({'foo': nop1})
        relation = flexmock()
        relation.should_receive('add_eager_constraints').once().with_args(['models'])
        relation.should_receive('init_relation').once().with_args(['models'], 'foo').and_return(['models'])
        relation.should_receive('get_eager').once().and_return(['results'])
        relation.should_receive('match').once()\
            .with_args(['models'], ['results'], 'foo').and_return(['foo'])
        builder.should_receive('get_relation').once().with_args('foo').and_return(relation)
        relation.should_receive('merge_query').with_args(nop1).and_return(relation)

        results = builder.eager_load_relations(['models'])
        self.assertEqual(['foo'], results)

    def test_relationship_eager_load_process(self):
        """A callable eager-load constraint receives the relation query."""
        proof = flexmock()
        flexmock(Builder)
        builder = Builder(flexmock(QueryBuilder(None, None, None)))

        def callback(q):
            proof.foo = q

        builder.set_eager_loads({'orders': callback})
        relation = flexmock()
        relation.should_receive('add_eager_constraints').once().with_args(['models'])
        relation.should_receive('init_relation').once().with_args(['models'], 'orders').and_return(['models'])
        relation.should_receive('get_eager').once().and_return(['results'])
        relation.should_receive('get_query').once().and_return(relation)
        relation.should_receive('match').once()\
            .with_args(['models'], ['results'], 'orders').and_return(['models.matched'])
        builder.should_receive('get_relation').once().with_args('orders').and_return(relation)

        results = builder.eager_load_relations(['models'])
        self.assertEqual(['models.matched'], results)
        self.assertEqual(relation, proof.foo)

    def test_get_relation_properly_sets_nested_relationships(self):
        """get_relation() pushes nested relation names onto the sub-query."""
        flexmock(Builder)
        builder = Builder(flexmock(QueryBuilder(None, None, None)))
        model = flexmock(Model())
        relation = flexmock()
        model.set_relation('orders', relation)
        builder.set_model(model)
        relation_query = flexmock()
        relation.should_receive('get_query').and_return(relation_query)
        relation_query.should_receive('with_').once().with_args({'lines': None, 'lines.details': None})
        builder.set_eager_loads({
            'orders': None,
            'orders.lines': None,
            'orders.lines.details': None
        })

        relation = builder.get_relation('orders')

    def test_query_passthru(self):
        """Unknown methods pass through to the query builder, keeping chaining."""
        builder = self.get_builder()
        builder.get_query().foobar = mock.MagicMock(return_value='foo')

        self.assertIsInstance(builder.foobar(), Builder)
        self.assertEqual(builder.foobar(), builder)

        builder = self.get_builder()
        builder.get_query().insert = mock.MagicMock(return_value='foo')

        self.assertEqual('foo', builder.insert(['bar']))
        builder.get_query().insert.assert_called_once_with(['bar'])

    def test_query_scopes(self):
        """@scope-decorated model methods are callable on the builder."""
        builder = self.get_builder()
        builder.get_query().from_ = mock.MagicMock()
        builder.get_query().where = mock.MagicMock()
        model = OrmBuilderTestModelScopeStub()
        builder.set_model(model)
        result = builder.approved()

        self.assertEqual(result, builder)

    def test_simple_where(self):
        """where() forwards to the query builder with 'and' conjunction."""
        builder = self.get_builder()
        builder.get_query().where = mock.MagicMock()
        result = builder.where('foo', '=', 'bar')
        self.assertEqual(builder, result)
        builder.get_query().where.assert_called_once_with('foo', '=', 'bar', 'and')

    def test_nested_where(self):
        """where() with a Builder argument adds a nested where query."""
        nested_query = self.get_builder()
        nested_raw_query = self.get_mock_query_builder()
        nested_query.get_query = mock.MagicMock(return_value=nested_raw_query)
        model = self.get_mock_model()
        builder = self.get_builder()
        builder.set_model(model)
        builder.get_query().add_nested_where_query = mock.MagicMock()

        result = builder.where(nested_query)
        self.assertEqual(builder, result)
        builder.get_query().add_nested_where_query.assert_called_once_with(nested_raw_query, 'and')

    # TODO: nested query with scopes

    def test_delete_override(self):
        """on_delete() replaces the default delete behaviour."""
        builder = self.get_builder()
        builder.on_delete(lambda builder_: {'foo': builder_})

        self.assertEqual({'foo': builder}, builder.delete())

    def test_has_nested(self):
        """has('a.b') is equivalent to where_has('a', q -> q.has('b'))."""
        builder = OrmBuilderTestModelParentStub.where_has('foo', lambda q: q.has('bar'))

        result = OrmBuilderTestModelParentStub.has('foo.bar').to_sql()
        self.assertEqual(builder.to_sql(), result)

    def test_has_nested_with_constraints(self):
        """Nested where_has() constraints produce the same SQL either way."""
        model = OrmBuilderTestModelParentStub
        builder = model.where_has('foo', lambda q: q.where_has('bar', lambda q: q.where('baz', 'bam'))).to_sql()

        result = model.where_has('foo.bar', lambda q: q.where('baz', 'bam')).to_sql()
        self.assertEqual(builder, result)

    def test_where_exists_accepts_builder_instance(self):
        """where_exists() embeds the given builder as an EXISTS subquery."""
        model = OrmBuilderTestModelCloseRelated

        builder = model.where_exists(OrmBuilderTestModelFarRelatedStub.where('foo', 'bar')).to_sql()
        self.assertEqual(
            'SELECT * FROM "orm_builder_test_model_close_relateds" '
            'WHERE EXISTS (SELECT * FROM "orm_builder_test_model_far_related_stubs" WHERE "foo" = ?)',
            builder
        )

    def get_builder(self):
        # fresh ORM Builder over a mocked query builder
        return Builder(self.get_mock_query_builder())

    def get_mock_model(self):
        # model whose table is 'foo_table' and key is 'foo' (see MockModel)
        model = MockModel().prepare_mock()

        return model

    def get_mock_query_builder(self):
        # query builder with mocked connection/processor and a real grammar
        connection = MockConnection().prepare_mock()
        processor = MockProcessor().prepare_mock()

        builder = MockQueryBuilder(
            connection,
            QueryGrammar(),
            processor
        ).prepare_mock()

        return builder
class OratorTestModel(Model):
    """Base model for the builder tests; never touches a real database."""

    @classmethod
    def _boot_columns(cls):
        # no real table behind these models: nothing to introspect
        return []

    @classmethod
    def resolve_connection(cls, connection=None):
        # hand back a flexmock'd connection instead of resolving a real one
        return flexmock(Connection(None))
class OrmBuilderTestModelFarRelatedStub(OratorTestModel):
    """Plain model stub used as the far side of test relations."""

    pass
class OrmBuilderTestModelScopeStub(OratorTestModel):
    """Model stub exposing a query scope (used by test_query_scopes)."""

    @scope
    def approved(self, query):
        # simple scope: constrain on foo = 'bar'
        query.where('foo', 'bar')
class OrmBuilderTestModelCloseRelated(OratorTestModel):
    """Model stub with a has-many relation to the far stub."""

    @has_many
    def bar(self):
        return OrmBuilderTestModelFarRelatedStub
class OrmBuilderTestModelParentStub(OratorTestModel):
    """Model stub with a belongs-to relation (used by the has() tests)."""

    @belongs_to
    def foo(self):
        return OrmBuilderTestModelCloseRelated
|
|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for subsampling the MAG dataset."""
import collections
import jraph
import numpy as np
def get_or_sample_row(node_id: int,
                      nb_neighbours: int,
                      csr_matrix, remove_duplicates: bool):
  """Return a node's full CSR row, or a random subset of its neighbours.

  Returns None for empty neighbourhoods.  Rows past the end of the matrix
  are treated as empty.
  """
  # Treat out-of-range node ids as having no neighbours.
  if node_id + 1 >= csr_matrix.indptr.shape[0]:
    lo = hi = 0
  else:
    lo = csr_matrix.indptr[node_id]
    hi = csr_matrix.indptr[node_id + 1]

  degree = hi - lo
  if degree == 0:  # Skip empty neighbourhoods
    return None
  if degree <= nb_neighbours:
    # Row already fits in the budget: take it whole.
    return csr_matrix.indices[lo:hi]
  if degree < 5 * nb_neighbours:
    # Small surroundings: sample positions directly, without replacement.
    take = min(nb_neighbours, degree)
    picked = lo + np.random.choice(degree, size=(take,), replace=False)
    return csr_matrix.indices[picked]
  # Large surroundings: sample indices with replacement instead of slicing.
  # To extend GraphSAGE ("uniform w/ replacement"), modify this call
  picked = np.random.randint(lo, hi, size=(nb_neighbours,))
  if remove_duplicates:
    picked = np.unique(picked)
  return csr_matrix.indices[picked]
def get_neighbours(node_id: int,
                   node_type: int,
                   neighbour_type: int,
                   nb_neighbours: int,
                   remove_duplicates: bool,
                   author_institution_csr, institution_author_csr,
                   author_paper_csr, paper_author_csr,
                   paper_paper_csr, paper_paper_transpose_csr):
  """Fetch the edge indices from one node to corresponding neighbour type."""
  # Route each supported (node_type, neighbour_type) pair to its CSR matrix.
  # TYPES: 0: paper, 1: author, 2: institution, 3: paper (bidirectional).
  routing = {
      (0, 0): paper_paper_transpose_csr,  # Citing
      (0, 1): paper_author_csr,
      (0, 3): paper_paper_csr,  # Cited
      (1, 0): author_paper_csr,
      (1, 2): author_institution_csr,
      (2, 1): institution_author_csr,
  }
  if (node_type, neighbour_type) not in routing:
    raise ValueError('Non-existent edge type requested')
  csr = routing[(node_type, neighbour_type)]
  return get_or_sample_row(node_id, nb_neighbours, csr, remove_duplicates)
def get_senders(neighbour_type: int,
                sender_index,
                paper_features):
  """Get the sender features from given neighbours.

  Papers (types 0 and 3) use their real feature rows; authors and
  institutions (types 1 and 2) get zero vectors of matching width.
  """
  if neighbour_type in (0, 3):
    return paper_features[sender_index]
  if neighbour_type in (1, 2):
    # No features for these node types yet.  Consider averages
    return np.zeros((sender_index.shape[0], paper_features.shape[1]))
  raise ValueError('Non-existent node type requested')
def make_edge_type_feature(node_type: int, neighbour_type: int):
  """Two-hot encode an edge type into a length-7 vector.

  Slots 0-2 mark the source node type; slots 3-6 mark the neighbour type.
  """
  onehot = np.zeros(7)
  onehot[[node_type, neighbour_type + 3]] = 1.0
  return onehot
def subsample_graph(paper_id: int,
                    author_institution_csr,
                    institution_author_csr,
                    author_paper_csr,
                    paper_author_csr,
                    paper_paper_csr,
                    paper_paper_transpose_csr,
                    max_nb_neighbours_per_type,
                    max_nodes=None,
                    max_edges=None,
                    paper_years=None,
                    remove_future_nodes=False,
                    deduplicate_nodes=False) -> jraph.GraphsTuple:
  """Subsample a graph around given paper ID.

  Performs a budgeted breadth-first expansion from the root paper over the
  six CSR adjacency matrices and packs the visited nodes and edges into a
  `jraph.GraphsTuple`.

  Args:
    paper_id: index of the root paper node in the original graph.
    author_institution_csr, institution_author_csr, author_paper_csr,
      paper_author_csr, paper_paper_csr, paper_paper_transpose_csr:
      CSR adjacency matrices for each directed edge type.
    max_nb_neighbours_per_type: nested structure indexed as
      [depth][node_type][neighbour_type] giving the per-hop fan-out;
      its length defines the maximum expansion depth.
    max_nodes: optional hard cap on subgraph node count.
    max_edges: optional hard cap on subgraph edge count.
    paper_years: optional per-paper year lookup; required for
      `remove_future_nodes`.
    remove_future_nodes: drop paper neighbours published after the root.
    deduplicate_nodes: reuse an existing subgraph node when the same
      (original index, type) pair is sampled again.

  Returns:
    A `jraph.GraphsTuple` whose nodes dict holds 'index', 'type', 'depth'.
  """
  if paper_years is not None:
    root_paper_year = paper_years[paper_id]
  else:
    root_paper_year = None
  # Add the center node as "node-zero"
  sub_nodes = [paper_id]
  num_nodes_in_subgraph = 1
  num_edges_in_subgraph = 0
  reached_node_budget = False
  reached_edge_budget = False
  node_and_type_to_index_in_subgraph = dict()
  node_and_type_to_index_in_subgraph[(paper_id, 0)] = 0
  # Store all (integer) depths as an additional feature
  depths = [0]
  types = [0]
  sub_edges = []
  sub_senders = []
  sub_receivers = []
  # Store all unprocessed neighbours
  # Each neighbour is stored as a 4-tuple (node_index in original graph,
  # node_index in subsampled graph, type, number of hops away from source).
  # TYPES: 0: paper, 1: author, 2: institution, 3: paper (for bidirectional)
  neighbour_deque = collections.deque([(paper_id, 0, 0, 0)])
  max_depth = len(max_nb_neighbours_per_type)
  while neighbour_deque and not reached_edge_budget:
    left_entry = neighbour_deque.popleft()
    node_index, node_index_in_sampled_graph, node_type, node_depth = left_entry
    # Expand from this node, to a node of related type
    for neighbour_type in range(4):
      if reached_edge_budget:
        break  # Budget may have been reached in previous type; break here.
      nb_neighbours = max_nb_neighbours_per_type[node_depth][node_type][neighbour_type]  # pylint:disable=line-too-long
      # Only extend if we want to sample further in this edge type
      if nb_neighbours > 0:
        sampled_neighbors = get_neighbours(
            node_index,
            node_type,
            neighbour_type,
            nb_neighbours,
            deduplicate_nodes,
            author_institution_csr,
            institution_author_csr,
            author_paper_csr,
            paper_author_csr,
            paper_paper_csr,
            paper_paper_transpose_csr,
        )
        if sampled_neighbors is not None:
          if remove_future_nodes and root_paper_year is not None:
            # Only paper neighbours (types 0 and 3) carry a year.
            if neighbour_type in [0, 3]:
              sampled_neighbors = [
                  x for x in sampled_neighbors
                  if paper_years[x] <= root_paper_year
              ]
              if not sampled_neighbors:
                continue
          nb_neighbours = len(sampled_neighbors)
          edge_feature = make_edge_type_feature(node_type, neighbour_type)
          for neighbor_original_idx in sampled_neighbors:
            # Key into dict of existing nodes using both node id and type.
            neighbor_key = (neighbor_original_idx, neighbour_type % 3)
            # Get existing idx in subgraph if it exists.
            neighbor_subgraph_idx = node_and_type_to_index_in_subgraph.get(
                neighbor_key, None)
            if (not reached_node_budget and
                (not deduplicate_nodes or neighbor_subgraph_idx is None)):
              # If it does not exist already, or we are not deduplicating,
              # just create a new node and update the dict.
              neighbor_subgraph_idx = num_nodes_in_subgraph
              node_and_type_to_index_in_subgraph[neighbor_key] = (
                  neighbor_subgraph_idx)
              num_nodes_in_subgraph += 1
              sub_nodes.append(neighbor_original_idx)
              types.append(neighbour_type % 3)
              depths.append(node_depth + 1)
            if max_nodes is not None and num_nodes_in_subgraph >= max_nodes:
              reached_node_budget = True
              continue  # Move to next neighbor which might already exist.
            if node_depth < max_depth - 1:
              # If the neighbours are to be further expanded, enqueue them.
              # Expand only if the nodes did not already exist.
              neighbour_deque.append(
                  (neighbor_original_idx, neighbor_subgraph_idx,
                   neighbour_type % 3, node_depth + 1))
            # The neighbor id within graph is now fixed; just add edges.
            if neighbor_subgraph_idx is not None:
              # Either node existed before or was successfully added.
              sub_senders.append(neighbor_subgraph_idx)
              sub_receivers.append(node_index_in_sampled_graph)
              sub_edges.append(edge_feature)
              num_edges_in_subgraph += 1
            if max_edges is not None and num_edges_in_subgraph >= max_edges:
              reached_edge_budget = True
              break  # Break out of adding edges for this neighbor type
  # Stitch the graph together
  sub_nodes = np.array(sub_nodes, dtype=np.int32)
  if sub_senders:
    sub_senders = np.array(sub_senders, dtype=np.int32)
    sub_receivers = np.array(sub_receivers, dtype=np.int32)
    sub_edges = np.stack(sub_edges, axis=0)
  else:
    # Use empty arrays.
    sub_senders = np.zeros([0], dtype=np.int32)
    sub_receivers = np.zeros([0], dtype=np.int32)
    sub_edges = np.zeros([0, 7])
  # Finally, derive the sizes
  sub_n_node = np.array([sub_nodes.shape[0]])
  sub_n_edge = np.array([sub_senders.shape[0]])
  assert sub_nodes.shape[0] == num_nodes_in_subgraph
  assert sub_edges.shape[0] == num_edges_in_subgraph
  if max_nodes is not None:
    assert num_nodes_in_subgraph <= max_nodes
  if max_edges is not None:
    assert num_edges_in_subgraph <= max_edges
  types = np.array(types)
  depths = np.array(depths)
  sub_nodes = {
      'index': sub_nodes.astype(np.int32),
      'type': types.astype(np.int16),
      'depth': depths.astype(np.int16),
  }
  return jraph.GraphsTuple(nodes=sub_nodes,
                           edges=sub_edges.astype(np.float16),
                           senders=sub_senders.astype(np.int32),
                           receivers=sub_receivers.astype(np.int32),
                           globals=np.array([0], dtype=np.int16),
                           n_node=sub_n_node.astype(dtype=np.int32),
                           n_edge=sub_n_edge.astype(dtype=np.int32))
|
|
"""
Taiga integration for Zulip.
Tips for notification output:
*Emojis*: most of the events have specific emojis e.g.
- :notebook: - change of subject/name/description
- :chart_with_upwards_trend: - change of status
etc. If there's no meaningful emoji for a certain event, the defaults are used:
- :thought_balloon: - event connected to commenting
- :busts_in_silhouette: - event connected to a certain user
- :package: - all other events connected to user story
- :calendar: - all other events connected to milestones
- :clipboard: - all other events connected to tasks
- :bulb: - all other events connected to issues
*Text formatting*: if there has been a change of a property, the new value should always be in bold; otherwise the
subject of US/task should be in bold.
"""
from __future__ import absolute_import
from typing import Any, Dict, List, Mapping, Optional, Tuple, Text
from django.utils.translation import ugettext as _
from django.http import HttpRequest, HttpResponse
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile, Client
import ujson
from six.moves import range
@api_key_only_webhook_view('Taiga')
@has_request_variables
def api_taiga_webhook(request, user_profile, client, message=REQ(argument_type='body'),
                      stream=REQ(default='taiga'), topic=REQ(default='General')):
    # type: (HttpRequest, UserProfile, Client, Dict[str, Any], Text, Text) -> HttpResponse
    """Handle a Taiga webhook: render each parsed event and post one message."""
    events = parse_message(message)
    # One rendered line per event; sort so the output order is deterministic.
    rendered = sorted(generate_content(event) + '\n' for event in events)
    content = "".join(rendered)
    check_send_message(user_profile, client, 'stream', [stream], topic, content)
    return json_success()
# Notification templates keyed first by Taiga object type ('userstory',
# 'milestone', 'task', 'issue') and then by event name.  Each value is a
# %-format string filled from a parsed event's 'values' dict
# (keys: user, subject, old, new).
templates = {
    'userstory': {
        'create': u':package: %(user)s created user story **%(subject)s**.',
        'set_assigned_to': u':busts_in_silhouette: %(user)s assigned user story **%(subject)s** to %(new)s.',
        'unset_assigned_to': u':busts_in_silhouette: %(user)s unassigned user story **%(subject)s**.',
        'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned user story **%(subject)s**'
                               ' from %(old)s to %(new)s.',
        'points': u':game_die: %(user)s changed estimation of user story **%(subject)s**.',
        'blocked': u':lock: %(user)s blocked user story **%(subject)s**.',
        'unblocked': u':unlock: %(user)s unblocked user story **%(subject)s**.',
        'set_milestone': u':calendar: %(user)s added user story **%(subject)s** to sprint %(new)s.',
        'unset_milestone': u':calendar: %(user)s removed user story **%(subject)s** from sprint %(old)s.',
        'changed_milestone': u':calendar: %(user)s changed sprint of user story **%(subject)s** from %(old)s'
                             ' to %(new)s.',
        'changed_status': u':chart_with_upwards_trend: %(user)s changed status of user story **%(subject)s**'
                          ' from %(old)s to %(new)s.',
        'closed': u':checkered_flag: %(user)s closed user story **%(subject)s**.',
        'reopened': u':package: %(user)s reopened user story **%(subject)s**.',
        'renamed': u':notebook: %(user)s renamed user story from %(old)s to **%(new)s**.',
        'description_diff': u':notebook: %(user)s updated description of user story **%(subject)s**.',
        'commented': u':thought_balloon: %(user)s commented on user story **%(subject)s**.',
        'delete': u':x: %(user)s deleted user story **%(subject)s**.'
    },
    'milestone': {
        'create': u':calendar: %(user)s created sprint **%(subject)s**.',
        'renamed': u':notebook: %(user)s renamed sprint from %(old)s to **%(new)s**.',
        'estimated_start': u':calendar: %(user)s changed estimated start of sprint **%(subject)s**'
                           ' from %(old)s to %(new)s.',
        'estimated_finish': u':calendar: %(user)s changed estimated finish of sprint **%(subject)s**'
                            ' from %(old)s to %(new)s.',
        'delete': u':x: %(user)s deleted sprint **%(subject)s**.'
    },
    'task': {
        'create': u':clipboard: %(user)s created task **%(subject)s**.',
        'set_assigned_to': u':busts_in_silhouette: %(user)s assigned task **%(subject)s** to %(new)s.',
        'unset_assigned_to': u':busts_in_silhouette: %(user)s unassigned task **%(subject)s**.',
        'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned task **%(subject)s**'
                               ' from %(old)s to %(new)s.',
        'blocked': u':lock: %(user)s blocked task **%(subject)s**.',
        'unblocked': u':unlock: %(user)s unblocked task **%(subject)s**.',
        'set_milestone': u':calendar: %(user)s added task **%(subject)s** to sprint %(new)s.',
        'changed_milestone': u':calendar: %(user)s changed sprint of task **%(subject)s** from %(old)s to %(new)s.',
        'changed_status': u':chart_with_upwards_trend: %(user)s changed status of task **%(subject)s**'
                          ' from %(old)s to %(new)s.',
        'renamed': u':notebook: %(user)s renamed task %(old)s to **%(new)s**.',
        'description_diff': u':notebook: %(user)s updated description of task **%(subject)s**.',
        'commented': u':thought_balloon: %(user)s commented on task **%(subject)s**.',
        'delete': u':x: %(user)s deleted task **%(subject)s**.',
        'changed_us': u':clipboard: %(user)s moved task **%(subject)s** from user story %(old)s to %(new)s.'
    },
    'issue': {
        'create': u':bulb: %(user)s created issue **%(subject)s**.',
        'set_assigned_to': u':busts_in_silhouette: %(user)s assigned issue **%(subject)s** to %(new)s.',
        'unset_assigned_to': u':busts_in_silhouette: %(user)s unassigned issue **%(subject)s**.',
        'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned issue **%(subject)s**'
                               ' from %(old)s to %(new)s.',
        'changed_priority': u':rocket: %(user)s changed priority of issue **%(subject)s** from %(old)s to %(new)s.',
        'changed_severity': u':warning: %(user)s changed severity of issue **%(subject)s** from %(old)s to %(new)s.',
        'changed_status': u':chart_with_upwards_trend: %(user)s changed status of issue **%(subject)s**'
                          ' from %(old)s to %(new)s.',
        'changed_type': u':bulb: %(user)s changed type of issue **%(subject)s** from %(old)s to %(new)s.',
        'renamed': u':notebook: %(user)s renamed issue %(old)s to **%(new)s**.',
        'description_diff': u':notebook: %(user)s updated description of issue **%(subject)s**.',
        'commented': u':thought_balloon: %(user)s commented on issue **%(subject)s**.',
        'delete': u':x: %(user)s deleted issue **%(subject)s**.'
    },
}
def get_old_and_new_values(change_type, message):
    # type: (str, Mapping[str, Any]) -> Tuple[Optional[Dict[str, Any]], Optional[Dict[str, Any]]]
    """ Parses the payload and finds previous and current value of change_type."""
    diff = message["change"]["diff"]
    if change_type in ['subject', 'name', 'estimated_finish', 'estimated_start']:
        # These fields always carry both endpoints in the payload.
        entry = diff[change_type]
        return entry["from"], entry["to"]
    # For other fields either endpoint may be absent; map missing to None.
    entry = diff.get(change_type, {})
    return entry.get("from"), entry.get("to")
def parse_comment(message):
    # type: (Mapping[str, Any]) -> Dict[str, Any]
    """ Parses the comment to issue, task or US. """
    values = {
        'user': get_owner_name(message),
        'subject': get_subject(message),
    }
    return {'event': 'commented', 'type': message["type"], 'values': values}
def parse_create_or_delete(message):
    # type: (Mapping[str, Any]) -> Dict[str, Any]
    """ Parses create or delete event. """
    values = {
        'user': get_owner_name(message),
        'subject': get_subject(message),
    }
    return {'type': message["type"], 'event': message["action"], 'values': values}
def parse_change_event(change_type, message):
    # type: (str, Mapping[str, Any]) -> Optional[Dict[str, Any]]
    """ Parses change event.

    Maps the changed field name to an internal event name plus the values
    needed to render its template.  Returns None for unsupported fields and
    for date changes where the value did not actually change.
    """
    evt = {}  # type: Dict[str, Any]
    values = {
        'user': get_owner_name(message),
        'subject': get_subject(message)
    }  # type: Dict[str, Any]
    if change_type in ["description_diff", "points"]:
        # Template mentions only the subject; no old/new values needed.
        event_type = change_type
    elif change_type in ["milestone", "assigned_to"]:
        # A missing endpoint distinguishes set/unset from a real change.
        old, new = get_old_and_new_values(change_type, message)
        if not old:
            event_type = "set_" + change_type
            values["new"] = new
        elif not new:
            event_type = "unset_" + change_type
            values["old"] = old
        else:
            event_type = "changed_" + change_type
            values.update({'old': old, 'new': new})
    elif change_type == "is_blocked":
        if message["change"]["diff"]["is_blocked"]["to"]:
            event_type = "blocked"
        else:
            event_type = "unblocked"
    elif change_type == "is_closed":
        if message["change"]["diff"]["is_closed"]["to"]:
            event_type = "closed"
        else:
            event_type = "reopened"
    elif change_type == "user_story":
        # Task moved between user stories.
        old, new = get_old_and_new_values(change_type, message)
        event_type = "changed_us"
        values.update({'old': old, 'new': new})
    elif change_type in ["subject", 'name']:
        event_type = 'renamed'
        old, new = get_old_and_new_values(change_type, message)
        values.update({'old': old, 'new': new})
    elif change_type in ["estimated_finish", "estimated_start"]:
        old, new = get_old_and_new_values(change_type, message)
        if not old == new:
            event_type = change_type
            values.update({'old': old, 'new': new})
        else:
            # date hasn't changed
            return None
    elif change_type in ["priority", "severity", "type", "status"]:
        event_type = 'changed_' + change_type
        old, new = get_old_and_new_values(change_type, message)
        values.update({'old': old, 'new': new})
    else:
        # we are not supporting this type of event
        return None
    evt.update({"type": message["type"], "event": event_type, "values": values})
    return evt
def parse_message(message):
    # type: (Mapping[str, Any]) -> List[Dict[str, Any]]
    """ Parses the payload by delegating to specialized functions. """
    events = []  # type: List[Dict[str, Any]]
    action = message["action"]
    if action in ('create', 'delete'):
        events.append(parse_create_or_delete(message))
    elif action == 'change':
        diff = message["change"]["diff"]
        if diff:
            for changed_field in diff:
                parsed = parse_change_event(changed_field, message)
                if parsed:
                    events.append(parsed)
        if message["change"]["comment"]:
            events.append(parse_comment(message))
    return events
def generate_content(data):
    # type: (Mapping[str, Any]) -> str
    """ Gets the template string and formats it with parsed data. """
    try:
        return templates[data['type']][data['event']] % data['values']
    except KeyError:
        # NOTE(review): json_error returns an HttpResponse, not a str, so an
        # unknown type/event pair hands a response object to the caller as
        # message content -- confirm this fallback is intended.
        return json_error(_("Unknown message"))
def get_owner_name(message):
    # type: (Mapping[str, Any]) -> str
    """Return the full name of the user who triggered the event."""
    owner = message["by"]
    return owner["full_name"]
def get_subject(message):
    # type: (Mapping[str, Any]) -> str
    """Return the changed object's subject, falling back to its name."""
    data = message["data"]
    if "subject" in data:
        return data["subject"]
    return data.get("name")
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import unittest
import numpy as np
import tensorflow as tf
from niftynet.engine.image_window import N_SPATIAL, LOCATION_FORMAT
from niftynet.engine.image_window_dataset import ImageWindowDataset
from niftynet.io.image_reader import ImageReader
from tests.niftynet_testcase import NiftyNetTestCase
IMAGE_PATH_2D_1 = os.path.join('.', 'example_volumes', 'gan_test_data')
IMAGE_PATH_3D = os.path.join('.', 'testing_data')
def get_2d_reader():
    """Build an ImageReader over the images under IMAGE_PATH_2D_1."""
    return ImageReader().initialise(
        {'mr': {'path_to_search': IMAGE_PATH_2D_1}})
def get_3d_reader():
    """Build an ImageReader over FLAIR volumes under IMAGE_PATH_3D."""
    spec = {
        'mr': {
            'path_to_search': IMAGE_PATH_3D,
            'filename_contains': 'FLAIR',
            'interp_order': 1,
        }
    }
    return ImageReader().initialise(spec)
class ImageWindowGenerator(ImageWindowDataset):
    """
    simple test class, replace ImageWindowDataset's layer_op

    Overrides layer_op to be a generator that yields the base class's
    single-subject layer_op result for every subject in turn.
    """

    def __init__(self, *args, **kwargs):
        ImageWindowDataset.__init__(self, *args, **kwargs)

    def layer_op(self):
        # Delegate each subject to the parent implementation.
        for idx in range(self.n_subjects):
            yield super(ImageWindowGenerator, self).layer_op(idx)
        # Equivalent manual implementation, kept for reference:
        # for idx in range(self.n_subjects):
        #     image_id, image_data, _ = self.reader(idx=idx)
        #     for mod in list(image_data):
        #         spatial_shape = image_data[mod].shape[:N_SPATIAL]
        #         coords = self.dummy_coordinates(image_id, spatial_shape, 1)
        #         image_data[LOCATION_FORMAT.format(mod)] = coords
        #         image_data[mod] = image_data[mod][np.newaxis, ...]
        #     yield image_data
@unittest.skip("temp skipping window generator test")
class ImageWindowDataset_Generator_2D_Test(NiftyNetTestCase):
    """Checks ImageWindowGenerator output windows for the 2-D reader."""

    def assert_window(self, window):
        # Accepts either a materialised dict or a generator of dicts.
        if not isinstance(window, dict):
            window = next(window)
        self.assertEqual(window['mr'].shape[1:3], (120, 160))
        self.assertEqual(window['mr_location'][0, 1:].tolist(),
                         [0, 0, 0, 120, 160, 1])
        self.assertEqual(window['mr'].dtype, np.float32)
        self.assertEqual(window['mr_location'].dtype, np.int32)

    def assert_tf_window(self, sampler):
        # Same checks, but on a window produced through the TF dataset op.
        with self.cached_session() as sess:
            window = sess.run(sampler.pop_batch_op())
        self.assert_window(window)

    def test_simple(self):
        sampler = ImageWindowGenerator(reader=get_2d_reader())
        self.assert_tf_window(sampler)
        self.assert_window(sampler())

    def test_batch_size(self):
        # batch size doesn't change the numpy interface
        sampler = ImageWindowGenerator(reader=get_2d_reader(), batch_size=2)
        self.assert_tf_window(sampler)
        self.assert_window(sampler())

    def test_window_size(self):
        sampler = ImageWindowGenerator(
            reader=get_2d_reader(),
            window_sizes=(0, 0, 0), batch_size=2)
        self.assert_tf_window(sampler)
        self.assert_window(sampler())

    def test_window_size_dict(self):
        sampler = ImageWindowGenerator(
            reader=get_2d_reader(),
            window_sizes={'mr': (0, 0, 0)},
            batch_size=2)
        self.assert_tf_window(sampler)
        self.assert_window(sampler())

    # # sampler layer_op()'s output shape is not checked
    # def test_wrong_window_size_dict(self):
    #     sampler = ImageWindowGenerator(
    #         reader=get_2d_reader(),
    #         batch_size=2,
    #         window_sizes=(3, 3, 0))
    #     self.assert_tf_window(sampler)

    def test_windows_per_image(self):
        sampler = ImageWindowGenerator(
            reader=get_2d_reader(), batch_size=2, windows_per_image=2)
        self.assert_window(sampler())

    def test_epoch(self):
        reader = get_2d_reader()
        batch_size = 3
        sampler = ImageWindowGenerator(
            reader=reader, batch_size=batch_size, epoch=1)
        with self.cached_session() as sess:
            next_element = sampler.pop_batch_op()
            iters = 0
            try:
                for _ in range(400):
                    window = sess.run(next_element)
                    iters = iters + 1
            except tf.errors.OutOfRangeError:
                pass
            # batch size 3, 40 images in total
            self.assertEqual(
                np.ceil(reader.num_subjects / np.float(batch_size)), iters)
@unittest.skip("temp skipping window generator test")
class ImageWindowDataset_Generator_3D_Test(NiftyNetTestCase):
    """Checks ImageWindowGenerator output windows for the 3-D reader."""

    def assert_window(self, window):
        # Accepts either a materialised dict or a generator of dicts.
        if not isinstance(window, dict):
            window = next(window)
        self.assertEqual(window['mr'].shape[1:4], (256, 168, 256))
        self.assertEqual(window['mr_location'][0, 1:].tolist(),
                         [0, 0, 0, 256, 168, 256])
        self.assertEqual(window['mr'].dtype, np.float32)
        self.assertEqual(window['mr_location'].dtype, np.int32)

    def assert_tf_window(self, sampler):
        # Same checks, but on a window produced through the TF dataset op.
        with self.cached_session() as sess:
            window = sess.run(sampler.pop_batch_op())
        self.assert_window(window)

    def test_simple(self):
        sampler = ImageWindowGenerator(reader=get_3d_reader())
        self.assert_tf_window(sampler)
        self.assert_window(sampler())

    def test_batch_size(self):
        # batch size doesn't change the numpy interface
        sampler = ImageWindowGenerator(reader=get_3d_reader(), batch_size=2)
        self.assert_tf_window(sampler)
        self.assert_window(sampler())

    def test_window_size(self):
        sampler = ImageWindowGenerator(
            reader=get_3d_reader(),
            window_sizes=(0, 0, 0), batch_size=2)
        self.assert_tf_window(sampler)
        self.assert_window(sampler())

    def test_window_size_dict(self):
        sampler = ImageWindowGenerator(
            reader=get_3d_reader(),
            window_sizes={'mr': (0, 0, 0)},
            batch_size=2)
        self.assert_tf_window(sampler)
        self.assert_window(sampler())

    def test_windows_per_image(self):
        sampler = ImageWindowGenerator(
            reader=get_3d_reader(), batch_size=2,
            windows_per_image=2)
        self.assert_window(sampler())

    def test_epoch(self):
        reader = get_3d_reader()
        batch_size = 3
        sampler = ImageWindowGenerator(
            reader=reader, batch_size=batch_size, epoch=1)
        with self.cached_session() as sess:
            next_element = sampler.pop_batch_op()
            iters = 0
            try:
                for _ in range(400):
                    window = sess.run(next_element)
                    iters = iters + 1
            except tf.errors.OutOfRangeError:
                pass
            # batch size 3, 4 images in total
            self.assertEqual(
                np.ceil(reader.num_subjects / np.float(batch_size)), iters)
@unittest.skip("temp skipping window generator test")
class ImageDatasetParamTest(NiftyNetTestCase):
    """Exercises batching parameters (pad/drop/dynamic final batch modes)."""

    def run_dataset(self, n_iters, n_threads, **kwargs):
        # Runs the sampler for up to n_iters batches and returns the number
        # of batches actually produced plus the concatenated locations.
        sampler = ImageWindowGenerator(**kwargs)
        sampler.set_num_threads(n_threads)
        with self.cached_session() as sess:
            true_iters = 0
            next_element = sampler.pop_batch_op()
            windows = []
            try:
                for _ in range(min(n_iters, 100)):
                    windows.append(
                        sess.run(next_element)['mr_location'])
                    true_iters = true_iters + 1
            except (tf.errors.OutOfRangeError, EOFError):
                pass
        assert true_iters <= 100, 'keep the test smaller than 100 iters'
        return true_iters, np.concatenate(windows, 0)

    def test_function(self):
        reader = get_2d_reader()
        #### with default batch padding
        n_iters, windows = self.run_dataset(
            n_iters=2,
            n_threads=4,
            reader=reader,
            batch_size=100,
            smaller_final_batch_mode='pad',
            windows_per_image=1,
            epoch=4)
        # elements: 4 * 40, batch size 100, resulting 2 batches
        self.assertEqual(n_iters, 2)
        self.assertEqual(windows.shape[0], 200)
        # all subjects evaluated
        uniq, counts = np.unique(windows[:, 0], return_counts=True)
        self.assertEqual(len(uniq), 41)
        self.assertTrue(np.all(counts[1:] == 4))
        #### with drop batch
        n_iters, windows = self.run_dataset(
            n_iters=2,
            n_threads=3,
            reader=reader,
            batch_size=100,
            smaller_final_batch_mode='drop',
            epoch=3)
        # elements: 4 * 40, batch size 100, resulting 1 batches
        self.assertEqual(n_iters, 1)
        self.assertEqual(windows.shape[0], 100)
        # all subjects evaluated, might not get all unique items
        # self.assertEqual(len(np.unique(windows[:, 0])), 40)
        #### with drop batch
        n_iters, windows = self.run_dataset(
            n_iters=2,
            n_threads=4,
            reader=reader,
            batch_size=100,
            queue_length=100,
            smaller_final_batch_mode='dynamic',
            epoch=4)
        # elements: 4 * 40, batch size 100, resulting 2 batches
        self.assertEqual(n_iters, 2)
        self.assertEqual(windows.shape[0], 160)
        # all subjects evaluated
        uniq, counts = np.unique(windows[:, 0], return_counts=True)
        self.assertEqual(len(uniq), 40)
        self.assertTrue(np.all(counts == 4))
if __name__ == "__main__":
    # Run all NiftyNetTestCase suites defined in this module.
    tf.test.main()
|
|
__author__ = "Andre Merzky, Ole Weidner"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
import os
import saga.context
import saga.exceptions as se
import saga.adaptors.base
import saga.adaptors.cpi.context
SYNC_CALL = saga.adaptors.cpi.decorators.SYNC_CALL
ASYNC_CALL = saga.adaptors.cpi.decorators.ASYNC_CALL
######################################################################
#
# adaptor meta data
#
_ADAPTOR_NAME = 'saga.adaptor.ssh'
_ADAPTOR_SCHEMAS = ['ssh']
_ADAPTOR_OPTIONS = []
# FIXME: complete attribute list
_ADAPTOR_CAPABILITIES = {
'ctx_attributes' : {saga.context.TYPE : "This MUST be set to ssh",
saga.context.USER_ID : "user name on target machine",
saga.context.USER_KEY : "maps to public ssh key",
saga.context.USER_CERT : "maps to private ssh key",
saga.context.USER_PASS : "passphrase for encrypted keys"}
}
_ADAPTOR_DOC = {
'name' : _ADAPTOR_NAME,
'cfg_options' : _ADAPTOR_OPTIONS,
'capabilities' : _ADAPTOR_CAPABILITIES,
'description' : """
This SSH :class:`saga.Context` adaptor points to an ssh keypair and a user_id
to be used for ssh based backend connections. For example, an ssh context can
be used to start jobs (:class:`saga.job.Job`) via ssh, to copy files
(:class:`saga.filesystem.File`) via sftp, etc.
Not all supported attributes have to be defined when using an ssh context
adaptor -- unspecified attributes will have sensible default values. For
example, the ``c.user_id`` will default to the local user id, and the default
passphrase in ``c.user_pass`` will be empty.
The `UserKey` and `UserCert` attributes can point to either the public or
private key of the ssh keypair -- the SAGA-Python implementation will internally
complete the respective other key (public key file names are expected to be
derived from the private key, by appending the suffix `.pub` -- `.pem` files are
expected to contain both public and private key.).
""",
'schemas' : {'ssh' : 'ssh key and userid information.'},
'example' : "examples/context/context_ssh.py"
}
_ADAPTOR_INFO = {
'name' : _ADAPTOR_NAME,
'version' : 'v0.1',
'schemas' : _ADAPTOR_SCHEMAS,
'cpis' : [{
'type' : 'saga.Context',
'class' : 'ContextSSH'
}
]
}
# ------------------------------------------------------------------------------
#
class Adaptor (saga.adaptors.base.Base):
    """
    This is the actual adaptor class, which gets loaded by SAGA (i.e. by the
    SAGA engine), and which registers the CPI implementation classes which
    provide the adaptor's functionality.
    """

    # --------------------------------------------------------------------------
    #
    def __init__ (self) :

        saga.adaptors.base.Base.__init__ (self, _ADAPTOR_INFO, _ADAPTOR_OPTIONS)

        # default contexts are discovered lazily by _get_default_contexts()
        self._default_contexts = []
        self._have_defaults = False

    # --------------------------------------------------------------------------
    #
    def sanity_check (self) :
        """Nothing to verify for this adaptor."""
        pass

    # --------------------------------------------------------------------------
    #
    def _get_default_contexts (self) :
        """
        Scan the user's ~/.ssh directory for usable keypairs and return a list
        of :class:`saga.Context` instances, one per readable, passphrase-free
        private/public key pair.  The scan runs once; results are cached.
        """

        if not self._have_defaults :

            import glob
            import subprocess

            # BUG FIX: os.environ['HOME'] raises KeyError when HOME is unset;
            # os.path.expanduser falls back to the password database.
            candidate_certs = glob.glob ("%s/.ssh/*" % os.path.expanduser ('~'))

            for key in candidate_certs :

                if os.path.isdir (key) :
                    # don't want directories -- only keys
                    continue

                if key.endswith ('.pub') :
                    # don't want public keys in this loop
                    continue

                # '.pem' files contain both halves; otherwise the public key
                # file name is derived from the private one.
                if key.endswith ('.pem') :
                    pub = "%s" % key
                else :
                    pub = "%s.pub" % key

                # the private and public keys must exist
                if not os.path.exists (key) or \
                   not os.path.isfile (key) :
                    self._logger.info ("ignore ssh key at %s (no private key: %s)" % (key, key))
                    continue

                if not os.path.exists (pub) or \
                   not os.path.isfile (pub) :
                    self._logger.info ("ignore ssh key at %s (no public key: %s)" % (key, pub))
                    continue

                # both files must be readable by the current user
                try :
                    open (key).close ()
                except Exception as e:
                    self._logger.info ("ignore ssh key at %s (key not readable: %s)" % (key, e))
                    continue

                try :
                    open (pub).close ()
                except Exception as e:
                    self._logger.info ("ignore ssh key at %s (public key %s not readable: %s)" % (key, pub, e))
                    continue

                # grep exits 0 (falsy after 'not') iff the key is encrypted
                if not subprocess.call (["sh", "-c", "grep ENCRYPTED %s > /dev/null" % key]) :
                    # needs passphrase. Great, actually, but won't work for
                    # default contexts as long as we can't prompt for pass
                    # phrases...
                    self._logger.warn ("ignore ssh key at %s (requires passphrase)" % key)
                    continue

                c = saga.Context ('ssh')
                c.user_key  = key
                c.user_cert = pub

                self._default_contexts.append (c)
                self._logger.info ("default ssh key at %s" % key)

            self._have_defaults = True

        # have defaults, and can return them...
        return self._default_contexts
# ------------------------------------------------------------------------------
#
class ContextSSH (saga.adaptors.cpi.context.Context) :
    """CPI implementation for ssh contexts: validates and completes keypairs."""

    # --------------------------------------------------------------------------
    #
    def __init__ (self, api, adaptor) :

        _cpi_base = super  (ContextSSH, self)
        _cpi_base.__init__ (api, adaptor)

    # --------------------------------------------------------------------------
    #
    @SYNC_CALL
    def init_instance (self, adaptor_state, type) :
        # Reject any schema this adaptor does not handle.
        if not type.lower () in (schema.lower() for schema in _ADAPTOR_SCHEMAS) :
            raise se.BadParameter \
                    ("the ssh context adaptor only handles ssh contexts - duh!")

        self.get_api ().type = type

        return self.get_api ()

    # --------------------------------------------------------------------------
    #
    @SYNC_CALL
    def _initialize (self, session) :
        # make sure we can access the key

        api = self.get_api ()

        key = None
        pub = None
        pwd = None

        if api.attribute_exists (saga.context.USER_KEY ) :
            key = api.get_attribute (saga.context.USER_KEY )
        if api.attribute_exists (saga.context.USER_CERT) :
            pub = api.get_attribute (saga.context.USER_CERT)
        if api.attribute_exists (saga.context.USER_PASS) :
            pwd = api.get_attribute (saga.context.USER_PASS)

        # either user_key or user_cert should be specified (or both),
        # then we complement the other, and convert to/from private
        # from/to public keys
        if pub and not key :
            key = pub

        if not key :
            # nothing to do, really. This likely means that ssh setup is
            # done out-of-band.
            return

        # convert public key into private key
        if key.endswith ('.pub') :
            if not pub :
                pub = key
            key = key[:-4]
        elif key.endswith ('.pem') :
            # '.pem' files carry both halves; the same path serves as both.
            if not pub :
                pub = key
        else :
            if not pub :
                pub = key+'.pub'

        # update the context with these setting
        api.set_attribute (saga.context.USER_KEY , key)
        api.set_attribute (saga.context.USER_CERT, pub)

        # the private and public keys must exist
        if not os.path.exists (key) or \
           not os.path.isfile (key) :
            raise se.BadParameter ("ssh key inaccessible: %s" % (key))
        if not os.path.exists (pub) or \
           not os.path.isfile (pub) :
            raise se.BadParameter ("ssh public key inaccessible: %s" % (pub))

        try :
            fh_key = open (key)
        except Exception as e:
            raise se.PermissionDenied ("ssh key '%s' not readable: %s" % (key, e))
        else :
            fh_key.close ()

        try :
            fh_pub = open (pub)
        except Exception as e:
            raise se.PermissionDenied ("ssh public key '%s' not readable: %s" % (pub, e))
        else :
            fh_pub.close ()

        import subprocess
        # grep exits 0 (falsy after 'not') iff the private key is encrypted;
        # if a password was supplied, verify it with ssh-keygen.
        if not subprocess.call (["sh", "-c", "grep ENCRYPTED %s > /dev/null" % key]) :
            if pwd :
                if subprocess.call (["sh", "-c", "ssh-keygen -y -f %s -P %s > /dev/null" % (key, pwd)]) :
                    raise se.PermissionDenied ("ssh key '%s' is encrypted, incorrect password" % (key))
            else :
                self._logger.error ("ssh key '%s' is encrypted, unknown password" % (key))

        self._logger.info ("init SSH context for key  at '%s' done" % key)
|
#!/usr/bin/python3
#import the ibm code
from IBMQuantumExperience import *
from interactCfg import *
from helperFunction import *
from Error import *
import os
import sys
import re
from Gate import SplitGate
import time
# get the caller's function name and line number (for error reporting)
def get_curl_info():
    """Return [function name, line number] of the call site in the caller.

    Uses sys._getframe(1) to inspect the caller's frame directly, replacing
    the previous raise/except trick that relied on a bare `except:` clause.
    """
    caller = sys._getframe(1)
    return [caller.f_code.co_name, caller.f_lineno]
# Map from this tool's gate mnemonics to Open-QASM instruction names.
qasmDic = {
    "I":"id",
    "X":"x",
    "Y":"y",
    "Z":"z",
    "H":"h",
    "S":"s",
    "T":"t",
    "CNOT":"cx",
    "Td":"tdg",
    "Sd":"sdg",
    "M":"measure",
    "Rz":"u1",
    "Ry":"u3"
}
# Maximum number of execution shots accepted by the IBM backend.
MAXTIMES = 8192
# Minimum number of execution shots accepted by the IBM backend.
MINTIMES = 1
class IBMQX:
    """Client for running locally generated QASM on the IBM Quantum Experience.

    On construction it reads config/IBMToken.cfg via readCfgPM() (token, url,
    device, shot count, per-device CNOT connectivity), validates the selected
    device and shot count, and prepares an output folder.  executeQASM()
    translates the circuit's QASM into Open-QASM that satisfies the device's
    CNOT connectivity, submits it, and writes both the raw and the analysed
    results to files under <circuit.urls>/IBMQX/.
    """
    def __init__(self):
        print("Connecting to the Server...")
        #change the config message in config/IBMToken.cfg
        tokenDic = readCfgPM()
        self.__config = {
            "url": tokenDic['url']
        }
        #init the api
        self.api = IBMQuantumExperience(tokenDic['token'], self.__config)
        print("Getting the available backend information...")
        deviceList = self.__getAvailalbeBak()
        self.device = tokenDic['device']
        self.shot = int(tokenDic['shot'])
        if self.device not in deviceList:
            try:
                raise IBMError("The seleted device isn't available")
            except IBMError as ie:
                info = get_curl_info()
                funName = info[0]
                line = info[1]
                writeErrorMsg(ie.value,funName,line)
        if self.shot < MINTIMES or self.shot > MAXTIMES:
            try:
                raise IBMError("The execute times must be from " + str(MINTIMES) + " to " + str(MAXTIMES) + ", but the input is " + str(self.shot))
            except IBMError as ie:
                info = get_curl_info()
                funName = info[0]
                line = info[1]
                writeErrorMsg(ie.value,funName,line)
        #get the connectivity map of the device according to the name of the device
        try:
            dic = tokenDic['connectivity'][self.device]
            #change the key and item from str to int
            self.connectivity = {}
            for key in dic:
                for item in dic[key]:
                    if int(key) in self.connectivity:
                        self.connectivity[int(key)].append(int(item))
                    else:
                        self.connectivity[int(key)] = [int(item)]
        except KeyError as ke:
            info = get_curl_info()
            funName = info[0]
            line = info[1]
            writeErrorMsg("the IBMToken.cfg doesn't have the connectivity of the current device: " + self.device,funName,line)
        #create a new folder to save the data of IBMQX
        circuit = checkEnvironment()
        if os.path.exists(circuit.urls + "/IBMQX") == False:
            try:
                os.makedirs(circuit.urls + "/IBMQX")
            except OSError:
                info = get_curl_info()
                funName = info[0]
                line = info[1]
                #bug fix: 'interactCfg' is not a bound name under
                #"from interactCfg import *"; call writeErrorMsg directly
                #like every other error path in this class
                writeErrorMsg("Can't create the new folder 'IBMQX'!",funName,line)
    #get the availalbe backend, return the backend list
    def __getAvailalbeBak(self):
        """Return the list of backend names reported by the API."""
        result = []
        lists = self.api.available_backends()
        for item in lists:
            try:
                backend = item['name']
                result.append(backend)
            except KeyError:
                info = get_curl_info()
                funName = info[0]
                line = info[1]
                #bug fix: the message was joined to funName with '.' instead
                #of ',', producing an AttributeError on str
                writeErrorMsg("Can't get the key:'name' in the backend information!",funName,line)
        return result
    #translate the QASM to Open-QASM
    #the parameter 'c' is the current Circuit instance
    #return True: translate successfully!
    #return False: there is if statement in the QASM code
    def __translateQASM(self,c):
        """Read <c.urls>/Physical-Level/QASM.txt, convert each gate line via
        qasmDic and append it to the global QASM list.

        Returns False as soon as an 'if' statement is found (classical
        control is unsupported on IBMQX); True otherwise.
        """
        global QASM
        #the code has been store in circuit.url/QASM.txt
        codeLocation = c.urls + "/Physical-Level/QASM.txt"
        #this function must be called after circuit.execute()
        if os.path.exists(codeLocation) == False:
            info = get_curl_info()
            funName = info[0]
            line = info[1]
            writeErrorMsg("The QASM code hasn't been generated, please check your code!",funName,line)
        file = open(codeLocation)
        codes = file.readlines()
        file.close()
        for item in codes:
            #whether the statement is "if"
            if re.search(r'if(.+)',item) != None:
                return False
            tmpCode = ""
            tmp = item.split(" ")
            gate = tmp[0]
            #if the gate is M: M q[0] -> c[0], we have to get the tmp[1:len]
            qubitL = tmp[1:len(tmp)]
            try:
                para = None
                if re.search(r'^R\w{1}\(.+\)$',gate) != None:
                    #the gate is Rn and has the parameter
                    para = gate.split("(")[1].split(")")[0]
                    gate = gate.split("(")[0]
                gate = qasmDic[gate]
                if gate == "u1":
                    gate += "(" + para + ")"
                if gate == "u3":
                    gate += "(" + para + ",0,0)"
                #the gate with parameter
                tmpCode += gate + " "
                for q in qubitL:
                    tmpCode += q
            except KeyError:
                info = get_curl_info()
                funName = info[0]
                line = info[1]
            #print(tmpCode)
            QASM.append(tmpCode)
        return True
    #adjust the QASM code, which is producted by circuit.QASM(), so that the qubits can satisfy the constraint
    #of the CNOT connectivity
    def __canExecute(self):
        """Translate and (if needed) remap the circuit so it fits the device.

        Returns the final Open-QASM program string on success; returns None
        (implicitly) and writes codeWarning.txt when the circuit cannot be
        made to satisfy the device constraints.
        """
        circuit = checkEnvironment()
        if circuit == None:
            return None
        global QASM,qubitList,CNOTList
        QASM = []
        #record the reason for why can't execute the code
        reasonList = []
        print("Translating the QASM to Open-QASM...")
        if self.__translateQASM(circuit):
            #translate successfully!
            print("Optimizing the Open-QASM code, please wait for a while...")
            #record the ids of qubits in the current circuit
            qubitList = []
            #record the cnot map in the current circuit
            CNOTList = []
            #find the num in the str
            mode = re.compile(r'\d+')
            #analyse the QASM code (line 0 is the header, skip it)
            for l in range(0,len(QASM)):
                if l == 0:
                    continue
                else:
                    qs = mode.findall(QASM[l])
                    if "measure" in QASM[l]:
                        qs = [qs[0]]
                    for q in qs:
                        if int(q) in qubitList:
                            continue
                        else:
                            qubitList.append(int(q))
                    if "cx" in QASM[l]:
                        #get the id of control-qubit and target-qubit
                        tQ = int(qs[1])
                        cQ = int(qs[0])
                        #the reverse cnot won't be appended to the list
                        if [cQ,tQ] in CNOTList or [tQ,cQ] in CNOTList:
                            continue
                        CNOTList.append([cQ,tQ])
            totalConnectivity = self.__getTotalConnectivity()
            cnotBool = True
            idBool = True
            idBool = self.__determindID(totalConnectivity,reasonList)
            if idBool:
                cnotBool = self.__checkAllConstraint(CNOTList,totalConnectivity)
                if cnotBool == False:
                    #when __adjustCNOT was called, the CNOTList doesn't satisfy the constraint of IBM directly
                    cnotBool = self.__adjustCNOT(totalConnectivity,reasonList)
            #the circuit can be executed
            if idBool & cnotBool:
                numQ = str(len(totalConnectivity))
                code = 'OPENQASM 2.0;include "qelib1.inc";qreg q[' + numQ + '];creg c[' + numQ + '];\n'
                self.__reverseCNOT()
                for line in QASM:
                    code += line
                try:
                    file = open(circuit.urls + "/IBMQX/Open-QASM.txt","w")
                    file.write(code)
                    file.close()
                except IOError:
                    info = get_curl_info()
                    funName = info[0]
                    line = info[1]
                    writeErrorMsg("Can't write QASM code to Open-QASM.txt!",funName,line)
                return code
        else:
            #if statement
            reasonList.append("The Mif and Qif aren't supported by IBMQX for now!")
        #if statement or there is something wrong with the number of qubits or connectivity of qubits
        self.__writeErrorMsg(circuit.urls,reasonList)
    #write the reason why the code can't be executed on IBMQX
    def __writeErrorMsg(self,urls,reasonList:list):
        """Append the collected reasons to <urls>/IBMQX/codeWarning.txt."""
        #can't execute the circuit
        file = open(urls + "/IBMQX/codeWarning.txt",'a')
        file.write("WARNING:\n")
        #write the reason in codeWarning.txt
        for i in range(0,len(reasonList)):
            strs = str(i+1) + "." + reasonList[i] + "\n"
            file.write(strs)
        return None
    #add self.connectivity and reverse self.connectivity, the type of the returned parameter is dict
    def __getTotalConnectivity(self):
        """Return the undirected adjacency dict built from self.connectivity
        (each directed edge is inserted in both directions)."""
        totalConnectivity = {}
        for cQ in self.connectivity:
            for tQ in self.connectivity[cQ]:
                if cQ in totalConnectivity:
                    totalConnectivity[cQ].append(tQ)
                else:
                    totalConnectivity[cQ] = [tQ]
                if tQ in totalConnectivity:
                    totalConnectivity[tQ].append(cQ)
                else:
                    totalConnectivity[tQ] = [cQ]
        return totalConnectivity
    #determind whether the number of qubit in this circuit is more than the actual number
    #if bigger, return False; else return True;
    #if necessary, adjust the id of the qubit so that they are in line with the actual device
    def __determindID(self,totalConnectivity,reasonList):
        """Check/remap qubit ids against the device size (reads the global
        qubitList).  Ids >= device size are remapped onto free ids."""
        #we assume that there is no qubit isolated in ibm chip!
        useQubit = len(qubitList)
        actualQubit = len(totalConnectivity)
        if useQubit > actualQubit:
            reasonList.append("There are "+ str(useQubit) + " have been used! But the device you choose only have " + str(actualQubit) + " qubits!")
            return False
        if max(qubitList) < actualQubit:
            return True
        qubitList.sort()
        availalbleQ = [i for i in range(0,actualQubit)]
        qMap = {}
        for q in qubitList:
            qMap[q] = q
            if q < actualQubit:
                try:
                    availalbleQ.remove(q)
                except ValueError:
                    info = get_curl_info()
                    funName = info[0]
                    line = info[1]
                    writeErrorMsg("Q "+ str(q) + " isn't available!",funName,line)
                continue
            #q >= actualQubit
            #because actualQubit is more than useQubit, the actualQubit[0] is always existing
            qMap[q] = availalbleQ[0]
            availalbleQ.remove(availalbleQ[0])
        self.__changeQASMandCNOT(qMap)
        return True
    #check the CNOT list whether satisfies the constraint of the connectivity
    #if satisfies or we can adjust the cnot to satisfy the constraint, return True;
    #else return False and store the 'bad' cnot in reasonList
    def __adjustCNOT(self,totalConnectivity,reasonList):
        """Try to find a qubit relabelling (via backtracking) under which every
        CNOT in the global CNOTList is a device edge."""
        cnotNum = len(CNOTList)
        ibmNum = 0
        for k in self.connectivity:
            ibmNum += len(self.connectivity[k])
        if cnotNum > ibmNum:
            reason = "There are " + str(cnotNum) + " different connectivities in this circuit, but only " + str(ibmNum) + " are allowd in IBM chip!"
            reasonList.append(reason)
            return False
        totalCNOT = {}
        cnotQList = []
        for cnot in CNOTList:
            if cnot[0] not in cnotQList:
                cnotQList.append(cnot[0])
            if cnot[1] not in cnotQList:
                cnotQList.append(cnot[1])
            if cnot[0] in totalCNOT:
                totalCNOT[cnot[0]].append(cnot[1])
            else:
                totalCNOT[cnot[0]] = [cnot[1]]
            if cnot[1] in totalCNOT:
                totalCNOT[cnot[1]].append(cnot[0])
            else:
                totalCNOT[cnot[1]] = [cnot[0]]
        #candidate device qubits per circuit qubit: any device qubit with at
        #least as many neighbours.  NOTE: relies on totalCNOT preserving the
        #same insertion order as cnotQList (true on Python 3.7+ dicts).
        choiceList = []
        for cq in totalCNOT:
            tmp = []
            for tcq in totalConnectivity:
                if len(totalConnectivity[tcq]) >= len(totalCNOT[cq]):
                    tmp.append(tcq)
            choiceList.append(tmp)
        #the solution space is choiceList[]
        solution = [-1] * len(cnotQList)
        newMaps = self.__backTrace(0,len(cnotQList),solution,totalConnectivity,choiceList,cnotQList)
        if newMaps != None:
            self.__changeQASMandCNOT(newMaps)
            return True
        else:
            reason = "Can't adjust the connectivity in your circuit to satisfy the requirement of the IBM chip!"
            reasonList.append(reason)
            return False
    def __backTrace(self,depth,N,solution,tc,choiceList,cnotQList):
        """Depth-first search over candidate assignments; returns a qubit map
        dict when one satisfies all CNOT constraints, else None."""
        if depth >= N:
            dic = self.__getQubitMap(cnotQList,solution,tc)
            if self.__checkMapConstraint(dic,tc):
                return dic
            else:
                return None
        else:
            for i in range(0,len(choiceList[depth])):
                if choiceList[depth][i] in solution[0:depth+1]:
                    continue
                else:
                    solution[depth] = choiceList[depth][i]
                    res = self.__backTrace(depth+1,N,solution,tc,choiceList,cnotQList)
                    if res != None:
                        return res
    #use two list to construct a map: the key is from the first list and the value is from the second list
    #note: the dimension of l1 and l2 must be same with each other
    #and if there is qubits in qubitList but not in CNOTList, we should append the item in the dict
    def __getQubitMap(self,l1,l2,tc):
        if len(l1) != len(l2):
            try:
                raise IBMError("The dimension of the Qubit list should be same with the dimension of the solution!")
            except IBMError as ie:
                info = get_curl_info()
                funName = info[0]
                line = info[1]
                writeErrorMsg(ie.value,funName,line)
        dic = {}
        availalbleQ = [i for i in range(0,len(tc))]
        for index in range(0,len(l1)):
            dic[l1[index]] = l2[index]
            availalbleQ.remove(l2[index])
        for q in qubitList:
            if q in dic:
                continue
            else:
                dic[q] = availalbleQ[0]
                del availalbleQ[0]
        return dic
    #adjust the copy of CNOTList according to the map, and call the __checkAllConstraint
    def __checkMapConstraint(self,maps,tc):
        if len(maps) != len(qubitList):
            return False
        cCNOTList = CNOTList.copy()
        for i in range(0,len(cCNOTList)):
            cCNOTList[i] = [maps[cCNOTList[i][0]],maps[cCNOTList[i][1]]]
        return self.__checkAllConstraint(cCNOTList,tc)
    #change the global parameter qubitList, QASM, CNOTList according to the qmap
    def __changeQASMandCNOT(self,qMap):
        global QASM
        #change the id in CNOTList to satisfy the requirement
        for i in range(0,len(CNOTList)):
            for j in range(0,len(CNOTList[i])):
                CNOTList[i][j] = qMap[CNOTList[i][j]]
        #change the QASM code
        mode = re.compile(r'\d+')
        for l in range(0,len(QASM)):
            if l == 0:
                continue
            else:
                qs = mode.findall(QASM[l])
                if len(qs) == 1:
                    #single-qubit gate
                    QASM[l] = QASM[l].replace("[" + str(qs[0]) + "]","[" + str(qMap[int(qs[0])]) + "]")
                elif len(qs) == 2 and qs[0] == qs[1]:
                    #measurement
                    QASM[l] = QASM[l].replace("[" + str(qs[0]) + "]","[" + str(qMap[int(qs[0])]) + "]")
                else:
                    #multi-qubits gate
                    newQASM = QASM[l].split(" ")[0] + " "
                    qubit = QASM[l].split(" ")[1].split(",")
                    for qi in range(0,len(qs)):
                        newQASM += qubit[qi].replace("[" + str(qs[qi]) + "]","[" + str(qMap[int(qs[qi])]) + "]")
                        if qi != len(qs)-1:
                            newQASM += ","
                    QASM[l] = newQASM
        #change the qubitList according to the qMap
        for qi in range(0,len(qubitList)):
            if qubitList[qi] in qMap:
                qubitList[qi] = qMap[qubitList[qi]]
    #the the max neighbor in totalconnectivity
    def __getMaxNeighbor(self,tc):
        """Return the largest neighbour count in the adjacency dict tc."""
        if type(tc) != dict:
            try:
                raise TypeError
            except TypeError:
                info = get_curl_info()
                funName = info[0]
                line = info[1]
                writeErrorMsg("The type of the argument must be Dict!",funName,line)
        maxs = 0
        for c in tc:
            maxs = max(maxs,len(tc[c]))
        return maxs
    #check cnot whether satisfies the constraint
    #the format of cnot should be [1,3]
    def __checkSingleConstraint(self,cnot:list,tc):
        if len(cnot) != 2:
            try:
                raise ValueError
            except ValueError:
                info = get_curl_info()
                funName = info[0]
                line = info[1]
                writeErrorMsg("The cnot should be two-dimension!",funName,line)
        cQ = cnot[0]
        tQ = cnot[1]
        if cQ in tc and tQ in tc[cQ]:
            #directly satisfy the constraint
            return True
        else:
            return False
    def __checkAllConstraint(self,cnotList,tc):
        """True iff every cnot in cnotList is an edge of tc."""
        for c in cnotList:
            if self.__checkSingleConstraint(c,tc):
                continue
            else:
                return False
        return True
    #get the legal cnot gate in current device
    def __getLegalCNOT(self):
        legalCList = []
        for cQ in self.connectivity:
            for tQ in self.connectivity[cQ]:
                if [cQ,tQ] not in legalCList:
                    legalCList.append([cQ,tQ])
                if [tQ,cQ] not in legalCList:
                    legalCList.append([tQ,cQ])
        return legalCList
    #modify the qasm code by adding H to reverse the current CNOT
    def __reverseCNOT(self):
        """Rewrite cx gates whose direction only exists reversed on the device
        by sandwiching the reversed cx between H gates on both qubits."""
        lineN = 0
        while lineN < len(QASM):
            if 'cx' in QASM[lineN]:
                q = QASM[lineN].split(" ")[1]
                strs = q.split(',')
                #bug fix: parse the full qubit index; the original read a
                #single character (strs[x][2]) which broke for ids >= 10
                tQ = int(re.findall(r'\d+',strs[1])[0])
                cQ = int(re.findall(r'\d+',strs[0])[0])
                if tQ in self.connectivity and cQ in self.connectivity[tQ]:
                    #add H gate to satisfy the constraint
                    hExternal = "h q[" + str(cQ) + "];\r\nh q[" + str(tQ) + "];\r\n"
                    gateStr = "cx q[" + str(cQ) + "],q[" + str(tQ) + "];"
                    if gateStr in QASM[lineN]:
                        QASM.insert(lineN,hExternal)
                        QASM[lineN+1] = "cx q[" + str(tQ) + "],q[" + str(cQ) + "];\r\n"
                        QASM.insert(lineN+2,hExternal)
            lineN += 1
    #execute the code
    def executeQASM(self,experimentName = None):
        """Translate, submit and (if queued) poll the experiment, then write
        raw and analysed result files.  Returns False when the circuit
        cannot be executed on the selected device."""
        code = self.__canExecute()
        #checkEnvironment() is kept for its side effects on the work folder
        circuit = checkEnvironment()
        if code == None:
            info = get_curl_info()
            funName = info[0]
            line = info[1]
            writeErrorMsg("The QASM code generated by QuanSim doesn't satisfy the requirement of IBMQX!",funName,line)
            return False
        try:
            data = self.api.run_experiment(code,self.device,self.shot,experimentName,timeout = 300)
        except ConnectionError as ce:
            info = get_curl_info()
            funName = info[0]
            line = info[1]
            writeErrorMsg("Can't connect to the server, Please try again later!",funName,line)
        res = self.__analyseData(data)
        #get the result analysed
        if res['status'] == 0:
            #waiting
            blank = {}
            resOfExe = self.__query(res['msg'])
            if resOfExe == blank:
                print("The experiment is still waiting in the executive queue, We'll query every 5 seconds! You can also find the result on the Web of IBM Quantum Experience!")
                while resOfExe == blank:
                    #still no result
                    print("Waiting in the queue...")
                    #sleep for 5 seconds
                    time.sleep(5)
                    resOfExe = self.__query(res['msg'])
            #get the result (bug fix: was the undefined name 'resOFExe')
            self.__writeRaWData(resOfExe)
            measure = resOfExe['measure']
            dataMsg = self.__formatResult(measure)
            print(dataMsg)
            self.__writeAnalyData(dataMsg)
        elif res['status'] == 2:
            #wrong
            self.__writeRaWData(data)
        else:
            #successful
            self.__writeRaWData(data)
            self.__writeAnalyData(res['msg'])
    #format the 'measure' dict returned by IBMQX into the printable table
    def __formatResult(self,measure):
        """Build the result table from measure['qubits'/'labels'/'values'];
        each label is re-ordered so the state string reads q0..qn."""
        qubits = measure['qubits']
        labels = measure['labels']
        values = measure['values']
        rList = []
        for i in range(0,len(labels)):
            states = ""
            for q in qubits:
                state = labels[i][len(labels[i])-q-1]
                states += state
            rList.append([states,values[i]])
        dataMsg = "-" * 30
        dataMsg += " the data of IBMQX "
        dataMsg += "-" * 31
        dataMsg += "\r\n"
        dataMsg += "Result:\r\n"
        for r in rList:
            prob = float(r[1]) * 100
            dataMsg += " "*8+"|" + r[0] + ">----%.2f%%"%(prob)
            dataMsg += "\r\n"
        dataMsg += "-" * 80
        return dataMsg
    #analyse the date
    ##############################################
    #the return value is a Dict{'status':0,"msg":None}
    #if successful, then status is 1 and msg is the data analysed
    #if waiting, then status is 0 and msg is the id of the experiment
    #if error, then status is 2 and msg is None
    ###############################################
    def __analyseData(self,data):
        res = {'status':0,"msg":None}
        try:
            #judge the status of the experiment
            status = data['status']
            if status == "WORKING_IN_PROGRESS":
                #still in queue
                #then get the id of the experiment
                ids = data['idExecution']
                res['status'] = 0
                res['msg'] = ids
                return res
            elif status == "":
                #successful
                result = data['result']
                dataMsg = self.__formatResult(result['measure'])
                print(dataMsg)
                res['status'] = 1
                res['msg'] = dataMsg
                return res
            else:
                #there is something wrong
                res['status'] = 2
                res['msg'] = None
                #bug fix: was 'return 1', inconsistent with the documented
                #contract and with the other branches
                return res
        except KeyError:
            info = get_curl_info()
            funName = info[0]
            line = info[1]
            writeErrorMsg("There are some keys aren't in the result returned by IBMQX!",funName,line)
    #write the raw data of IBMQX to file
    def __writeRaWData(self,rawData):
        if rawData == "":
            return False
        #bug fix: 'circuit' was an unbound name here
        circuit = checkEnvironment()
        #write the date to file
        try:
            rawDataFile = open(circuit.urls + "/IBMQX/rawData_IBMQX.txt","w",encoding='utf-8')
            #str() because the API returns a dict, not text
            rawDataFile.write(str(rawData))
            rawDataFile.close()
        except IOError:
            info = get_curl_info()
            funName = info[0]
            line = info[1]
            writeErrorMsg("Can't write the raw data of IBMQX to rawData_IBMQX.txt!",funName,line)
    #write the data analysed of IBMQX to file
    def __writeAnalyData(self,data):
        if data == "":
            return False
        #bug fix: 'circuit' was an unbound name here
        circuit = checkEnvironment()
        #write the data to Data_IBMQX.txt
        try:
            dataFile = open(circuit.urls + "/IBMQX/Data_IBMQX.txt","w",encoding='utf-8')
            #bug fix: was 'Data' (undefined name) instead of the parameter
            dataFile.write(data)
            dataFile.close()
        except IOError:
            info = get_curl_info()
            funName = info[0]
            line = info[1]
            writeErrorMsg("Can't write the data of IBMQX to Data_IBMQX.txt!",funName,line)
    #get information (including the Code information) about a specific Execution of a Code,
    #the parameter "id_execution" is the id of the experiment
    def __query(self,id_execution):
        res = self.api.get_result_from_execution(id_execution)
        # print(res)
        return res
    def test(self):
        res = self.api.get_result_from_execution("ff5b9f00a8333ee1f5586699c45a5bc8")
        print(res)
|
|
import random
import math
from xdict import utils
#####
def gen_random_ascii_chr():
    """Return one random character with code point in [0, 255]."""
    code = math.floor(random.random() * 256 + 1) % 256
    return(chr(code))
def gen_random_ascii_str(length):
    """Return a string of *length* random characters (code points 0..255)."""
    chars = []
    for _ in range(0, length):
        chars.append(chr(math.floor(random.random() * 256 + 1) % 256))
    return(''.join(chars))
def gen_random_str_of_length(length,**kwargs):
    '''Return a random string of *length* characters drawn uniformly from
    kwargs['char_set'] (default: digits plus lowercase letters).

    Bug fixed: the index was always computed modulo the hard-coded 36, which
    raised IndexError for char_sets shorter than 36 characters and silently
    ignored the tail of longer ones; the actual char_set length is used now.
    '''
    if('char_set' in kwargs):
        char_set = kwargs['char_set']
    else:
        char_set = "0123456789abcdefghijklmnopqrstuvwxyz"
    rslt = ''
    for i in range(0,length):
        rslt = rslt + char_set[random.randrange(len(char_set))]
    return(rslt)
######
def gen_random_word(**kwargs):
    '''Return a random word.

    Length is kwargs['fixed_word_length'] when given, otherwise uniform in
    [0, max_word_length] (max_word_length defaults to 10).  Characters are
    drawn from kwargs['char_set'] (default: ASCII letters; the original
    "…wxzy…" ordering is kept — ordering does not affect the distribution).

    Fixes: no longer shadows the builtin ``len``; uses random.choice and
    str.join instead of manual indexing/concatenation.
    '''
    if('max_word_length' in kwargs):
        max_word_length = kwargs['max_word_length']
    else:
        max_word_length = 10
    length_range = random.randrange(max_word_length+1)
    if('char_set' in kwargs):
        char_set = kwargs['char_set']
    else:
        char_set = 'abcdefghijklmnopqrstuvwxzyABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if('fixed_word_length' in kwargs):
        length_range = kwargs['fixed_word_length']
    else:
        pass
    word = ''.join(random.choice(char_set) for _ in range(length_range))
    return(word)
def gen_random_root():
    """Return a fresh empty dict, list, or tuple with equal probability."""
    candidates = ({}, [], ())
    return(candidates[random.randrange(3)])
def gen_random_type_via_ratio(**kwargs):
    '''Pick one of the type tags 'd', 'l', 't', 's', 'v' with probability
    proportional to the integer ratios given in kwargs (keys d/l/t/s/v,
    missing keys count as 0).  When every ratio is zero, all five tags get
    equal weight.'''
    weights = [
        ('d', kwargs['d'] if 'd' in kwargs else 0),
        ('l', kwargs['l'] if 'l' in kwargs else 0),
        ('t', kwargs['t'] if 't' in kwargs else 0),
        ('s', kwargs['s'] if 's' in kwargs else 0),
        ('v', kwargs['v'] if 'v' in kwargs else 0),
    ]
    if(sum(w for (_, w) in weights) == 0):
        weights = [(tag, 1) for (tag, _) in weights]
    total = sum(w for (_, w) in weights)
    pick = random.randrange(1, total + 1)
    upper = 0
    for (tag, weight) in weights:
        upper = upper + weight
        if(pick <= upper):
            return(tag)
def gen_random_depth(min_dep,max_dep):
    """Return a random depth in the inclusive range [min_dep, max_dep]."""
    return(random.randrange(min_dep, max_dep + 1))
def gen_random_siblings_size(min_size,max_size):
    """Return a random sibling count in the inclusive range [min_size, max_size]."""
    return(random.randrange(min_size, max_size + 1))
def gen_random_prefix_array(prefix,size,max_word_length=10):
    '''Return *size* strings of the form "prefix_<n>_<randomword>" with the
    sequence numbers 1..size shuffled into random order.

    Bugs fixed: undefined names ``rand_Num`` and ``new_L`` (NameError) and a
    positional call to gen_random_word, which only accepts keyword arguments.
    NOTE(review): this def is immediately shadowed by a corrected duplicate
    later in the file; it is kept aligned with that version.
    '''
    l = []
    for i in range(0,size):
        l.append(i)
    new_l = []
    while(l.__len__()>0):
        rand_num = random.randrange(0,l.__len__())
        ele = l.pop(rand_num)
        ele = ''.join((prefix,'_',str(ele+1),'_',gen_random_word(max_word_length=max_word_length)))
        new_l.append(ele)
    return(new_l)
def gen_random_prefix_array(prefix,size,max_word_length=10):
    '''Return *size* strings of the form "prefix_<n>_<randomword>", where the
    sequence numbers 1..size appear in random order.'''
    pending = list(range(0, size))
    shuffled = []
    while(len(pending) > 0):
        pick = pending.pop(random.randrange(0, len(pending)))
        word = gen_random_word(max_word_length=max_word_length)
        shuffled.append(''.join((prefix, '_', str(pick + 1), '_', word)))
    return(shuffled)
def gen_random_fixed_word_length_prefix_array(prefix,size,fixed_word_length=10):
    '''Like gen_random_prefix_array, but every random word has exactly
    *fixed_word_length* characters.'''
    pending = list(range(0, size))
    shuffled = []
    while(len(pending) > 0):
        pick = pending.pop(random.randrange(0, len(pending)))
        word = gen_random_word(fixed_word_length=fixed_word_length)
        shuffled.append(''.join((prefix, '_', str(pick + 1), '_', word)))
    return(shuffled)
def gen_possible_leaf(data_type,max_value_seq):
    '''Map a type tag to a fresh empty container ('d' dict, 'l' list,
    't' tuple, 's' set) or, for any other tag, to a random value string
    "v_<n>" with n drawn from 1..max_value_seq.'''
    empties = {'d': {}, 'l': [], 't': (), 's': set({})}
    if(data_type in empties):
        return(empties[data_type])
    seq = random.randrange(1, max_value_seq + 1)
    return(''.join(('v', '_', str(seq))))
def gen_children(recursive,**kwargs):
    '''Populate *recursive* (dict/list/tuple/set) with random children and
    return it.

    kwargs: max_value_seq (range of leaf values, default 8), max_siblings
    (upper bound used to draw the child count, default 19), and the d/l/t/s/v
    ratio weights forwarded to gen_random_type_via_ratio.

    Lists, sets and dicts are mutated in place; tuples are rebuilt (they are
    immutable), so callers must always use the return value.  Sets only
    receive plain value leaves, since containers are unhashable.
    '''
    if('max_value_seq' in kwargs):
        max_value_seq = kwargs['max_value_seq']
    else:
        max_value_seq = 8
    if('max_siblings' in kwargs):
        max_siblings = kwargs['max_siblings']
    else:
        max_siblings = 19
    if('d' in kwargs):
        rd = kwargs['d']
    else:
        rd = 5
    if('l' in kwargs):
        rl = kwargs['l']
    else:
        rl = 5
    if('t' in kwargs):
        rt = kwargs['t']
    else:
        rt = 1
    if('s' in kwargs):
        rs = kwargs['s']
    else:
        rs = 1
    if('v' in kwargs):
        rv = kwargs['v']
    else:
        rv = 25
    # actual child count: drawn uniformly from 1..max_siblings+1
    max_siblings = gen_random_siblings_size(1,max_siblings+1)
    if(utils.is_list(recursive)):
        for i in range(0,max_siblings):
            data_type = gen_random_type_via_ratio(d=rd,l=rl,t=rt,s=rs,v=rv)
            next = gen_possible_leaf(data_type,max_value_seq)
            recursive.append(next)
    elif(utils.is_tuple(recursive)):
        # tuples are immutable: copy to a list, extend, rebuild
        temp = list(recursive)
        for i in range(0,max_siblings):
            data_type = gen_random_type_via_ratio(d=rd,l=rl,t=rt,s=rs,v=rv)
            next = gen_possible_leaf(data_type,max_value_seq)
            temp.append(next)
        recursive = tuple(temp)
    elif(utils.is_set(recursive)):
        for i in range(0,max_siblings):
            # force value leaves only: containers are unhashable
            data_type = gen_random_type_via_ratio(d=0,l=0,t=0,s=0,v=rv)
            next = gen_possible_leaf(data_type,max_value_seq)
            recursive.add(next)
    # dicts are handled by a separate if (not elif): list/tuple/set inputs
    # simply fall through to the no-op else branch here
    if(utils.is_dict(recursive)):
        keys = gen_random_prefix_array('key',max_siblings)
        for i in range(0,max_siblings):
            data_type = gen_random_type_via_ratio(d=rd,l=rl,t=rt,s=rs,v=rv)
            next = gen_possible_leaf(data_type,max_value_seq)
            recursive[keys[i]] = next
    else:
        pass
    return(recursive)
def gen_random_recursive_data(**kwargs):
    '''Generate a random nested structure (dicts/lists/tuples/sets with
    "v_<n>" string leaves) up to kwargs['max_depth'] levels (default 6).

    kwargs['root'] may supply the top-level container; it is cleared (or
    replaced by () when it cannot be cleared) before use.  Otherwise a
    random empty root is chosen.
    '''
    # tuple can only be leaf , coz tuple mechanism is a little different
    if('max_depth' in kwargs):
        max_depth = kwargs['max_depth']
    else:
        max_depth = 6
    if('root' in kwargs):
        if(utils.is_recursive_type(kwargs['root'])):
            root = kwargs['root']
            try:
                root.clear()
            except:
                # tuples have no clear(); fall back to an empty tuple
                root = ()
            else:
                pass
        else:
            root = gen_random_root()
    else:
        root = gen_random_root()
    unhandled = [root]
    # a tuple root cannot be mutated in place, so the wrapping list is
    # traversed instead and the (rebuilt) tuple is extracted at the end
    if(utils.is_tuple(root)):
        root = unhandled
        root_is_tuple = 1
    else:
        root_is_tuple = 0
    lv = -1
    # breadth-first: populate each level, collecting the new containers
    # as the frontier for the next level
    while(lv<max_depth):
        next_unhandled = []
        for i in range(0,unhandled.__len__()):
            if(utils.is_recursive_type(unhandled[i])):
                unhandled[i] = gen_children(unhandled[i])
                if(utils.is_dict(unhandled[i])):
                    for key in unhandled[i]:
                        v = unhandled[i][key]
                        if(utils.is_recursive_type(v)):
                            next_unhandled.append(v)
                elif(utils.is_list(unhandled[i])):
                    for j in range(0,unhandled[i].__len__()):
                        v = unhandled[i][j]
                        if(utils.is_recursive_type(v)):
                            next_unhandled.append(v)
                elif(utils.is_tuple(unhandled[i])):
                    for j in range(0,unhandled[i].__len__()):
                        v = unhandled[i][j]
                        if(utils.is_recursive_type(v)):
                            next_unhandled.append(v)
                else:
                    pass
            else:
                pass
        unhandled = next_unhandled
        lv=lv+1
    if(root_is_tuple):
        root = root[0]
    else:
        pass
    return(root)
def gen_random_recursive_only_dict_data(**kwargs):
    '''Generate random nested data using only dicts as containers and
    "v_<n>" strings as leaves.

    kwargs: d (dict ratio, default 1), v (value ratio, default 5),
    max_depth (default 6).
    '''
    rd = kwargs['d'] if 'd' in kwargs else 1
    rv = kwargs['v'] if 'v' in kwargs else 5
    max_depth = kwargs['max_depth'] if 'max_depth' in kwargs else 6
    root = {}
    frontier = [root]
    depth = -1
    # breadth-first fill, one level per iteration
    while(depth < max_depth):
        next_frontier = []
        for node in frontier:
            if(utils.is_recursive_type(node)):
                # gen_children mutates the dict in place and returns it
                node = gen_children(node,d=rd,l=0,t=0,s=0,v=rv)
                for key in node:
                    child = node[key]
                    if(utils.is_recursive_type(child)):
                        next_frontier.append(child)
            else:
                pass
        frontier = next_frontier
        depth = depth + 1
    return(root)
def gen_random_recursive_only_list_data(**kwargs):
    '''Generate random nested data using only lists as containers and
    "v_<n>" strings as leaves.

    kwargs: l (list ratio, default 1), v (value ratio, default 5),
    max_depth (default 6).
    '''
    rl = kwargs['l'] if 'l' in kwargs else 1
    rv = kwargs['v'] if 'v' in kwargs else 5
    max_depth = kwargs['max_depth'] if 'max_depth' in kwargs else 6
    root = []
    frontier = [root]
    depth = -1
    # breadth-first fill, one level per iteration
    while(depth < max_depth):
        next_frontier = []
        for node in frontier:
            if(utils.is_recursive_type(node)):
                # gen_children mutates the list in place and returns it
                node = gen_children(node,d=0,l=rl,t=0,s=0,v=rv)
                for j in range(0, len(node)):
                    child = node[j]
                    if(utils.is_recursive_type(child)):
                        next_frontier.append(child)
            else:
                pass
        frontier = next_frontier
        depth = depth + 1
    return(root)
def gen_cowrol_table(total_rows,total_cols):
    '''Build a nested dict matrix[row][col] of random words, with integer
    row keys 0..total_rows-1 and column keys 0..total_cols-1.'''
    matrix = {}
    for row in range(0, total_rows):
        cells = {}
        for col in range(0, total_cols):
            cells[col] = gen_random_word()
        matrix[row] = cells
    return(matrix)
################
################
import efuntool.efuntool as eftl
import random
import elist.elist as elel
def get_rand_name(**kwargs):
    """Return a random identifier-like name.

    The first character is drawn from kwargs['init'] (default: underscore and
    ASCII letters), the remaining len-1 characters from kwargs['other']
    (default also includes digits); total length is kwargs['len'] (default 4).
    """
    default_init = "_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    default_other = "_0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    total_len = eftl.dflt_kwargs("len",4,**kwargs)
    first_pool = eftl.dflt_kwargs("init",default_init,**kwargs)
    rest_pool = eftl.dflt_kwargs("other",default_other,**kwargs)
    first_ch = random.choice(first_pool)
    rest = elel.join(random.choices(rest_pool,k=total_len-1),"")
    return(first_ch + rest)
|
|
# Which key(s) were pressed
# What chars to print (if any)
# Keycode(s) generating event
import string
from .unikeys import UnicodeAsciiKeys
# These are used for the names of ctrl keys, etc.
# Map of printable ASCII characters to lowercase, space-separated names;
# AsciiKeys/ControlKeys derive attribute names from these (spaces become
# underscores, names are uppercased).
ASCII_NAMES = {
    '\t': 'tab',
    ' ': 'space', # 0x20
    '!': 'exclamation', # 0x21
    '"': 'double quote', # 0x22
    '#': 'hash', # 0x23
    '$': 'dollar', # 0x24
    '%': 'percent', # 0x25
    '&': 'ampersand', # 0x26
    '\'': 'single quote', # 0x27
    '(': 'open paren', # 0x28
    ')': 'close paren', # 0x29
    '*': 'asterisk', # 0x2a
    '+': 'plus', # 0x2b
    ',': 'comma', # 0x2c
    '-': 'minus', # 0x2d
    '.': 'period', # 0x2e
    '/': 'slash', # 0x2f
    ':': 'colon', # 0x3a
    ';': 'semicolon', # 0x3b
    '<': 'less than', # 0x3c
    '=': 'equals', # 0x3d
    '>': 'greater than', # 0x3e
    '?': 'question', # 0x3f
    '@': 'at', # 0x40
    '[': 'left bracket', # 0x5b
    '\\': 'backslash', # 0x5c
    ']': 'right bracket', # 0x5d
    '^': 'caret', # 0x5e
    '_': 'underscore', # 0x5f
    '`': 'backtick', # 0x60
    '{': 'left brace', # 0x7b
    '|': 'pipe', # 0x7c
    '}': 'right brace', # 0x7d
    '~': 'tilde', # 0x7e
}
class UnicodeKeys(object):
    """Control characters referred to by their Unicode code-point names."""
    # Names from ftp://ftp.unicode.org/Public/UNIDATA/NamesList.txt
    NULL = chr(0x00)
    START_OF_HEADING = chr(0x01)
class JargonKeys(object):
    """Hacker-jargon alias names for punctuation characters; several aliases
    may map to the same character."""
    BANG = '!'
    SHRIEK = '!'
    DOUBLE_QUOTE = '"'
    QUOTE = '"'
    NUMBER_SIGN = '#'
    SHARP = '#'
    OCTOTHORPE = '#'
    BUCK = '$'
    CASH = '$'
    STRING = '$'
    MOD = '%'
    GRAPES = '%'
    AMPERSAND = '&'
    AMP = '&'
    AND_SIGN = '&'
    APOSTROPHE = '\''
    PRIME = '\''
    TICK = '\''
    STAR = '*'
    SPLAT = '*'
    GLOB = '*'
    ADD = '+'
class IntercalKeys(object):
    """Punctuation names as used by the INTERCAL programming language."""
    SPOT = '.'
    TWO_SPOT = ':'
    TAIL = ','
    HYBRID = ';'
    MESH = '#'
    HALF_MESH = '='
    SPARK = '\''
    BACKSPARK = '`'
    WOW = '!'
    WHAT = '?'
    RABBIT_EARS = '"'
    # RABBIT is `"` over `.`
    SPIKE = '|'
    DOUBLE_OH_SEVEN = '%'
    WORM = '-'
    ANGLE = '<'
    RIGHT_ANGLE = '>'
    WAX = '('
    WANE = ')'
    U_TURN = '['
    U_TURN_BACK = ']'
    EMBRACE = '{'
    BRACELET = '}'
    SPLAT = '*'
    AMPERSAND = '&'
    V = 'V'
    BOOK = 'V'
    # BOOKWORM is `-` over `V`
    BIG_MONEY = '$'
    # CHANGE is cent sign
    SQUIGGLE = '~'
    FLAT_WORM = '_'
    # OVERLINE is line on top
    INTERSECTION = '+'
    SLAT = '/'
    BACKSLAT = '\\'
    WHIRLPOOL = '@'
    # HOOKWORK is logical NOT symbol
    SHARK = '^'
    SHARKFIN = '^'
    # BLOTCH is several characters smashed on top of each other
class VT100StandardModeKeys(object):
    """VT100 escape sequences in standard (cursor) mode: SS3-prefixed
    function keys, CSI-prefixed arrows."""
    # http://www.braun-home.net/michael/mbedit/info/misc/VT100_commands.htm
    # http://www.ccs.neu.edu/research/gpc/MSim/vona/terminal/VT100_Escape_Codes.html
    F1 = '\x1bOP'
    F2 = '\x1bOQ'
    F3 = '\x1bOR'
    F4 = '\x1bOS'
    UP = '\x1b[A'
    DOWN = '\x1b[B'
    RIGHT = '\x1b[C'
    LEFT = '\x1b[D'
class VT100ApplicationsModeKeys(object):
    """VT100 escape sequences in applications (keypad) mode; arrows and the
    numeric keypad use the SS3 ('\\x1bO') prefix here."""
    F1 = '\x1bOP'
    F2 = '\x1bOQ'
    F3 = '\x1bOR'
    F4 = '\x1bOS'
    UP = '\x1bOA'
    DOWN = '\x1bOB'
    RIGHT = '\x1bOC'
    LEFT = '\x1bOD'
    KEYPAD_0 = '\x1bOp'
    KEYPAD_1 = '\x1bOq'
    KEYPAD_2 = '\x1bOr'
    KEYPAD_3 = '\x1bOs'
    KEYPAD_4 = '\x1bOt'
    KEYPAD_5 = '\x1bOu'
    KEYPAD_6 = '\x1bOv'
    KEYPAD_7 = '\x1bOw'
    KEYPAD_8 = '\x1bOx'
    KEYPAD_9 = '\x1bOy'
    KEYPAD_MINUS = '\x1bOm'
    KEYPAD_COMMA = '\x1bOl'
    KEYPAD_PERIOD = '\x1bOn'
    KEYPAD_ENTER = '\x1bOM'
class VT220Keys(object):
    """VT220 CSI function-key sequences ('\\x1b[<n>~')."""
    # F1-F5 didn't exist historically, but were added by later emulators
    F1 = '\x1b[11~'
    F2 = '\x1b[12~'
    F3 = '\x1b[13~'
    F4 = '\x1b[14~'
    F5 = '\x1b[15~'
    # Historical keys
    F6 = '\x1b[17~'
    F7 = '\x1b[18~'
    F8 = '\x1b[19~'
    F9 = '\x1b[20~'
    F10 = '\x1b[21~'
    F11 = '\x1b[23~'
    F12 = '\x1b[24~'
    # F13+ and key combinations to enter them are of limited usefulness today
class UnixKeys(object):
    """Assorted key codes observed on Unix terminals."""
    # Keys found experimentally, of unknown provenance
    ESC = '\x1b'
    HOME = '\x1b[H'
    END = '\x1b[F'
    PAGE_UP = '\x1b[5'    # NOTE(review): VT220 page-up is '\x1b[5~'; trailing '~' looks missing — confirm
    PAGE_DOWN = '\x1b[6'  # NOTE(review): VT220 page-down is '\x1b[6~' — confirm
    ENTER = '\n'
    CR = '\r'
    BACKSPACE = '\x7f'
    SPACE = ' '
    INSERT = '\x1b[2~'
    DELETE = '\x1b[3~'
class AlternativeUnixFunctionKeys(object):
    """Function keys using an SS3 prefix with VT220-style numbers."""
    # Unsure origin: alternate V220 mode?
    F1 = '\x1bO11~'
    F2 = '\x1bO12~'
    F3 = '\x1bO13~'
    F4 = '\x1bO14~'
    F5 = '\x1bO15~'
    F6 = '\x1bO17~'
    F7 = '\x1bO18~'
    F8 = '\x1bO19~'
    F9 = '\x1bO20~'
    F10 = '\x1bO21~'
    F11 = '\x1bO23~'
    F12 = '\x1bO24~'
class WindowsKeys(object):
    """Windows console key codes: extended keys arrive as a two-byte
    sequence prefixed with '\\x00' or '\\xe0'.

    Bug fixed: CTRL_ALT_EQUALS was '\\x00x83' (a literal 'x' slipped into
    the escape), breaking the prefix-plus-one-byte pattern every other
    entry follows; it is now '\\x00\\x83', consistent with CTRL_ALT_MINUS
    ('\\x00\\x82') immediately before it.
    """
    ESC = '\x1b'
    LEFT = '\xe0K'
    RIGHT = '\xe0M'
    UP = '\xe0H'
    DOWN = '\xe0P'
    ENTER = '\r'
    BACKSPACE = '\x08'
    SPACE = ' '
    F1 = '\x00;'
    F2 = '\x00<'
    F3 = '\x00='
    F4 = '\x00>'
    F5 = '\x00?'
    F6 = '\x00@'
    F7 = '\x00A'
    F8 = '\x00B'
    F9 = '\x00C'
    F10 = '\x00D'
    F11 = '\xe0\x85'
    F12 = '\xe0\x86'
    INSERT = '\xe0R'
    DELETE = '\xe0S'
    PAGE_UP = '\xe0I'
    PAGE_DOWN = '\xe0Q'
    HOME = '\xe0G'
    END = '\xe0O'
    CTRL_F1 = '\x00^'
    CTRL_F2 = '\x00_'
    CTRL_F3 = '\x00`'
    CTRL_F4 = '\x00a'
    CTRL_F5 = '\x00b'
    CTRL_F6 = '\x00c'
    CTRL_F7 = '\x00d' # Captured by something?
    CTRL_F8 = '\x00e'
    CTRL_F9 = '\x00f'
    CTRL_F10 = '\x00g'
    CTRL_F11 = '\xe0\x89'
    CTRL_F12 = '\xe0\x8a'
    CTRL_HOME = '\xe0w'
    CTRL_END = '\xe0u'
    CTRL_INSERT = '\xe0\x92'
    CTRL_DELETE = '\xe0\x93'
    CTRL_PAGE_DOWN = '\xe0v'
    CTRL_2 = '\x00\x03'
    CTRL_UP = '\xe0\x8d'
    CTRL_DOWN = '\xe0\x91'
    CTRL_LEFT = '\xe0s'
    CTRL_RIGHT = '\xe0t'
    CTRL_ALT_A = '\x00\x1e'
    CTRL_ALT_B = '\x000'
    CTRL_ALT_C = '\x00.'
    CTRL_ALT_D = '\x00 '
    CTRL_ALT_E = '\x00\x12'
    CTRL_ALT_F = '\x00!'
    CTRL_ALT_G = '\x00"'
    CTRL_ALT_H = '\x00#'
    CTRL_ALT_I = '\x00\x17'
    CTRL_ALT_J = '\x00$'
    CTRL_ALT_K = '\x00%'
    CTRL_ALT_L = '\x00&'
    CTRL_ALT_M = '\x002'
    CTRL_ALT_N = '\x001'
    CTRL_ALT_O = '\x00\x18'
    CTRL_ALT_P = '\x00\x19'
    CTRL_ALT_Q = '\x00\x10'
    CTRL_ALT_R = '\x00\x13'
    CTRL_ALT_S = '\x00\x1f'
    CTRL_ALT_T = '\x00\x14'
    CTRL_ALT_U = '\x00\x16'
    CTRL_ALT_V = '\x00/'
    CTRL_ALT_W = '\x00\x11'
    CTRL_ALT_X = '\x00-'
    CTRL_ALT_Y = '\x00\x15'
    CTRL_ALT_Z = '\x00,'
    CTRL_ALT_1 = '\x00x'
    CTRL_ALT_2 = '\x00y'
    CTRL_ALT_3 = '\x00z'
    CTRL_ALT_4 = '\x00{'
    CTRL_ALT_5 = '\x00|'
    CTRL_ALT_6 = '\x00}'
    CTRL_ALT_7 = '\x00~'
    CTRL_ALT_8 = '\x00\x7f'
    CTRL_ALT_9 = '\x00\x80'
    CTRL_ALT_0 = '\x00\x81'
    CTRL_ALT_MINUS = '\x00\x82'
    CTRL_ALT_EQUALS = '\x00\x83'
    CTRL_ALT_BACKSPACE = '\x00\x0e'
    ALT_F1 = '\x00h'
    ALT_F2 = '\x00i'
    ALT_F3 = '\x00j'
    ALT_F4 = '\x00k'
    ALT_F5 = '\x00l'
    ALT_F6 = '\x00m'
    ALT_F7 = '\x00n'
    ALT_F8 = '\x00o'
    ALT_F9 = '\x00p'
    ALT_F10 = '\x00q'
    ALT_F11 = '\xe0\x8b'
    ALT_F12 = '\xe0\x8c'
    ALT_HOME = '\x00\x97'
    ALT_END = '\x00\x9f'
    ALT_INSERT = '\x00\xa2'
    ALT_DELETE = '\x00\xa3'
    ALT_PAGE_UP = '\x00\x99'
    ALT_PAGE_DOWN = '\x00\xa1'
    ALT_LEFT = '\x00\x9b'
    ALT_RIGHT = '\x00\x9d'
    ALT_UP = '\x00\x98'
    ALT_DOWN = '\x00\xa0'
    CTRL_ALT_LEFT_BRACKET = '\x00\x1a'
    CTRL_ALT_RIGHT_BRACKET = '\x00\x1b'
    CTRL_ALT_SEMICOLON = '\x00\''
    CTRL_ALT_SINGLE_QUOTE = '\x00('
    CTRL_ALT_ENTER = '\x00\x1c'
    CTRL_ALT_SLASH = '\x005'
    CTRL_ALT_PERIOD = '\x004'
    CTRL_ALT_COMMA = '\x003'
class ControlKeys(object):
    """Expose the 32 ASCII control codes (0x00-0x1f) as attributes.

    Each control code is named after the printable character 0x40 positions
    above it (e.g. 0x01 -> CTRL_A), looking punctuation names up in
    ASCII_NAMES; *format* shapes the attribute name.
    """
    def __init__(self, format='CTRL_{}'):
        for code in range(0x20):
            ctrl_char = chr(code)
            printable = chr(code + 0x40)
            base_name = ASCII_NAMES.get(printable, printable).upper()
            setattr(self, format.format(base_name), ctrl_char)
class AsciiKeys(object):
    """Expose printable ASCII as attributes: letters as A / SHIFT_A, digits
    as N0..N9, and the named punctuation from *ascii_names* (names are
    uppercased with spaces replaced by underscores)."""
    def __init__(
        self,
        lower_format='{}', upper_format='SHIFT_{}', digit_format='N{}',
        ascii_names=ASCII_NAMES,
    ):
        for ch in string.ascii_lowercase:
            setattr(self, lower_format.format(ch.upper()), ch)
        for ch in string.ascii_uppercase:
            setattr(self, upper_format.format(ch.upper()), ch)
        for ch in string.digits:
            setattr(self, digit_format.format(ch), ch)
        for ch, label in ascii_names.items():
            setattr(self, label.upper().replace(' ', '_'), ch)
class Keys(object):
    """Aggregate key codes from several key-class instances.

    Maintains bidirectional maps between key names and key codes, plus
    the set of all proper prefixes of codes ("escapes") used to detect
    partially-read escape sequences.
    """
    def __init__(self, keyclasses):
        self.__names = dict() # Map of codes -> names
        self.__codes = dict() # Map of names -> codes
        self.__escapes = set()
        # Register every ALL-CAPS public attribute of every key class;
        # earlier classes in the list take precedence (first wins).
        for keyclass in keyclasses:
            for name in dir(keyclass):
                if self._is_key_name(name):
                    code = getattr(keyclass, name)
                    self.register(name, code)
    def register(self, name, code):
        """Register a name/code pair (first registration of either wins)."""
        if name not in self.__codes:
            self.__codes[name] = code
        if code not in self.__names:
            self.__names[code] = name
        # Every proper prefix of a code is a possible escape sequence.
        for i in range(len(code)):
            self.__escapes.add(code[:i])
        # Update towards canonicity
        # NOTE(review): these fixed-point loops re-map aliases until
        # canon()/name()/code() stabilize, so chains of aliases appear to
        # collapse to one canonical code/name — confirm against callers.
        while True:
            canon_code = self.canon(code)
            canon_canon_code = self.canon(canon_code)
            if canon_code != canon_canon_code:
                self.__codes[self.name(code)] = canon_canon_code
            else:
                break
        while True:
            canon_name = self.name(self.code(name))
            canon_canon_name = self.name(self.code(canon_name))
            if canon_name != canon_canon_name:
                self.__names[self.code(name)] = canon_canon_name
            else:
                break
    @property
    def escapes(self):
        # All proper prefixes of registered codes.
        return self.__escapes
    @property
    def names(self):
        # All registered key names.
        return self.__codes.keys()
    def name(self, code):
        """Return the registered name for *code*, or None."""
        return self.__names.get(code)
    def code(self, name):
        """Return the registered code for *name*, or None."""
        return self.__codes.get(name)
    def canon(self, code):
        """Return the canonical code equivalent to *code* (itself if unknown)."""
        name = self.name(code)
        return self.code(name) if name else code
    def __getattr__(self, name):
        # Attribute-style access to codes: keys.ENTER -> its code.
        code = self.code(name)
        if code is not None:
            return code
        else:
            return self.__getattribute__(name)
    def _is_key_name(self, name):
        # Key attributes are ALL-CAPS and public.
        return name == name.upper() and not name.startswith('_')
def _make_escapes(codes):
escapes = set()
for code in codes:
for i in range(len(code)):
escapes.add(code[:i])
return escapes
# Aggregate key tables per platform.  Keys.register gives precedence to
# the first name/code registered, so the order of classes below matters.
unix_keys = Keys([
    VT100StandardModeKeys(),
    VT100ApplicationsModeKeys(),
    VT220Keys(),
    UnixKeys(),
    AlternativeUnixFunctionKeys(),
    AsciiKeys(),
    ControlKeys(),
    UnicodeAsciiKeys(),
    JargonKeys(),
    IntercalKeys()
])
windows_keys = Keys([
    WindowsKeys(),
    AsciiKeys(),
    ControlKeys(),
    UnicodeAsciiKeys(),
    JargonKeys(),
    IntercalKeys()
])
# Platform name -> key table used for decoding input on that platform.
PLATFORM_KEYS = {
    'unix': unix_keys,
    'windows': windows_keys,
}
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import keystone
from openstack_dashboard import policy
from openstack_dashboard import usage
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.identity.projects \
import tables as project_tables
from openstack_dashboard.dashboards.identity.projects \
import workflows as project_workflows
from openstack_dashboard.dashboards.project.overview \
import views as project_views
# Project attributes copied into the update workflow's initial data.
PROJECT_INFO_FIELDS = ("domain_id",
                       "domain_name",
                       "name",
                       "description",
                       "enabled")
# URL name of the project index view; used as redirect target on errors.
INDEX_URL = "horizon:identity:projects:index"
class TenantContextMixin(object):
    """Mixin that resolves the project (tenant) named in the URL kwargs."""

    @memoized.memoized_method
    def get_object(self):
        """Fetch the project for ``tenant_id``; on failure redirect to index."""
        tenant_id = self.kwargs['tenant_id']
        try:
            return api.keystone.tenant_get(
                self.request, tenant_id, admin=True)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve project information.'),
                              redirect=reverse(INDEX_URL))

    def get_context_data(self, **kwargs):
        """Expose the resolved project as ``tenant`` in the template context."""
        ctx = super(TenantContextMixin, self).get_context_data(**kwargs)
        ctx['tenant'] = self.get_object()
        return ctx
class IndexView(tables.DataTableView):
    """Paginated listing of keystone projects (tenants)."""
    table_class = project_tables.TenantsTable
    template_name = 'identity/projects/index.html'
    def has_more_data(self, table):
        # Pagination flag computed by the most recent get_data() call.
        return self._more
    def get_data(self):
        """Return the projects the user is allowed to list.

        Users passing the ``identity:list_projects`` policy check get all
        projects in the current domain context; otherwise users passing
        ``identity:list_user_projects`` get only their own projects;
        otherwise an informational message is shown and no rows returned.
        """
        tenants = []
        marker = self.request.GET.get(
            project_tables.TenantsTable._meta.pagination_param, None)
        domain_context = self.request.session.get('domain_context', None)
        if policy.check((("identity", "identity:list_projects"),),
                        self.request):
            try:
                tenants, self._more = api.keystone.tenant_list(
                    self.request,
                    domain=domain_context,
                    paginate=True,
                    marker=marker)
            except Exception:
                # Degrade to an empty listing with an error message.
                self._more = False
                exceptions.handle(self.request,
                                  _("Unable to retrieve project list."))
        elif policy.check((("identity", "identity:list_user_projects"),),
                          self.request):
            try:
                tenants, self._more = api.keystone.tenant_list(
                    self.request,
                    user=self.request.user.id,
                    paginate=True,
                    marker=marker,
                    admin=False)
            except Exception:
                self._more = False
                exceptions.handle(self.request,
                                  _("Unable to retrieve project information."))
        else:
            self._more = False
            msg = \
                _("Insufficient privilege level to view project information.")
            messages.info(self.request, msg)
        return tenants
class ProjectUsageView(usage.UsageView):
    """Per-project usage view, reusing the project-overview CSV renderer."""
    table_class = usage.ProjectUsageTable
    usage_class = usage.ProjectUsage
    template_name = 'identity/projects/usage.html'
    csv_response_class = project_views.ProjectUsageCsvRenderer
    csv_template_name = 'project/overview/usage.csv'
    def get_data(self):
        # Let the base class populate self.usage, then report instances.
        super(ProjectUsageView, self).get_data()
        return self.usage.get_instances()
class CreateProjectView(workflows.WorkflowView):
    """Workflow view creating a project with default quota values."""
    workflow_class = project_workflows.CreateProject
    def get_initial(self):
        """Seed the workflow with the default domain and default quotas."""
        initial = super(CreateProjectView, self).get_initial()
        # Set the domain of the project
        domain = api.keystone.get_default_domain(self.request)
        initial["domain_id"] = domain.id
        initial["domain_name"] = domain.name
        # get initial quota defaults
        try:
            quota_defaults = quotas.get_default_quota_data(self.request)
            try:
                if api.base.is_service_enabled(self.request, 'network') and \
                        api.neutron.is_quotas_extension_supported(
                            self.request):
                    # TODO(jpichon): There is no API to access the Neutron
                    # default quotas (LP#1204956). For now, use the values
                    # from the current project.
                    project_id = self.request.user.project_id
                    quota_defaults += api.neutron.tenant_quota_get(
                        self.request,
                        tenant_id=project_id)
            except Exception:
                # Neutron quota failures only flag the quota step; the
                # rest of the defaults are still usable.
                error_msg = _('Unable to retrieve default Neutron quota '
                              'values.')
                self.add_error_to_step(error_msg, 'update_quotas')
            for field in quotas.QUOTA_FIELDS:
                initial[field] = quota_defaults.get(field).limit
        except Exception:
            error_msg = _('Unable to retrieve default quota values.')
            self.add_error_to_step(error_msg, 'update_quotas')
        return initial
class UpdateProjectView(workflows.WorkflowView):
    """Workflow view editing an existing project and its quotas."""
    workflow_class = project_workflows.UpdateProject
    def get_initial(self):
        """Seed the workflow with the project's info, domain and quotas."""
        initial = super(UpdateProjectView, self).get_initial()
        project_id = self.kwargs['tenant_id']
        initial['project_id'] = project_id
        try:
            # get initial project info
            project_info = api.keystone.tenant_get(self.request, project_id,
                                                   admin=True)
            for field in PROJECT_INFO_FIELDS:
                initial[field] = getattr(project_info, field, None)
            # Retrieve the domain name where the project belong
            if keystone.VERSIONS.active >= 3:
                try:
                    domain = api.keystone.domain_get(self.request,
                                                     initial["domain_id"])
                    initial["domain_name"] = domain.name
                except Exception:
                    exceptions.handle(self.request,
                                      _('Unable to retrieve project domain.'),
                                      redirect=reverse(INDEX_URL))
            # get initial project quota
            quota_data = quotas.get_tenant_quota_data(self.request,
                                                      tenant_id=project_id)
            if api.base.is_service_enabled(self.request, 'network') and \
                    api.neutron.is_quotas_extension_supported(self.request):
                quota_data += api.neutron.tenant_quota_get(self.request,
                                                           tenant_id=project_id)
            for field in quotas.QUOTA_FIELDS:
                initial[field] = quota_data.get(field).limit
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve project details.'),
                              redirect=reverse(INDEX_URL))
        return initial
|
|
import numpy as np
# Delayed import: scipy.optimize
# It is imported in the minimization function, so the main functionality
# of this module is preserved even if SciPy is not installed.
def ElasticConstants(atoms, symmetry, minimize=False, **kwargs):
    """Calculate the anisotropic elastic constants of a system.

    The atoms must form a periodic system with an orthorhombic unit
    cell at (or near) its equilibrium lattice constant(s); pass
    minimize=True to optimize the cell first (inefficient).  The atoms
    are restored afterwards, except that minimize=True leaves the
    optimized cell in place.

    Supported symmetries:

    symmetry='cubic'
        Returns (C11, C12, C44, B), with B = (C11 + 2*C12)/3.

    symmetry='hexagonal'
        Returns (C11, C12, C13, C33, C44, B); B follows from the
        other constants via a non-trivial expression.

    The system itself must reflect the requested symmetry (this is not
    checked), and the unit cell must be orthorhombic.
    """
    cell = atoms.get_cell()
    # All off-diagonal cell components must vanish.
    for row in range(3):
        for col in range(3):
            if row != col and abs(cell[row, col]) > 1e-10:
                raise ValueError("Unit cell of atoms is not orthorhombic.")
    if minimize:
        minimize_unit_cell(atoms, symmetry)
    sym = symmetry.lower()
    if sym == 'cubic':
        return _elastic_constants_cubic(atoms, **kwargs)
    if sym == 'hexagonal':
        return _elastic_constants_hexagonal(atoms, **kwargs)
    raise ValueError('Symmetry "%s" not supported' % (symmetry,))
def _elastic_constants_cubic(atoms, debug=False):
    """Return (C11, C12, C44, B) for a system with cubic symmetry."""
    e0 = atoms.get_potential_energy()
    cell0 = atoms.get_cell()
    # Uniaxial strain along x probes C11 directly.
    C11 = elastic_constant('C11', atoms, e0, cell0, (1, 0, 0, 0, 0, 0),
                           debug=debug)
    # Uniform dilation probes the bulk modulus B.
    B = elastic_constant('B', atoms, e0, cell0,
                         (1/3.0, 1/3.0, 1/3.0, 0, 0, 0),
                         debug=debug)
    # A pure shear component probes C44.
    C44 = elastic_constant('C44', atoms, e0, cell0,
                           (0, 0, 0, 1/2.0, 0, 0), debug=debug)
    # With B = (C11 + 2*C12)/3, C12 follows from B and C11.
    C12 = (3 * B - C11) / 2.0
    return (C11, C12, C44, B)
def _elastic_constants_hexagonal(atoms, debug=False, sanity=True):
    """Calculate C11, C12, C13, C33, C44 and B for a hexagonal system.

    If sanity is true, a hexagonality check is made: C66 computed
    directly and via (C11 - C12)/2 must agree within 2%.
    """
    e0 = atoms.get_potential_energy()
    u0 = atoms.get_cell()
    C11 = elastic_constant('C11', atoms, e0, u0, (1,0,0,0,0,0),
                           debug=debug)
    C33 = elastic_constant('C33', atoms, e0, u0, (0,0,1,0,0,0),
                           debug=debug)
    # Straining x and y together probes C11 + C12.
    C11_C12 = elastic_constant('2C11+2C12', atoms, e0, u0,
                               (1,1,0,0,0,0), debug=debug) / 2
    C12 = C11_C12 - C11
    # Straining x and z together probes C11 + C33 + 2*C13.
    C11_C33_2C13 = elastic_constant('C11+C33+2C13', atoms, e0, u0,
                                    (1,0,1,0,0,0), debug=debug)
    C13 = 0.5 * (C11_C33_2C13 - C11 - C33)
    C44 = elastic_constant('C44', atoms, e0, u0,
                           (0,0,0,1/2.0,0,0), debug=debug)
    C66 = elastic_constant('C66', atoms, e0, u0,
                           (0,0,0,0,0,1/2.0), debug=debug)
    # In a hexagonal crystal C66 = (C11 - C12)/2; a large deviation
    # indicates the system is not actually hexagonal.
    C66_alt = 0.5 * (C11 - C12)
    devC66 = 2 * abs(C66 - C66_alt) / (C66 + C66_alt)
    if debug or devC66 > 0.02:
        print "Deviation in C66: %.1f%%" % (100 * devC66,)
    if devC66 > 0.02 and sanity:
        raise RuntimeError("Deviation in C66: %.1f%% > 2%%: Not a hexagonal system?"
                           % (100 * devC66,))
    # Bulk modulus from the other constants.
    alpha = (C11 + C12 - 2*C13) / (C33 - C13)
    if debug:
        print "alpha =", alpha
    B = (2 * C11 + 2 * C12 + 4 * alpha * C13
         + C33 * alpha * alpha) / ((2 + alpha) * (2 + alpha))
    return (C11, C12, C13, C33, C44, B)
def elastic_constant(name, atoms, e0, u0, mode, strain=0.007, debug=False):
"""Calculate an elastic constant.
Parameters::
name
Name of the constant (a string).
atoms
The atoms.
e0
Energy in the equilibrium configuration.
u0
Unit cell in the equilibrium configuration.
mode
The deformation mode as six numbers, giving weights
to the six strain components.
"""
strainlist = (-1.0, 0.5, 0, 0.5, 1.0)
energies = []
order = np.array([[0, 5, 4],
[5, 1, 3],
[4, 3, 2]])
straintensor = np.array(mode)[order]
if debug:
print "%s unit strain tensor:" % (name,)
print straintensor
for s in strainlist:
if s == 0:
energies.append(e0)
else:
# Apply strain
s = s * strain * straintensor + np.identity(3)
atoms.set_cell(np.dot(u0, s), scale_atoms=True)
energies.append(atoms.get_potential_energy())
atoms.set_cell(u0, scale_atoms=True) # Reset atoms
fit0 = np.poly1d(np.polyfit(strain * np.array(strainlist), energies, 3))
fit1 = np.polyder(fit0, 1)
fit2 = np.polyder(fit1, 1)
x0 = None
for x in np.roots(fit1):
if fit2(x) > 0:
if x0 is not None:
raise RuntimeError("More than two roots found.")
assert x0 is None
x0 = x
if x0 is None:
raise RuntimeError("No roots found.")
if np.abs(x0) > 0.5 * strain:
raise RuntimeError("Unreasonable root (%f): " % (x0,) +
"Maybe the system was not at the equilibrium configuration")
if debug:
print "Root:", x0
value = fit2(x0)
return value / atoms.get_volume()
def minimize_unit_cell(atoms, symmetry):
    """Optimize the unit-cell scaling of *atoms* for the given symmetry.

    For 'cubic' a single overall scale factor is optimized; for
    'hexagonal' the c axis additionally gets an independent scale
    factor.  The atoms are left in the optimized cell.
    """
    from asap3.Tools.ParameterOptimization.ScipyFmin import fmin
    if symmetry == 'cubic':
        variables = [1.0]  # overall scaling only
    elif symmetry == 'hexagonal':
        variables = [1.0, 1.0]  # overall scaling plus c-axis scaling
    else:
        raise ValueError('Symmetry "%s" not supported' % (symmetry,))
    cell0 = atoms.get_cell()
    def energy(v, a=atoms, u=cell0):
        # Scale the whole cell by v[0]; for hexagonal also scale c by v[1].
        newcell = v[0] * u
        if len(v) > 1:
            newcell[2] *= v[1]
        a.set_cell(newcell, scale_atoms=True)
        return a.get_potential_energy()
    xopt, fopt, iter, calls, flag = fmin(energy, variables, delta=0.01,
                                         full_output=True, disp=False)
if __name__ == '__main__':
    # Self-test / demo: compute elastic constants for Cu (fcc) and
    # Ru (hcp) with the EMT potential, printing results in GPa.
    from asap3 import EMT, units, EMThcpParameters
    from ase.lattice.cubic import FaceCenteredCubic
    from ase.lattice.hexagonal import HexagonalClosedPacked
    print "Calculating cubic constants for Cu"
    atoms = FaceCenteredCubic(size=(5,5,5), symbol='Cu',
                              latticeconstant=3.59495722231)
    atoms.set_calculator(EMT())
    e = ElasticConstants(atoms, 'cubic')
    print np.array(e) / units.GPa
    # Deliberately misuse the hexagonal path on a cubic crystal; the
    # sanity check is disabled so it completes.
    print "Pretending that Cu is hexagonal"
    e = ElasticConstants(atoms, 'hexagonal', sanity=False)
    print np.array(e) / units.GPa
    print "Calculating elastic constants for Ru"
    atoms = HexagonalClosedPacked(size=(5,5,5), symbol='Ru',
                                  directions=[[1,-2,1,0],
                                              [1,0,-1,0],
                                              [0,0,0,1]])
    atoms.set_calculator(EMT(EMThcpParameters()))
    e = ElasticConstants(atoms, 'hexagonal', minimize=True)
    print np.array(e) / units.GPa
    print "The EMT values are not even close to experimental values!"
    print "Pretending Ru is cubic"
    e = ElasticConstants(atoms, 'cubic')
    print np.array(e) / units.GPa
|
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
# Device under test and the Verilog testbench wrapper that instantiates it.
module = 'axis_fifo'
testbench = 'test_axis_frame_fifo_64'
# Verilog sources compiled into the simulation.
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
# Icarus Verilog build command producing the .vvp simulation binary.
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
    """Build the MyHDL/Icarus cosimulation testbench for the AXI-stream
    frame FIFO (64-bit datapath) and return all simulation instances.

    Drives frames into the DUT through an AXI-stream source, receives
    them through a sink, and checks data integrity plus the overflow /
    bad-frame / good-frame status outputs across twelve scenarios.
    """
    # Parameters
    DEPTH = 512
    DATA_WIDTH = 64
    KEEP_ENABLE = (DATA_WIDTH>8)
    # NOTE(review): '/' yields a float under Python 3; '//' would be
    # safer here — confirm the target interpreter.
    KEEP_WIDTH = (DATA_WIDTH/8)
    LAST_ENABLE = 1
    ID_ENABLE = 1
    ID_WIDTH = 8
    DEST_ENABLE = 1
    DEST_WIDTH = 8
    USER_ENABLE = 1
    USER_WIDTH = 1
    PIPELINE_OUTPUT = 2
    FRAME_FIFO = 1
    USER_BAD_FRAME_VALUE = 1
    USER_BAD_FRAME_MASK = 1
    DROP_BAD_FRAME = 1
    DROP_WHEN_FULL = 0
    # Inputs
    clk = Signal(bool(0))
    rst = Signal(bool(0))
    current_test = Signal(intbv(0)[8:])
    s_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
    s_axis_tkeep = Signal(intbv(1)[KEEP_WIDTH:])
    s_axis_tvalid = Signal(bool(0))
    s_axis_tlast = Signal(bool(0))
    s_axis_tid = Signal(intbv(0)[ID_WIDTH:])
    s_axis_tdest = Signal(intbv(0)[DEST_WIDTH:])
    s_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
    m_axis_tready = Signal(bool(0))
    # Outputs
    s_axis_tready = Signal(bool(0))
    m_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
    m_axis_tkeep = Signal(intbv(1)[KEEP_WIDTH:])
    m_axis_tvalid = Signal(bool(0))
    m_axis_tlast = Signal(bool(0))
    m_axis_tid = Signal(intbv(0)[ID_WIDTH:])
    m_axis_tdest = Signal(intbv(0)[DEST_WIDTH:])
    m_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
    status_overflow = Signal(bool(0))
    status_bad_frame = Signal(bool(0))
    status_good_frame = Signal(bool(0))
    # sources and sinks
    source_pause = Signal(bool(0))
    sink_pause = Signal(bool(0))
    source = axis_ep.AXIStreamSource()
    source_logic = source.create_logic(
        clk,
        rst,
        tdata=s_axis_tdata,
        tkeep=s_axis_tkeep,
        tvalid=s_axis_tvalid,
        tready=s_axis_tready,
        tlast=s_axis_tlast,
        tid=s_axis_tid,
        tdest=s_axis_tdest,
        tuser=s_axis_tuser,
        pause=source_pause,
        name='source'
    )
    sink = axis_ep.AXIStreamSink()
    sink_logic = sink.create_logic(
        clk,
        rst,
        tdata=m_axis_tdata,
        tkeep=m_axis_tkeep,
        tvalid=m_axis_tvalid,
        tready=m_axis_tready,
        tlast=m_axis_tlast,
        tid=m_axis_tid,
        tdest=m_axis_tdest,
        tuser=m_axis_tuser,
        pause=sink_pause,
        name='sink'
    )
    # DUT
    if os.system(build_cmd):
        raise Exception("Error running build command")
    dut = Cosimulation(
        "vvp -m myhdl %s.vvp -lxt2" % testbench,
        clk=clk,
        rst=rst,
        current_test=current_test,
        s_axis_tdata=s_axis_tdata,
        s_axis_tkeep=s_axis_tkeep,
        s_axis_tvalid=s_axis_tvalid,
        s_axis_tready=s_axis_tready,
        s_axis_tlast=s_axis_tlast,
        s_axis_tid=s_axis_tid,
        s_axis_tdest=s_axis_tdest,
        s_axis_tuser=s_axis_tuser,
        m_axis_tdata=m_axis_tdata,
        m_axis_tkeep=m_axis_tkeep,
        m_axis_tvalid=m_axis_tvalid,
        m_axis_tready=m_axis_tready,
        m_axis_tlast=m_axis_tlast,
        m_axis_tid=m_axis_tid,
        m_axis_tdest=m_axis_tdest,
        m_axis_tuser=m_axis_tuser,
        status_overflow=status_overflow,
        status_bad_frame=status_bad_frame,
        status_good_frame=status_good_frame
    )
    # Free-running clock, 8 time-unit period.
    @always(delay(4))
    def clkgen():
        clk.next = not clk
    # Sticky latches recording any pulse on the status outputs; cleared
    # by the check process at the start of each test.
    status_overflow_asserted = Signal(bool(0))
    status_bad_frame_asserted = Signal(bool(0))
    status_good_frame_asserted = Signal(bool(0))
    @always(clk.posedge)
    def monitor():
        if (status_overflow):
            status_overflow_asserted.next = 1
        if (status_bad_frame):
            status_bad_frame_asserted.next = 1
        if (status_good_frame):
            status_good_frame_asserted.next = 1
    @instance
    def check():
        # Reset pulse, then the twelve test scenarios in sequence.
        yield delay(100)
        yield clk.posedge
        rst.next = 1
        yield clk.posedge
        rst.next = 0
        yield clk.posedge
        yield delay(100)
        yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        print("test 1: test packet")
        current_test.next = 1
        test_frame = axis_ep.AXIStreamFrame(
            b'\xDA\xD1\xD2\xD3\xD4\xD5' +
            b'\x5A\x51\x52\x53\x54\x55' +
            b'\x80\x00' +
            b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10',
            id=1,
            dest=1
        )
        status_overflow_asserted.next = 0
        status_bad_frame_asserted.next = 0
        status_good_frame_asserted.next = 0
        source.send(test_frame)
        yield sink.wait()
        rx_frame = sink.recv()
        assert rx_frame == test_frame
        assert not status_overflow_asserted
        assert not status_bad_frame_asserted
        assert status_good_frame_asserted
        yield delay(100)
        yield clk.posedge
        print("test 2: longer packet")
        current_test.next = 2
        test_frame = axis_ep.AXIStreamFrame(
            b'\xDA\xD1\xD2\xD3\xD4\xD5' +
            b'\x5A\x51\x52\x53\x54\x55' +
            b'\x80\x00' +
            bytearray(range(256)),
            id=2,
            dest=1
        )
        status_overflow_asserted.next = 0
        status_bad_frame_asserted.next = 0
        status_good_frame_asserted.next = 0
        source.send(test_frame)
        yield sink.wait()
        rx_frame = sink.recv()
        assert rx_frame == test_frame
        assert not status_overflow_asserted
        assert not status_bad_frame_asserted
        assert status_good_frame_asserted
        yield clk.posedge
        print("test 3: test packet with pauses")
        current_test.next = 3
        test_frame = axis_ep.AXIStreamFrame(
            b'\xDA\xD1\xD2\xD3\xD4\xD5' +
            b'\x5A\x51\x52\x53\x54\x55' +
            b'\x80\x00' +
            bytearray(range(256)),
            id=3,
            dest=1
        )
        status_overflow_asserted.next = 0
        status_bad_frame_asserted.next = 0
        status_good_frame_asserted.next = 0
        source.send(test_frame)
        yield clk.posedge
        # Pause the source mid-frame, then the sink mid-frame.
        yield delay(64)
        yield clk.posedge
        source_pause.next = True
        yield delay(32)
        yield clk.posedge
        source_pause.next = False
        yield delay(64)
        yield clk.posedge
        sink_pause.next = True
        yield delay(32)
        yield clk.posedge
        sink_pause.next = False
        yield sink.wait()
        rx_frame = sink.recv()
        assert rx_frame == test_frame
        assert not status_overflow_asserted
        assert not status_bad_frame_asserted
        assert status_good_frame_asserted
        yield delay(100)
        yield clk.posedge
        print("test 4: back-to-back packets")
        current_test.next = 4
        test_frame1 = axis_ep.AXIStreamFrame(
            b'\xDA\xD1\xD2\xD3\xD4\xD5' +
            b'\x5A\x51\x52\x53\x54\x55' +
            b'\x80\x00' +
            b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10',
            id=4,
            dest=1
        )
        test_frame2 = axis_ep.AXIStreamFrame(
            b'\xDA\xD1\xD2\xD3\xD4\xD5' +
            b'\x5A\x51\x52\x53\x54\x55' +
            b'\x80\x00' +
            b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10',
            id=4,
            dest=2
        )
        status_overflow_asserted.next = 0
        status_bad_frame_asserted.next = 0
        status_good_frame_asserted.next = 0
        source.send(test_frame1)
        source.send(test_frame2)
        yield sink.wait()
        rx_frame = sink.recv()
        assert rx_frame == test_frame1
        yield sink.wait()
        rx_frame = sink.recv()
        assert rx_frame == test_frame2
        assert not status_overflow_asserted
        assert not status_bad_frame_asserted
        assert status_good_frame_asserted
        yield delay(100)
        yield clk.posedge
        print("test 5: alternate pause source")
        current_test.next = 5
        test_frame1 = axis_ep.AXIStreamFrame(
            b'\xDA\xD1\xD2\xD3\xD4\xD5' +
            b'\x5A\x51\x52\x53\x54\x55' +
            b'\x80\x00' +
            b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10',
            id=5,
            dest=1
        )
        test_frame2 = axis_ep.AXIStreamFrame(
            b'\xDA\xD1\xD2\xD3\xD4\xD5' +
            b'\x5A\x51\x52\x53\x54\x55' +
            b'\x80\x00' +
            b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10',
            id=5,
            dest=2
        )
        status_overflow_asserted.next = 0
        status_bad_frame_asserted.next = 0
        status_good_frame_asserted.next = 0
        source.send(test_frame1)
        source.send(test_frame2)
        yield clk.posedge
        # Toggle the source pause every other cycle while data flows.
        while s_axis_tvalid or m_axis_tvalid:
            yield clk.posedge
            yield clk.posedge
            source_pause.next = False
            yield clk.posedge
            source_pause.next = True
            yield clk.posedge
            source_pause.next = False
        yield sink.wait()
        rx_frame = sink.recv()
        assert rx_frame == test_frame1
        yield sink.wait()
        rx_frame = sink.recv()
        assert rx_frame == test_frame2
        assert not status_overflow_asserted
        assert not status_bad_frame_asserted
        assert status_good_frame_asserted
        yield delay(100)
        yield clk.posedge
        print("test 6: alternate pause sink")
        current_test.next = 6
        test_frame1 = axis_ep.AXIStreamFrame(
            b'\xDA\xD1\xD2\xD3\xD4\xD5' +
            b'\x5A\x51\x52\x53\x54\x55' +
            b'\x80\x00' +
            b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10',
            id=6,
            dest=1
        )
        test_frame2 = axis_ep.AXIStreamFrame(
            b'\xDA\xD1\xD2\xD3\xD4\xD5' +
            b'\x5A\x51\x52\x53\x54\x55' +
            b'\x80\x00' +
            b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10',
            id=6,
            dest=2
        )
        status_overflow_asserted.next = 0
        status_bad_frame_asserted.next = 0
        status_good_frame_asserted.next = 0
        source.send(test_frame1)
        source.send(test_frame2)
        yield clk.posedge
        # Toggle the sink pause while data flows.
        while s_axis_tvalid or m_axis_tvalid:
            sink_pause.next = True
            yield clk.posedge
            yield clk.posedge
            yield clk.posedge
            sink_pause.next = False
            yield clk.posedge
        yield sink.wait()
        rx_frame = sink.recv()
        assert rx_frame == test_frame1
        yield sink.wait()
        rx_frame = sink.recv()
        assert rx_frame == test_frame2
        assert not status_overflow_asserted
        assert not status_bad_frame_asserted
        assert status_good_frame_asserted
        yield delay(100)
        yield clk.posedge
        print("test 7: tuser assert")
        current_test.next = 7
        # Frame marked bad via tuser on the last cycle; the FIFO is
        # expected to drop it (DROP_BAD_FRAME=1) and pulse bad_frame.
        test_frame = axis_ep.AXIStreamFrame(
            b'\xDA\xD1\xD2\xD3\xD4\xD5' +
            b'\x5A\x51\x52\x53\x54\x55' +
            b'\x80\x00' +
            b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10',
            id=7,
            dest=1,
            last_cycle_user=1
        )
        status_overflow_asserted.next = 0
        status_bad_frame_asserted.next = 0
        status_good_frame_asserted.next = 0
        source.send(test_frame)
        yield clk.posedge
        yield delay(1000)
        assert sink.empty()
        assert not status_overflow_asserted
        assert status_bad_frame_asserted
        assert not status_good_frame_asserted
        yield delay(100)
        yield clk.posedge
        print("test 8: single packet overflow")
        current_test.next = 8
        # Frame larger than the FIFO depth; expected to be dropped with
        # an overflow pulse (DROP_WHEN_FULL=0, frame mode).
        test_frame = axis_ep.AXIStreamFrame(
            b'\xDA\xD1\xD2\xD3\xD4\xD5' +
            b'\x5A\x51\x52\x53\x54\x55' +
            b'\x80\x00' +
            bytearray(range(256))*2,
            id=8,
            dest=1
        )
        status_overflow_asserted.next = 0
        status_bad_frame_asserted.next = 0
        status_good_frame_asserted.next = 0
        source.send(test_frame)
        yield clk.posedge
        yield delay(10000)
        assert sink.empty()
        assert status_overflow_asserted
        assert not status_bad_frame_asserted
        assert not status_good_frame_asserted
        yield delay(100)
        yield clk.posedge
        print("test 9: initial sink pause")
        current_test.next = 9
        test_frame = axis_ep.AXIStreamFrame(
            bytearray(range(24)),
            id=9,
            dest=1
        )
        sink_pause.next = 1
        source.send(test_frame)
        yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        sink_pause.next = 0
        yield sink.wait()
        rx_frame = sink.recv()
        assert rx_frame == test_frame
        yield delay(100)
        yield clk.posedge
        print("test 10: initial sink pause, reset")
        current_test.next = 10
        # Reset while a frame is held in the FIFO; nothing may come out.
        test_frame = axis_ep.AXIStreamFrame(
            bytearray(range(24)),
            id=10,
            dest=1
        )
        sink_pause.next = 1
        source.send(test_frame)
        yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        rst.next = 1
        yield clk.posedge
        rst.next = 0
        sink_pause.next = 0
        yield delay(100)
        yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        assert sink.empty()
        yield delay(100)
        yield clk.posedge
        print("test 11: backpressure test")
        current_test.next = 11
        test_frame = axis_ep.AXIStreamFrame(
            b'\xDA\xD1\xD2\xD3\xD4\xD5' +
            b'\x5A\x51\x52\x53\x54\x55' +
            b'\x80\x00' +
            bytearray(range(256)),
            id=11,
            dest=1
        )
        sink_pause.next = 1
        source.send(test_frame)
        source.send(test_frame)
        yield delay(1000)
        yield clk.posedge
        sink_pause.next = 0
        yield sink.wait()
        rx_frame = sink.recv()
        assert rx_frame == test_frame
        yield sink.wait()
        rx_frame = sink.recv()
        assert rx_frame == test_frame
        yield delay(100)
        yield clk.posedge
        print("test 12: many small packets")
        current_test.next = 12
        test_frame = axis_ep.AXIStreamFrame(
            b'\xAA',
            id=12,
            dest=1
        )
        for k in range(64):
            source.send(test_frame)
        for k in range(64):
            yield sink.wait()
            rx_frame = sink.recv()
            assert rx_frame == test_frame
        yield delay(100)
        raise StopSimulation
    return instances()
def test_bench():
    """Run the FIFO testbench simulation from the script's directory."""
    # Relative paths in build_cmd assume the script's own directory.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    Simulation(bench()).run()
if __name__ == '__main__':
    # Allow running the testbench directly as a script.
    print("Running test...")
    test_bench()
|
|
# Copyright (C) 2007 Giampaolo Rodola' <g.rodola@gmail.com>.
# Use of this source code is governed by MIT license that can be
# found in the LICENSE file.
import os
import stat
import tempfile
import time
try:
from stat import filemode as _filemode # PY 3.3
except ImportError:
from tarfile import filemode as _filemode
try:
import pwd
import grp
except ImportError:
pwd = grp = None
try:
from os import scandir # py 3.5
except ImportError:
try:
from scandir import scandir # requires "pip install scandir"
except ImportError:
scandir = None
from ._compat import PY3
from ._compat import u
from ._compat import unicode
# Public API of this module.
__all__ = ['FilesystemError', 'AbstractedFS']
# Month number -> abbreviated English month name; presumably used when
# formatting directory-listing timestamps elsewhere in the module.
_months_map = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun',
               7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
def _memoize(fun):
"""A simple memoize decorator for functions supporting (hashable)
positional arguments.
"""
def wrapper(*args, **kwargs):
key = (args, frozenset(sorted(kwargs.items())))
try:
return cache[key]
except KeyError:
ret = cache[key] = fun(*args, **kwargs)
return ret
cache = {}
return wrapper
# ===================================================================
# --- custom exceptions
# ===================================================================
class FilesystemError(Exception):
    """Filesystem-related error whose message is sent to the client.

    Raise this from an AbstractedFS subclass in order to deliver a
    customized error string to the FTP client.
    """
# ===================================================================
# --- base class
# ===================================================================
class AbstractedFS(object):
"""A class used to interact with the file system, providing a
cross-platform interface compatible with both Windows and
UNIX style filesystems where all paths use "/" separator.
AbstractedFS distinguishes between "real" filesystem paths and
"virtual" ftp paths emulating a UNIX chroot jail where the user
can not escape its home directory (example: real "/home/user"
path will be seen as "/" by the client)
It also provides some utility methods and wraps around all os.*
calls involving operations against the filesystem like creating
files or removing directories.
FilesystemError exception can be raised from within any of
the methods below in order to send a customized error string
to the client.
"""
    def __init__(self, root, cmd_channel):
        """
        - (str) root: the user "real" home directory (e.g. '/home/user')
        - (instance) cmd_channel: the FTPHandler class instance
        """
        # Paths are handled as unicode throughout this class.
        assert isinstance(root, unicode)
        # Set initial current working directory.
        # By default initial cwd is set to "/" to emulate a chroot jail.
        # If a different behavior is desired (e.g. initial cwd = root,
        # to reflect the real filesystem) users overriding this class
        # are responsible to set _cwd attribute as necessary.
        self._cwd = u('/')
        self._root = root
        self.cmd_channel = cmd_channel
    @property
    def root(self):
        """The user home directory."""
        return self._root
    @property
    def cwd(self):
        """The user current working directory."""
        return self._cwd
    @root.setter
    def root(self, path):
        # Setter enforces the unicode-only path invariant.
        assert isinstance(path, unicode), path
        self._root = path
    @cwd.setter
    def cwd(self, path):
        # Setter enforces the unicode-only path invariant.
        assert isinstance(path, unicode), path
        self._cwd = path
# --- Pathname / conversion utilities
def ftpnorm(self, ftppath):
"""Normalize a "virtual" ftp pathname (typically the raw string
coming from client) depending on the current working directory.
Example (having "/foo" as current working directory):
>>> ftpnorm('bar')
'/foo/bar'
Note: directory separators are system independent ("/").
Pathname returned is always absolutized.
"""
assert isinstance(ftppath, unicode), ftppath
if os.path.isabs(ftppath):
p = os.path.normpath(ftppath)
else:
p = os.path.normpath(os.path.join(self.cwd, ftppath))
# normalize string in a standard web-path notation having '/'
# as separator.
if os.sep == "\\":
p = p.replace("\\", "/")
# os.path.normpath supports UNC paths (e.g. "//a/b/c") but we
# don't need them. In case we get an UNC path we collapse
# redundant separators appearing at the beginning of the string
while p[:2] == '//':
p = p[1:]
# Anti path traversal: don't trust user input, in the event
# that self.cwd is not absolute, return "/" as a safety measure.
# This is for extra protection, maybe not really necessary.
if not os.path.isabs(p):
p = u("/")
return p
def ftp2fs(self, ftppath):
"""Translate a "virtual" ftp pathname (typically the raw string
coming from client) into equivalent absolute "real" filesystem
pathname.
Example (having "/home/user" as root directory):
>>> ftp2fs("foo")
'/home/user/foo'
Note: directory separators are system dependent.
"""
assert isinstance(ftppath, unicode), ftppath
# as far as I know, it should always be path traversal safe...
if os.path.normpath(self.root) == os.sep:
return os.path.normpath(self.ftpnorm(ftppath))
else:
p = self.ftpnorm(ftppath)[1:]
return os.path.normpath(os.path.join(self.root, p))
def fs2ftp(self, fspath):
    """Translate a "real" filesystem pathname into equivalent
    absolute "virtual" ftp pathname depending on the user's
    root directory.

    Example (having "/home/user" as root directory):
    >>> fs2ftp("/home/user/foo")
    '/foo'

    As for ftpnorm, directory separators are system independent
    ("/") and pathname returned is always absolutized.

    On invalid pathnames escaping from user's root directory
    (e.g. "/home" when root is "/home/user") always return "/".
    """
    assert isinstance(fspath, unicode), fspath
    if os.path.isabs(fspath):
        p = os.path.normpath(fspath)
    else:
        p = os.path.normpath(os.path.join(self.root, fspath))
    if not self.validpath(p):
        # Path escapes the user's root: refuse to translate it.
        return u('/')
    p = p.replace(os.sep, "/")
    p = p[len(self.root):]
    if not p.startswith('/'):
        p = '/' + p
    return p
def validpath(self, path):
    """Check whether the path belongs to user's home directory.
    Expected argument is a "real" filesystem pathname.

    If path is a symbolic link it is resolved to check its real
    destination.

    Pathnames escaping from user's root directory are considered
    not valid.
    """
    assert isinstance(path, unicode), path
    # Resolve symlinks on both sides before comparing.
    real_root = self.realpath(self.root)
    real_path = self.realpath(path)
    # Terminate both with a separator so that "/home/user2" is not
    # mistaken for a child of "/home/user".
    if not real_root.endswith(os.sep):
        real_root += os.sep
    if not real_path.endswith(os.sep):
        real_path += os.sep
    return real_path.startswith(real_root)
# --- Wrapper methods around open() and tempfile.mkstemp
def open(self, filename, mode):
    """Open a file returning its handler."""
    assert isinstance(filename, unicode), filename
    # 'open' here resolves to the builtin, not this method.
    return open(filename, mode)
def mkstemp(self, suffix='', prefix='', dir=None, mode='wb'):
    """A wrap around tempfile.mkstemp creating a file with a unique
    name.  Unlike mkstemp it returns an object with a file-like
    interface.
    """
    class FileWrapper:
        # Exposes the underlying file object's API plus a .name
        # attribute holding the generated pathname.
        def __init__(self, fd, name):
            self.file = fd
            self.name = name

        def __getattr__(self, attr):
            return getattr(self.file, attr)

    text = 'b' not in mode
    # max number of tries to find out a unique file name
    # NOTE(review): this mutates the module-global tempfile.TMP_MAX
    # for the whole process, not just for this call.
    tempfile.TMP_MAX = 50
    fd, name = tempfile.mkstemp(suffix, prefix, dir, text=text)
    file = os.fdopen(fd, mode)
    return FileWrapper(file, name)
# --- Wrapper methods around os.* calls
def chdir(self, path):
    """Change the current directory.  If this method is overridden
    it is vital that `cwd` attribute gets set.
    """
    # note: process cwd will be reset by the caller
    assert isinstance(path, unicode), path
    os.chdir(path)
    # Keep the virtual cwd in sync with the real one.
    self.cwd = self.fs2ftp(path)
def mkdir(self, path):
    """Create the specified directory."""
    assert isinstance(path, unicode), path
    os.mkdir(path)

def listdir(self, path):
    """List the content of a directory."""
    assert isinstance(path, unicode), path
    return os.listdir(path)

def listdirinfo(self, path):
    """List the content of a directory (same as listdir here;
    subclasses may return richer per-entry info)."""
    assert isinstance(path, unicode), path
    return os.listdir(path)

def rmdir(self, path):
    """Remove the specified directory."""
    assert isinstance(path, unicode), path
    os.rmdir(path)

def remove(self, path):
    """Remove the specified file."""
    assert isinstance(path, unicode), path
    os.remove(path)

def rename(self, src, dst):
    """Rename the specified src file to the dst filename."""
    assert isinstance(src, unicode), src
    assert isinstance(dst, unicode), dst
    os.rename(src, dst)

def chmod(self, path, mode):
    """Change file/directory mode."""
    assert isinstance(path, unicode), path
    if not hasattr(os, 'chmod'):
        # Platform does not support chmod at all.
        raise NotImplementedError
    os.chmod(path, mode)

def stat(self, path):
    """Perform a stat() system call on the given path."""
    # on python 2 we might also get bytes from os.listdir()
    # assert isinstance(path, unicode), path
    return os.stat(path)

def utime(self, path, timeval):
    """Perform a utime() call on the given path."""
    # utime expects a int/float (atime, mtime) in seconds
    # thus, setting both access and modify time to timeval
    return os.utime(path, (timeval, timeval))
if hasattr(os, 'lstat'):
    def lstat(self, path):
        """Like stat but does not follow symbolic links."""
        # on python 2 we might also get bytes from os.listdir()
        # assert isinstance(path, unicode), path
        return os.lstat(path)
else:
    # Platforms without lstat() fall back on stat().
    lstat = stat

if hasattr(os, 'readlink'):
    def readlink(self, path):
        """Return a string representing the path to which a
        symbolic link points.
        """
        assert isinstance(path, unicode), path
        return os.readlink(path)
# --- Wrapper methods around os.path.* calls
def isfile(self, path):
    """Return True if path is a file."""
    assert isinstance(path, unicode), path
    return os.path.isfile(path)

def islink(self, path):
    """Return True if path is a symbolic link."""
    assert isinstance(path, unicode), path
    return os.path.islink(path)

def isdir(self, path):
    """Return True if path is a directory."""
    assert isinstance(path, unicode), path
    return os.path.isdir(path)

def getsize(self, path):
    """Return the size of the specified file in bytes."""
    assert isinstance(path, unicode), path
    return os.path.getsize(path)

def getmtime(self, path):
    """Return the last modified time as a number of seconds since
    the epoch."""
    assert isinstance(path, unicode), path
    return os.path.getmtime(path)

def realpath(self, path):
    """Return the canonical version of path eliminating any
    symbolic links encountered in the path (if they are
    supported by the operating system).
    """
    assert isinstance(path, unicode), path
    return os.path.realpath(path)

def lexists(self, path):
    """Return True if path refers to an existing path, including
    a broken or circular symbolic link.
    """
    assert isinstance(path, unicode), path
    return os.path.lexists(path)
# pwd/grp are None on platforms (e.g. Windows) where the modules
# are unavailable; pick the proper implementation at class-body time.
if pwd is not None:
    def get_user_by_uid(self, uid):
        """Return the username associated with user id.
        If this can't be determined return raw uid instead.
        On Windows just return "owner".
        """
        try:
            return pwd.getpwuid(uid).pw_name
        except KeyError:
            return uid
else:
    def get_user_by_uid(self, uid):
        return "owner"

if grp is not None:
    def get_group_by_gid(self, gid):
        """Return the groupname associated with group id.
        If this can't be determined return raw gid instead.
        On Windows just return "group".
        """
        try:
            return grp.getgrgid(gid).gr_name
        except KeyError:
            return gid
else:
    def get_group_by_gid(self, gid):
        return "group"
# --- Listing utilities
def format_list(self, basedir, listing, ignore_err=True):
    """Return an iterator object that yields the entries of given
    directory emulating the "/bin/ls -lA" UNIX command output.

     - (str) basedir: the absolute dirname.
     - (list) listing: the names of the entries in basedir
     - (bool) ignore_err: when False raise exception if os.lstat()
       call fails.

    On platforms which do not support the pwd and grp modules (such
    as Windows), ownership is printed as "owner" and "group" as a
    default, and number of hard links is always "1".  On UNIX
    systems, the actual owner, group, and number of links are
    printed.

    This is how output appears to client:

    -rw-rw-rw- 1 owner group 7045120 Sep 02 3:47 music.mp3
    drwxrwxrwx 1 owner group 0 Aug 31 18:50 e-books
    -rw-rw-rw- 1 owner group 380 Sep 02 3:40 module.py
    """
    # Memoized per call so that repeated uid/gid values hit the
    # pwd/grp databases only once per listing.
    @_memoize
    def get_user_by_uid(uid):
        return self.get_user_by_uid(uid)

    @_memoize
    def get_group_by_gid(gid):
        return self.get_group_by_gid(gid)

    assert isinstance(basedir, unicode), basedir
    if self.cmd_channel.use_gmt_times:
        timefunc = time.gmtime
    else:
        timefunc = time.localtime
    SIX_MONTHS = 180 * 24 * 60 * 60
    readlink = getattr(self, 'readlink', None)
    now = time.time()
    for basename in listing:
        if not PY3:
            try:
                file = os.path.join(basedir, basename)
            except UnicodeDecodeError:
                # (Python 2 only) might happen on filesystem not
                # supporting UTF8 meaning os.listdir() returned a list
                # of mixed bytes and unicode strings:
                # http://goo.gl/6DLHD
                # http://bugs.python.org/issue683592
                file = os.path.join(bytes(basedir), bytes(basename))
                if not isinstance(basename, unicode):
                    basename = unicode(basename, 'utf8', 'ignore')
        else:
            file = os.path.join(basedir, basename)
        try:
            st = self.lstat(file)
        except (OSError, FilesystemError):
            if ignore_err:
                continue
            raise
        perms = _filemode(st.st_mode)  # permissions
        nlinks = st.st_nlink  # number of links to inode
        if not nlinks:  # non-posix system, let's use a bogus value
            nlinks = 1
        size = st.st_size  # file size
        uname = get_user_by_uid(st.st_uid)
        gname = get_group_by_gid(st.st_gid)
        mtime = timefunc(st.st_mtime)
        # if modification time > 6 months shows "month year"
        # else "month hh:mm"; this matches proftpd format, see:
        # https://github.com/giampaolo/pyftpdlib/issues/187
        if (now - st.st_mtime) > SIX_MONTHS:
            fmtstr = "%d %Y"
        else:
            fmtstr = "%d %H:%M"
        try:
            mtimestr = "%s %s" % (_months_map[mtime.tm_mon],
                                  time.strftime(fmtstr, mtime))
        except ValueError:
            # It could be raised if last mtime happens to be too
            # old (prior to year 1900) in which case we return
            # the current time as last mtime.
            mtime = timefunc()
            mtimestr = "%s %s" % (_months_map[mtime.tm_mon],
                                  time.strftime("%d %H:%M", mtime))
        # same as stat.S_ISLNK(st.st_mode) but slightly faster
        islink = (st.st_mode & 61440) == stat.S_IFLNK
        if islink and readlink is not None:
            # if the file is a symlink, resolve it, e.g.
            # "symlink -> realfile"
            try:
                basename = basename + " -> " + readlink(file)
            except (OSError, FilesystemError):
                if not ignore_err:
                    raise
        # formatting is matched with proftpd ls output
        line = "%s %3s %-8s %-8s %8s %s %s\r\n" % (
            perms, nlinks, uname, gname, size, mtimestr, basename)
        yield line.encode('utf8', self.cmd_channel.unicode_errors)
def format_mlsx(self, basedir, listing, perms, facts, ignore_err=True):
    """Return an iterator object that yields the entries of a given
    directory or of a single file in a form suitable with MLSD and
    MLST commands.

    Every entry includes a list of "facts" referring the listed
    element.  See RFC-3659, chapter 7, to see what every single
    fact stands for.

     - (str) basedir: the absolute dirname.
     - (list) listing: the names of the entries in basedir
     - (str) perms: the string referencing the user permissions.
     - (str) facts: the list of "facts" to be returned.
     - (bool) ignore_err: when False raise exception if os.stat()
       call fails.

    Note that "facts" returned may change depending on the platform
    and on what user specified by using the OPTS command.

    This is how output could appear to the client issuing
    a MLSD request:

    type=file;size=156;perm=r;modify=20071029155301;unique=8012; music.mp3
    type=dir;size=0;perm=el;modify=20071127230206;unique=801e33; ebooks
    type=file;size=211;perm=r;modify=20071103093626;unique=192; module.py
    """
    assert isinstance(basedir, unicode), basedir
    if self.cmd_channel.use_gmt_times:
        timefunc = time.gmtime
    else:
        timefunc = time.localtime
    # Directories and files advertise different permission letters
    # (RFC-3659, chapter 7.5.5).
    permdir = ''.join([x for x in perms if x not in 'arw'])
    permfile = ''.join([x for x in perms if x not in 'celmp'])
    if ('w' in perms) or ('a' in perms) or ('f' in perms):
        permdir += 'c'
    if 'd' in perms:
        permdir += 'p'
    # Pre-compute which facts the client asked for.
    show_type = 'type' in facts
    show_perm = 'perm' in facts
    show_size = 'size' in facts
    show_modify = 'modify' in facts
    show_create = 'create' in facts
    show_mode = 'unix.mode' in facts
    show_uid = 'unix.uid' in facts
    show_gid = 'unix.gid' in facts
    show_unique = 'unique' in facts
    for basename in listing:
        retfacts = dict()
        if not PY3:
            try:
                file = os.path.join(basedir, basename)
            except UnicodeDecodeError:
                # (Python 2 only) might happen on filesystem not
                # supporting UTF8 meaning os.listdir() returned a list
                # of mixed bytes and unicode strings:
                # http://goo.gl/6DLHD
                # http://bugs.python.org/issue683592
                file = os.path.join(bytes(basedir), bytes(basename))
                if not isinstance(basename, unicode):
                    basename = unicode(basename, 'utf8', 'ignore')
        else:
            file = os.path.join(basedir, basename)
        # in order to properly implement 'unique' fact (RFC-3659,
        # chapter 7.5.2) we are supposed to follow symlinks, hence
        # use os.stat() instead of os.lstat()
        try:
            st = self.stat(file)
        except (OSError, FilesystemError):
            if ignore_err:
                continue
            raise
        # type + perm
        # same as stat.S_ISDIR(st.st_mode) but slightly faster
        isdir = (st.st_mode & 61440) == stat.S_IFDIR
        if isdir:
            if show_type:
                if basename == '.':
                    retfacts['type'] = 'cdir'
                elif basename == '..':
                    retfacts['type'] = 'pdir'
                else:
                    retfacts['type'] = 'dir'
            if show_perm:
                retfacts['perm'] = permdir
        else:
            if show_type:
                retfacts['type'] = 'file'
            if show_perm:
                retfacts['perm'] = permfile
        if show_size:
            retfacts['size'] = st.st_size  # file size
        # last modification time
        if show_modify:
            try:
                retfacts['modify'] = time.strftime("%Y%m%d%H%M%S",
                                                   timefunc(st.st_mtime))
            # it could be raised if last mtime happens to be too old
            # (prior to year 1900)
            except ValueError:
                pass
        if show_create:
            # on Windows we can provide also the creation time
            try:
                retfacts['create'] = time.strftime("%Y%m%d%H%M%S",
                                                   timefunc(st.st_ctime))
            except ValueError:
                pass
        # UNIX only
        if show_mode:
            retfacts['unix.mode'] = oct(st.st_mode & 511)
        if show_uid:
            retfacts['unix.uid'] = st.st_uid
        if show_gid:
            retfacts['unix.gid'] = st.st_gid
        # We provide unique fact (see RFC-3659, chapter 7.5.2) on
        # posix platforms only; we get it by mixing st_dev and
        # st_ino values which should be enough for granting an
        # uniqueness for the file listed.
        # The same approach is used by pure-ftpd.
        # Implementors who want to provide unique fact on other
        # platforms should use some platform-specific method (e.g.
        # on Windows NTFS filesystems MTF records could be used).
        if show_unique:
            retfacts['unique'] = "%xg%x" % (st.st_dev, st.st_ino)
        # facts can be in any order but we sort them by name
        factstring = "".join(["%s=%s;" % (x, retfacts[x])
                              for x in sorted(retfacts.keys())])
        line = "%s %s\r\n" % (factstring, basename)
        yield line.encode('utf8', self.cmd_channel.unicode_errors)
# ===================================================================
# --- platform specific implementation
# ===================================================================
if os.name == 'posix':
    __all__.append('UnixFilesystem')

    class UnixFilesystem(AbstractedFS):
        """Represents the real UNIX filesystem.

        Differently from AbstractedFS the client will login into
        /home/<username> and will be able to escape its home directory
        and navigate the real filesystem.
        """

        def __init__(self, root, cmd_channel):
            AbstractedFS.__init__(self, root, cmd_channel)
            # initial cwd was set to "/" to emulate a chroot jail
            self.cwd = root

        def ftp2fs(self, ftppath):
            # Virtual and real paths coincide: normalization only.
            return self.ftpnorm(ftppath)

        def fs2ftp(self, fspath):
            # No translation needed in the other direction either.
            return fspath

        def validpath(self, path):
            # validpath was used to check symlinks escaping user home
            # directory; this is no longer necessary.
            return True
|
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from math import floor
import pandas as pd
from google.cloud import bigquery as bq
"""
This script is meant to orchestrate BigQuery load jobs of many
json files on Google Cloud Storage. It ensures that each load
stays under the 15 TB per load job limit. It operates on the
output of gsutil -l.
Args:
--project: GCP project ID
--dataset: BigQuery dataset ID containing the table you wish
to populate.
--table: BigQuery table ID of the table you wish to populate
--sources_file: This is the output of gsutil -l with the URI of
each file that you would like to load
--create_table: Boolean specifying if this script should create
the destination table.
--schema_file: Path to a json file defining the destination BigQuery
table schema.
--partitioning_column: name of the field for date partitioning.
--max_bad_records: Number of permissible bad records per load job.
Example Usage:
gsutil -l gs://<bucket>/path/to/json/<file prefix>-*.json >> ./files2load.txt
# This is for an existing bigquery table.
python file_15TB_batcher.py --project=<project> \
--dataset=<dataset_id> \
--table=<table_id> \
--partitioning_column=date \
--sources_file=files_to_load.txt
"""
def create_bq_table(bq_cli,
                    dataset_id,
                    table_id,
                    schema_file,
                    partition_column=None):
    """Create an empty BigQuery table with the given schema, optionally
    time-partitioned on the specified column.

    Args:
        bq_cli: (bigquery.Client) client used for the API calls.
        dataset_id: (str) ID of the dataset that will hold the table.
        table_id: (str) the name for the BigQuery table to create.
        schema_file: (str) path to a json file defining the BigQuery
            schema (a list of field dicts in API representation).
        partition_column: (str or None) the field name for date
            partitioning; when None the table is created unpartitioned.

    Returns:
        table: (bigquery.table.Table) the Table object that was created.
    """
    # Initialize the Table reference.
    dataset = bq_cli.dataset(dataset_id)
    table_ref = dataset.table(table_id)
    # Read and parse the schema file.
    with open(schema_file, 'rb') as sf:
        schema_dict = json.load(sf)
    parsed_schema = [
        bq.schema.SchemaField.from_api_repr(field) for field in schema_dict
    ]
    table = bq.table.Table(table_ref, schema=parsed_schema)
    # Bug fix: a TimePartitioning(field=None) spec used to be attached
    # unconditionally, silently creating an ingestion-time partitioned
    # table even when the caller asked for no partitioning.
    if partition_column is not None:
        partitioning = bq.table.TimePartitioning()
        partitioning.field = partition_column
        table.time_partitioning = partitioning
    # API call to create the empty table.
    bq_cli.create_table(table)
    return table
def parse_gsutil_long_output_file(filename):
    """Read *filename* (the output of ``gsutil ls -l``) and group the
    listed URIs into batches whose cumulative size stays under 15 TB,
    the per-load-job BigQuery limit.

    Args:
        filename: (str) path to input file.

    Returns:
        batches: (list of list of str) each inner list contains GCS
        URIs to be loaded by a single job; empty list for an empty
        listing.
    """
    # 15TB per BQ load job.
    MAX_BATCH_BYTES = 15 * 10**12
    # Read the output of gsutil ls -l: column 0 is the byte size,
    # column 2 the gs:// URI; the trailing "TOTAL: ..." summary line
    # is dropped via skipfooter.  sep=r'\s+' replaces the deprecated
    # delim_whitespace=True with identical behavior.
    df = pd.read_csv(filename,
                     sep=r'\s+',
                     header=None,
                     skipfooter=1,
                     usecols=[0, 2],
                     names=['bytes', 'filename'],
                     engine='python')
    # Bug fix: an empty listing used to crash below on int(nan + 1).
    if df.empty:
        return []
    # Sort files by size so batches fill up deterministically.
    df.sort_values(by='bytes', inplace=True)
    df['cum_sum_bytes'] = df['bytes'].cumsum()
    df['batch_num'] = df['cum_sum_bytes'] // MAX_BATCH_BYTES
    batches = []
    total_batches = int(df['batch_num'].max() + 1)
    for i in range(total_batches):
        batches.append(list(df[df['batch_num'] == i]['filename']))
    return batches
def submit_jobs(bq_cli, job_config, dataset_id, table_id, batches):
    """Kick off one BigQuery load job per batch of source URIs.

    The jobs are submitted without waiting for completion, so they
    run in parallel on the service side.

    Args:
        bq_cli: (bigquery.Client) the client to use for loading.
        job_config: (bigquery.job.LoadConfig) specifies file type,
            write disposition etc.
        dataset_id: (str) destination BigQuery dataset id.
        table_id: (str) destination BigQuery table id.
        batches: (list of list of str) GCS URI groups, one per job.
    """
    table_ref = bq_cli.dataset(dataset_id).table(table_id)
    n_batches = len(batches)
    for index, uri_batch in enumerate(batches):
        print(('running load job {} of {}.'.format(index, n_batches)))
        # API call.
        bq_cli.load_table_from_uri(source_uris=uri_batch,
                                   destination=table_ref,
                                   job_config=job_config)
def main(argv=None):
    """Parse command-line arguments, optionally create the destination
    table, then submit one load job per <=15 TB batch of files.

    Args:
        argv: optional list of argument strings (defaults to sys.argv).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--project',
                        dest='project',
                        required=True,
                        help='GCP Project ID.')
    parser.add_argument('--dataset',
                        dest='dataset',
                        required=True,
                        help='BigQuery Dataset ID.')
    parser.add_argument('--table',
                        dest='table',
                        required=True,
                        help='BigQuery Table ID.')
    parser.add_argument('--sources_file',
                        dest='sources_file',
                        required=True,
                        # Bug fix: the two implicitly-concatenated help
                        # strings were missing a separating space.
                        help='A local file containing the '
                             'output of gsutil ls -l.')
    parser.add_argument('--create_table',
                        dest='create_table',
                        action='store_true')
    parser.add_argument('--schema_file',
                        dest='schema_file',
                        required=False,
                        default=None)
    parser.add_argument('--partition_column',
                        dest='partition_column',
                        required=False,
                        default=None)
    parser.add_argument('--max_bad_records',
                        dest='max_bad_records',
                        required=False,
                        # Bug fix: without type=int a user-supplied value
                        # arrived as a string on the job config.
                        type=int,
                        default=1)
    parser.add_argument('--source_format',
                        dest='source_format',
                        required=False,
                        default='AVRO')
    known_args, _ = parser.parse_known_args(argv)
    bq_cli = bq.Client(project=known_args.project)
    job_config = bq.job.LoadJobConfig()
    job_config.write_disposition = bq.WriteDisposition.WRITE_APPEND
    job_config.source_format = known_args.source_format
    job_config.max_bad_records = known_args.max_bad_records
    if known_args.create_table:
        if known_args.schema_file:
            create_bq_table(bq_cli,
                            known_args.dataset,
                            known_args.table,
                            known_args.schema_file,
                            partition_column=known_args.partition_column)
        else:
            # Bug fix: argparse.ArgumentError requires an (argument,
            # message) pair, so raising it with one string was itself a
            # TypeError; parser.error() reports the problem and exits.
            parser.error('Cannot create table without schema.')
    batches = parse_gsutil_long_output_file(filename=known_args.sources_file)
    submit_jobs(bq_cli=bq_cli,
                job_config=job_config,
                dataset_id=known_args.dataset,
                table_id=known_args.table,
                batches=batches)


if __name__ == "__main__":
    main()
|
|
'''
Copyright (c) 2010 Pavel Grafov
Implementation of AVL trees (http://en.wikipedia.org/wiki/AVL_tree) in Python.
Class AVL Tree supports the following functionality:
- insertion of a new entry in the tree;
- removal of any entry in the tree;
- search for any entry in the tree;
- "sanity check" for the tree (described later);
- 4 various tree traversals
- preorder,
- inorder,
- postorder,
- inorder non-recursive.
I would like to mention some sources, that helped me a lot while working on this code:
1) Wikipedia
1a) http://en.wikipedia.org/wiki/AVL_tree
Description of AVL trees.
1b) http://en.wikipedia.org/wiki/Tree_traversal
Description of tree traversals in binary search trees and
sample implementations of traversal algorithms in pseudocode.
2) http://www.cse.ohio-state.edu/~sgomori/570/avlrotations.html
Rotation algorithms for putting an out-of-balance AVL tree back in balance.
3) http://sourceforge.net/projects/standardavl/
Implementation of AVL trees in C++. I borrowed an idea of "sanity check" -
a method, which traverses the tree and checks that tree is in balance, contains
no circular references, height for each node is calculated correctly and so on.
4) http://oopweb.com/Algorithms/Documents/AvlTrees/Volume/AvlTrees.htm
From this page I borrowed the idea how to correctly delete an entry
from an AVL tree.
This code is available under MIT License.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
## Adapted from Pavel Grafov's GitHub repository: https://github.com/pgrafov
import random, math
import sys
def random_data_generator(max_r):
    # Yield max_r random integers drawn from [0, max_r].
    # (Python 2: xrange.)
    for i in xrange(max_r):
        yield random.randint(0, max_r)
class Node():
    """A single AVL-tree node.

    The constructor receives an indexable record; its first element
    becomes the sort key and the whole record is kept as the payload.
    """

    def __init__(self, key):
        self.key = key[0]
        self.value = key
        self.parent = None
        self.leftChild = None
        self.rightChild = None
        self.height = 0

    def __str__(self):
        # Rendered as "key(height)".
        return str(self.key) + "(" + str(self.height) + ")"

    def is_leaf(self):
        """A node with height 0 has no children."""
        return self.height == 0

    def max_children_height(self):
        """Tallest child's height, or -1 when the node is childless."""
        left, right = self.leftChild, self.rightChild
        if left and right:
            return max(left.height, right.height)
        if left:
            return left.height
        if right:
            return right.height
        return -1

    def balance(self):
        """Left height minus right height (missing child counts -1)."""
        left_h = self.leftChild.height if self.leftChild else -1
        right_h = self.rightChild.height if self.rightChild else -1
        return left_h - right_h
class AVLTree():
def __init__(self, *args):
    """Build an empty tree; a single iterable argument seeds it."""
    self.rootNode = None
    self.elements_count = 0
    self.rebalance_count = 0
    if len(args) == 1:
        for item in args[0]:
            self.insert(item)
def height(self):
    """Return the cached height of the whole tree (0 when empty)."""
    return self.rootNode.height if self.rootNode else 0
def rebalance(self, node_to_rebalance):
    """Restore the AVL invariant at *node_to_rebalance* using the
    appropriate single or double rotation (RR, RL, LL or LR case).

    A is the unbalanced node, F its parent (may be None when A is
    the root), B/C the child/grandchild involved in the rotation.
    """
    self.rebalance_count += 1
    A = node_to_rebalance
    F = A.parent  # allowed to be NULL
    if node_to_rebalance.balance() == -2:
        # Right subtree is taller.
        if node_to_rebalance.rightChild.balance() <= 0:
            """Rebalance, case RRC """
            # Single left rotation around A.
            B = A.rightChild
            C = B.rightChild
            assert (not A is None and not B is None and not C is None)
            A.rightChild = B.leftChild
            if A.rightChild:
                A.rightChild.parent = A
            B.leftChild = A
            A.parent = B
            if F is None:
                self.rootNode = B
                self.rootNode.parent = None
            else:
                if F.rightChild == A:
                    F.rightChild = B
                else:
                    F.leftChild = B
                B.parent = F
            self.recompute_heights(A)
            self.recompute_heights(B.parent)
        else:
            """Rebalance, case RLC """
            # Double rotation: right around B, then left around A.
            B = A.rightChild
            C = B.leftChild
            assert (not A is None and not B is None and not C is None)
            B.leftChild = C.rightChild
            if B.leftChild:
                B.leftChild.parent = B
            A.rightChild = C.leftChild
            if A.rightChild:
                A.rightChild.parent = A
            C.rightChild = B
            B.parent = C
            C.leftChild = A
            A.parent = C
            if F is None:
                self.rootNode = C
                self.rootNode.parent = None
            else:
                if F.rightChild == A:
                    F.rightChild = C
                else:
                    F.leftChild = C
                C.parent = F
            self.recompute_heights(A)
            self.recompute_heights(B)
    else:
        # Left subtree is taller.
        assert(node_to_rebalance.balance() == +2)
        if node_to_rebalance.leftChild.balance() >= 0:
            B = A.leftChild
            C = B.leftChild
            """Rebalance, case LLC """
            # Single right rotation around A.
            assert (not A is None and not B is None and not C is None)
            A.leftChild = B.rightChild
            if (A.leftChild):
                A.leftChild.parent = A
            B.rightChild = A
            A.parent = B
            if F is None:
                self.rootNode = B
                self.rootNode.parent = None
            else:
                if F.rightChild == A:
                    F.rightChild = B
                else:
                    F.leftChild = B
                B.parent = F
            self.recompute_heights(A)
            self.recompute_heights(B.parent)
        else:
            B = A.leftChild
            C = B.rightChild
            """Rebalance, case LRC """
            # Double rotation: left around B, then right around A.
            assert (not A is None and not B is None and not C is None)
            A.leftChild = C.rightChild
            if A.leftChild:
                A.leftChild.parent = A
            B.rightChild = C.leftChild
            if B.rightChild:
                B.rightChild.parent = B
            C.leftChild = B
            B.parent = C
            C.rightChild = A
            A.parent = C
            if F is None:
                self.rootNode = C
                self.rootNode.parent = None
            else:
                if (F.rightChild == A):
                    F.rightChild = C
                else:
                    F.leftChild = C
                C.parent = F
            self.recompute_heights(A)
            self.recompute_heights(B)
def sanity_check(self, *args):
    """Recursively verify the AVL invariants below the given node
    (the root when called without arguments): cached heights,
    balance factors within [-1, 1], parent/child back-links and
    BST key ordering.  Raises Exception on the first violation.
    """
    if len(args) == 0:
        node = self.rootNode
    else:
        node = args[0]
    if (node is None) or (node.is_leaf() and node.parent is None):
        # trivial - no sanity check needed, as either the tree is
        # empty or there is only one node in the tree
        pass
    else:
        if node.height != node.max_children_height() + 1:
            raise Exception("Invalid height for node " + str(node) + ": " + str(node.height) + " instead of " + str(node.max_children_height() + 1) + "!")
        balFactor = node.balance()
        # Test the balance factor
        if not (balFactor >= -1 and balFactor <= 1):
            raise Exception("Balance factor for node " + str(node) + " is " + str(balFactor) + "!")
        # Make sure we have no circular references
        if not (node.leftChild != node):
            raise Exception("Circular reference for node " + str(node) + ": node.leftChild is node!")
        if not (node.rightChild != node):
            raise Exception("Circular reference for node " + str(node) + ": node.rightChild is node!")
        if (node.leftChild):
            if not (node.leftChild.parent == node):
                raise Exception("Left child of node " + str(node) + " doesn't know who his father is!")
            if not (node.leftChild.key <= node.key):
                raise Exception("Key of left child of node " + str(node) + " is greater than key of his parent!")
            self.sanity_check(node.leftChild)
        if (node.rightChild):
            if not (node.rightChild.parent == node):
                raise Exception("Right child of node " + str(node) + " doesn't know who his father is!")
            if not (node.rightChild.key >= node.key):
                raise Exception("Key of right child of node " + str(node) + " is less than key of his parent!")
            self.sanity_check(node.rightChild)
def recompute_heights(self, start_from_node):
    """Refresh cached heights walking up from *start_from_node*,
    stopping as soon as an ancestor's height is unchanged."""
    node = start_from_node
    changed = True
    while node and changed:
        previous = node.height
        if node.rightChild or node.leftChild:
            node.height = node.max_children_height() + 1
        else:
            node.height = 0
        changed = node.height != previous
        node = node.parent
def add_as_child(self, parent_node, child_node):
    """Recursively insert *child_node* below *parent_node*, updating
    cached heights on the way back up and rebalancing at the deepest
    ancestor whose balance factor leaves [-1, 1]."""
    node_to_rebalance = None
    if child_node.key < parent_node.key:
        if not parent_node.leftChild:
            parent_node.leftChild = child_node
            child_node.parent = parent_node
            if parent_node.height == 0:
                # Parent was a leaf: ancestor heights may grow,
                # possibly breaking the AVL invariant somewhere above.
                node = parent_node
                while node:
                    node.height = node.max_children_height() + 1
                    if not node.balance() in [-1, 0, 1]:
                        node_to_rebalance = node
                        break  # we need the one that is furthest from the root
                    node = node.parent
        else:
            self.add_as_child(parent_node.leftChild, child_node)
    else:
        if not parent_node.rightChild:
            parent_node.rightChild = child_node
            child_node.parent = parent_node
            if parent_node.height == 0:
                node = parent_node
                while node:
                    node.height = node.max_children_height() + 1
                    if not node.balance() in [-1, 0, 1]:
                        node_to_rebalance = node
                        break  # we need the one that is furthest from the root
                    node = node.parent
        else:
            self.add_as_child(parent_node.rightChild, child_node)
    if node_to_rebalance:
        self.rebalance(node_to_rebalance)
def insert(self, key):
    """Insert *key* (an indexable record; key[0] is the sort key).

    Duplicate keys are merged: the existing node's value becomes a
    list accumulating every payload inserted under that key (and
    elements_count is not incremented for duplicates).
    """
    new_node = Node(key)
    if not self.rootNode:
        self.rootNode = new_node
        # Bug fix: the very first element was never counted, so
        # elements_count disagreed with remove()'s decrement.
        self.elements_count += 1
    else:
        elem = self.find(key[0])
        if elem:
            # Duplicate key: accumulate payloads on the existing node.
            if type(elem.value) is list:
                elem.value.append(new_node.value)
            else:
                elem.value = [elem.value]
                elem.value.append(new_node.value)
        else:
            self.elements_count += 1
            self.add_as_child(self.rootNode, new_node)
def find_biggest(self, start_node):
    """Return the right-most (maximum-key) node of the subtree."""
    current = start_node
    while current.rightChild is not None:
        current = current.rightChild
    return current

def find_smallest(self, start_node):
    """Return the left-most (minimum-key) node of the subtree."""
    current = start_node
    while current.leftChild is not None:
        current = current.leftChild
    return current
def predecessor(self, start_node):
    """Return the node holding the next-smaller key, or None."""
    node = start_node
    if node.leftChild:
        # Largest node of the left subtree.
        node = node.leftChild
        while node.rightChild:
            node = node.rightChild
        return node
    # Otherwise climb until we arrive from a right child.
    while node.parent is not None and node is node.parent.leftChild:
        node = node.parent
    return node.parent

def successor(self, start_node):
    """Return the node holding the next-larger key, or None."""
    node = start_node
    if node.rightChild:
        # Smallest node of the right subtree.
        node = node.rightChild
        while node.leftChild:
            node = node.leftChild
        return node
    # Otherwise climb until we arrive from a left child.
    while node.parent is not None and node is node.parent.rightChild:
        node = node.parent
    return node.parent
def inorder_non_recursive(self):
    """Return every key in ascending order without recursion.

    Walks to the left-most node, then repeatedly advances to the
    in-order successor using parent links.
    """
    retlst = []
    node = self.rootNode
    # Bug fix: an empty tree used to raise AttributeError on
    # node.leftChild when this method was called directly.
    if node is None:
        return retlst
    while node.leftChild:
        node = node.leftChild
    while (node):
        retlst += [node.key]
        if (node.rightChild):
            node = node.rightChild
            while node.leftChild:
                node = node.leftChild
        else:
            while ((node.parent) and (node == node.parent.rightChild)):
                node = node.parent
            node = node.parent
    return retlst
def preorder(self, node, retlst=None):
    """Root-left-right traversal; appends keys to *retlst*."""
    if retlst is None:
        retlst = []
    retlst.append(node.key)
    for child in (node.leftChild, node.rightChild):
        if child:
            retlst = self.preorder(child, retlst)
    return retlst
def inorder(self, node, end=None, retlst=None):
    """Collect node *values* in ascending key order, starting from
    *node* and stopping once a key >= *end* is reached.

    Args:
        node: starting node (typically the tree's smallest node or
            the root); None yields an empty result.
        end: exclusive key bound; None (the default) means no bound.
            Bug fix: *end* previously had no default, so
            AVLTree.as_list(1) -- which calls inorder() with a single
            argument -- raised TypeError.
        retlst: optional accumulator list to extend.

    Returns:
        The accumulator list of values.
    """
    if retlst is None:
        retlst = []
    while node is not None:
        if end is not None and node.key >= end:
            break
        retlst += [node.value]
        # Advance via parent links instead of recursing.
        node = self.successor(node)
    return retlst
def postorder(self, node, retlst=None):
    """Left-right-root traversal; appends keys to *retlst*."""
    if retlst is None:
        retlst = []
    for child in (node.leftChild, node.rightChild):
        if child:
            retlst = self.postorder(child, retlst)
    retlst.append(node.key)
    return retlst
def as_list(self, pre_in_post):
    """Return the tree contents as a list.

    pre_in_post selects the traversal: 0 = preorder keys,
    1 = inorder values, 2 = postorder keys, 3 = non-recursive
    inorder keys; any other value falls through and returns None.
    """
    if not self.rootNode:
        return []
    if pre_in_post == 0:
        return self.preorder(self.rootNode)
    elif pre_in_post == 1:
        # NOTE(review): inorder() is declared as
        # inorder(node, end, retlst=None) but only one argument is
        # passed here -- confirm `end` has a default, otherwise this
        # branch raises TypeError.
        return self.inorder(self.rootNode)
    elif pre_in_post == 2:
        return self.postorder(self.rootNode)
    elif pre_in_post == 3:
        return self.inorder_non_recursive()
def find(self, key):
    """Return the node whose key equals *key*, or None."""
    return self.find_in_subtree(self.rootNode, key)

def find_in_subtree(self, node, key):
    """Standard BST lookup below *node*; None when the key is absent.

    Implemented iteratively (equivalent to the usual recursive
    descent).
    """
    while node is not None:
        if key < node.key:
            node = node.leftChild
        elif key > node.key:
            node = node.rightChild
        else:
            # key is equal to node key
            return node
    return None  # key not found
def get_smallest_at_least(self, bound, node=None):
    """Return the node with the smallest key >= *bound*, or None.

    The search starts at *node* (the root when omitted).
    """
    if node is None:
        node = self.rootNode
    # An exact hit wins outright.
    exact = self.find_in_subtree(node, bound)
    if exact:
        return exact
    # Otherwise descend, remembering the last node we stepped left from:
    # that node is the tightest strict upper bound encountered.
    best = None
    current = node
    while current is not None:
        if current.key < bound:
            current = current.rightChild
        elif current.key > bound:
            best = current
            current = current.leftChild
    return best
def remove(self, key):
    """Delete the node carrying *key*, if present, and update the element count.

    Dispatches on the three classic BST deletion cases.
    """
    node = self.find(key)
    if node is None:
        # Key not in the tree: nothing to do.
        return
    self.elements_count -= 1
    if node.is_leaf():
        # Case 1: no children -- unlink directly.
        self.remove_leaf(node)
    elif bool(node.leftChild) != bool(node.rightChild):
        # Case 2: exactly one child -- splice that child into node's place.
        self.remove_branch(node)
    else:
        # Case 3: two children -- swap with the inorder successor
        # (smallest node of the right subtree), then delete from there.
        self.swap_with_successor_and_remove(node)
def remove_leaf(self, node):
    """Detach a childless *node* from the tree, then rebalance upward."""
    parent = node.parent
    if parent:
        # Clear whichever child slot pointed at the leaf.
        if parent.leftChild == node:
            parent.leftChild = None
        else:
            parent.rightChild = None
        self.recompute_heights(parent)
    else:
        # The leaf was the only node in the tree.
        self.rootNode = None
    # Walk back to the root, rebalancing any ancestor that went out of
    # the AVL balance range {-1, 0, 1}.
    ancestor = parent
    while ancestor:
        if ancestor.balance() not in (-1, 0, 1):
            self.rebalance(ancestor)
        ancestor = ancestor.parent
def remove_branch(self, node):
    """Delete *node*, which has exactly one child, by splicing that child in.

    BUG FIX: the original only reattached the child when *node* had a
    parent; deleting a root that had a single child left ``self.rootNode``
    pointing at the removed node.  The root pointer is now updated too.
    """
    parent = node.parent
    # Exactly one of the two children exists (caller guarantees this).
    child = node.rightChild or node.leftChild
    if parent:
        # Replace node with its child in the parent's matching slot.
        if parent.leftChild == node:
            parent.leftChild = child
        else:
            parent.rightChild = child
        self.recompute_heights(parent)
    else:
        # Node was the root: promote its only child.
        self.rootNode = child
    child.parent = parent
    # Rebalance every ancestor whose balance factor left {-1, 0, 1}.
    ancestor = parent
    while ancestor:
        if ancestor.balance() not in (-1, 0, 1):
            self.rebalance(ancestor)
        ancestor = ancestor.parent
def swap_with_successor_and_remove(self, node):
    """Exchange *node* with its inorder successor, then delete it from its new slot."""
    heir = self.find_smallest(node.rightChild)
    self.swap_nodes(node, heir)
    # After the swap the node occupies the successor's old position, which
    # never has a left child, so it is now a leaf or a one-child branch.
    if node.height == 0:
        self.remove_leaf(node)
    else:
        self.remove_branch(node)
def swap_nodes (self, node1, node2):
    """Swap the tree positions of *node1* and *node2*.

    As called from swap_with_successor_and_remove, *node2* is the smallest
    node of node1's right subtree, so node2 has no left child and its
    parent is either node1 itself or a descendant of node1.  Only heights
    and parent/child links are exchanged; keys and values stay attached to
    their original node objects.
    """
    # Invariants guaranteed by the caller (successor lies strictly below node1).
    assert (node1.height > node2.height)
    parent1 = node1.parent
    leftChild1 = node1.leftChild
    rightChild1 = node1.rightChild
    parent2 = node2.parent
    assert (not parent2 is None)
    assert (parent2.leftChild == node2 or parent2 == node1)
    leftChild2 = node2.leftChild
    assert (leftChild2 is None)
    rightChild2 = node2.rightChild
    # swap heights
    tmp = node1.height
    node1.height = node2.height
    node2.height = tmp
    # Re-point node1's parent (or the tree root) at node2.
    if parent1:
        if parent1.leftChild == node1:
            parent1.leftChild = node2
        else:
            assert (parent1.rightChild == node1)
            parent1.rightChild = node2
        node2.parent = parent1
    else:
        self.rootNode = node2
        node2.parent = None
    # node2 adopts node1's left subtree; node1 takes node2's (empty left,
    # possibly present right) children.
    node2.leftChild = leftChild1
    leftChild1.parent = node2
    node1.leftChild = leftChild2 # None
    node1.rightChild = rightChild2
    if rightChild2:
        rightChild2.parent = node1
    # Two sub-cases: node2 deeper down the right subtree, or node2 was
    # node1's direct right child (then the two nodes link to each other).
    if not (parent2 == node1):
        node2.rightChild = rightChild1
        rightChild1.parent = node2
        parent2.leftChild = node1
        node1.parent = parent2
    else:
        node2.rightChild = node1
        node1.parent = node2
# use for debug only and only with small trees
def out(self, start_node=None):
    """Render the subtree at *start_node* (default: root) level-by-level as ASCII art.

    Each level is an 80-character line of '*' with node reprs spliced in at
    evenly spaced columns.  Debug helper -- only practical for small trees.

    BUG FIX (Python 3 compatibility): the original used ``xrange`` (removed
    in Python 3) and true division ``/``, whose float result is not a valid
    slice index.  ``range`` and floor division ``//`` behave identically on
    Python 2 and fix Python 3.
    """
    if start_node is None:
        start_node = self.rootNode
    space_symbol = "*"
    spaces_count = 80
    out_string = ""
    initial_spaces_string = space_symbol * spaces_count + "\n"
    if not start_node:
        return "AVLTree is empty"
    level = [start_node]
    # Breadth-first: keep going while the current level has any real node.
    while len([i for i in level if i is not None]) > 0:
        level_string = initial_spaces_string
        for i in range(len(level)):
            # Integer column position for slot i of this level.
            j = (i + 1) * spaces_count // (len(level) + 1)
            level_string = level_string[:j] + (str(level[i]) if level[i] else space_symbol) + level_string[j + 1:]
        # Expand to the next level, padding missing children with None.
        level_next = []
        for i in level:
            level_next += ([i.leftChild, i.rightChild] if i else [None, None])
        level = level_next
        out_string += level_string
    return out_string
if __name__ == "__main__":
    # Self-test harness, run only when the module is executed directly.
    # NOTE(review): relies on AVLTree, random_data_generator and math being
    # defined/imported earlier in this file -- they are outside this section.
    """check empty tree creation"""
    a = AVLTree ()
    a.sanity_check()
    """check not empty tree creation"""
    seq = [1,2,3,4,5,6,7,8,9,10,11,12]
    seq_copy = [1,2,3,4,5,6,7,8,9,10,11,12]
    #random.shuffle(seq)
    b = AVLTree (seq)
    b.sanity_check()
    """check that inorder traversal on an AVL tree
    (and on a binary search tree in the whole)
    will return values from the underlying set in order"""
    assert (b.as_list(3) == b.as_list(1) == seq_copy)
    """check that node deletion works"""
    c = AVLTree (random_data_generator (10000))
    before_deletion = c.elements_count
    for i in random_data_generator (1000):
        c.remove(i)
    after_deletion = c.elements_count
    c.sanity_check()
    # Some removal keys may be absent, so only >= is guaranteed.
    assert (before_deletion >= after_deletion)
    #print c.out()
    """check that an AVL tree's height is strictly less than
    1.44*log2(N+2)-1 (there N is number of elements)"""
    assert (c.height() < 1.44 * math.log(after_deletion+2, 2) - 1)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, IO, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
# Generic return type for the optional 'cls' response-transformer callback
# accepted by every operation in this module.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Module-wide msrest serializer used by the request builders below for
# path, query and header parameters; client-side validation is disabled.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_delete_request_initial(
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the DELETE request that starts removal of a role instance."""
    api_version = "2020-10-01-preview"
    accept = "application/json"

    # Fill the URL template; 'template_url' lets callers supply their own.
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}')
    _path = {
        "roleInstanceName": _SERIALIZER.url("role_instance_name", role_instance_name, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    _url = _format_url_section(_url, **_path)

    # Query string and headers.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    subscription_id: str,
    *,
    expand: Optional[str] = "instanceView",
    **kwargs: Any
) -> HttpRequest:
    """Assemble the GET request that fetches a single role instance."""
    api_version = "2020-10-01-preview"
    accept = "application/json"

    # Fill the URL template; 'template_url' lets callers supply their own.
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}')
    _path = {
        "roleInstanceName": _SERIALIZER.url("role_instance_name", role_instance_name, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    _url = _format_url_section(_url, **_path)

    # Query string (optional $expand) and headers.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    if expand is not None:
        _params['$expand'] = _SERIALIZER.query("expand", expand, 'str')
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_instance_view_request(
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the GET request for a role instance's run-time instance view."""
    api_version = "2020-10-01-preview"
    accept = "application/json"

    # Fill the URL template; 'template_url' lets callers supply their own.
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/instanceView')
    _path = {
        "roleInstanceName": _SERIALIZER.url("role_instance_name", role_instance_name, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    _url = _format_url_section(_url, **_path)

    # Query string and headers.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_request(
    resource_group_name: str,
    cloud_service_name: str,
    subscription_id: str,
    *,
    expand: Optional[str] = "instanceView",
    **kwargs: Any
) -> HttpRequest:
    """Assemble the GET request that lists all role instances of a cloud service."""
    api_version = "2020-10-01-preview"
    accept = "application/json"

    # Fill the URL template; 'template_url' lets callers supply their own
    # (the pager passes the service's nextLink here).
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances')
    _path = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    _url = _format_url_section(_url, **_path)

    # Query string (optional $expand) and headers.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    if expand is not None:
        _params['$expand'] = _SERIALIZER.query("expand", expand, 'str')
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_restart_request_initial(
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the POST request that starts a role-instance restart."""
    api_version = "2020-10-01-preview"
    accept = "application/json"

    # Fill the URL template; 'template_url' lets callers supply their own.
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/restart')
    _path = {
        "roleInstanceName": _SERIALIZER.url("role_instance_name", role_instance_name, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    _url = _format_url_section(_url, **_path)

    # Query string and headers.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
def build_reimage_request_initial(
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the POST request that starts a role-instance reimage."""
    api_version = "2020-10-01-preview"
    accept = "application/json"

    # Fill the URL template; 'template_url' lets callers supply their own.
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/reimage')
    _path = {
        "roleInstanceName": _SERIALIZER.url("role_instance_name", role_instance_name, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    _url = _format_url_section(_url, **_path)

    # Query string and headers.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
def build_rebuild_request_initial(
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the POST request that starts a role-instance rebuild."""
    api_version = "2020-10-01-preview"
    accept = "application/json"

    # Fill the URL template; 'template_url' lets callers supply their own.
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/rebuild')
    _path = {
        "roleInstanceName": _SERIALIZER.url("role_instance_name", role_instance_name, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    _url = _format_url_section(_url, **_path)

    # Query string and headers.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_remote_desktop_file_request(
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the GET request for a role instance's remote desktop (.rdp) file."""
    api_version = "2020-10-01-preview"
    # This operation returns an RDP document, not JSON.
    accept = "application/x-rdp"

    # Fill the URL template; 'template_url' lets callers supply their own.
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/remoteDesktopFile')
    _path = {
        "roleInstanceName": _SERIALIZER.url("role_instance_name", role_instance_name, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    _url = _format_url_section(_url, **_path)

    # Query string and headers.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class CloudServiceRoleInstancesOperations(object):
"""CloudServiceRoleInstancesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_10_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    # Pipeline client used to send every request in this operation group.
    self._client = client
    # msrest serializer/deserializer pair for request and response models.
    self._serialize = serializer
    self._deserialize = deserializer
    # Service configuration (subscription_id, polling_interval, ...).
    self._config = config
def _delete_initial(
    self,
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    **kwargs: Any
) -> None:
    """Send the initial DELETE request of the long-running delete operation."""
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Build, normalize and address the request.
    request = build_delete_request_initial(
        role_instance_name=role_instance_name,
        resource_group_name=resource_group_name,
        cloud_service_name=cloud_service_name,
        subscription_id=self._config.subscription_id,
        template_url=self._delete_initial.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202/204 are the accepted outcomes; anything else is an error.
    if response.status_code not in (200, 202, 204):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}'}  # type: ignore
@distributed_trace
def begin_delete(
    self,
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    **kwargs: Any
) -> LROPoller[None]:
    """Deletes a role instance from a cloud service.

    :param role_instance_name: Name of the role instance.
    :type role_instance_name: str
    :param resource_group_name:
    :type resource_group_name: str
    :param cloud_service_name:
    :type cloud_service_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # 'polling' may be True (default ARM poller), False (no polling), or a custom PollingMethod.
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: issue the initial DELETE now.  The lambda returns
        # the raw PipelineResponse so the poller can inspect status/headers.
        raw_result = self._delete_initial(
            role_instance_name=role_instance_name,
            resource_group_name=resource_group_name,
            cloud_service_name=cloud_service_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # error_map was consumed by the initial call; drop it before polling.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Final callback: the operation has no body, so only a custom
        # 'cls' produces a return value.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Rebuild the poller from the saved continuation token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}'}  # type: ignore
@distributed_trace
def get(
    self,
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    expand: Optional[str] = "instanceView",
    **kwargs: Any
) -> "_models.RoleInstance":
    """Retrieve a single role instance of a cloud service.

    :param role_instance_name: Name of the role instance.
    :type role_instance_name: str
    :param resource_group_name: Name of the resource group.
    :type resource_group_name: str
    :param cloud_service_name: Name of the cloud service.
    :type cloud_service_name: str
    :param expand: The expand expression to apply to the operation. The default value is
     "instanceView".
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: RoleInstance, or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2020_10_01_preview.models.RoleInstance
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RoleInstance"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Build, normalize and address the request.
    request = build_get_request(
        role_instance_name=role_instance_name,
        resource_group_name=resource_group_name,
        cloud_service_name=cloud_service_name,
        subscription_id=self._config.subscription_id,
        expand=expand,
        template_url=self.get.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in (200,):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('RoleInstance', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}'}  # type: ignore
@distributed_trace
def get_instance_view(
    self,
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    **kwargs: Any
) -> "_models.RoleInstanceView":
    """Retrieve the run-time state (instance view) of a cloud-service role instance.

    :param role_instance_name: Name of the role instance.
    :type role_instance_name: str
    :param resource_group_name: Name of the resource group.
    :type resource_group_name: str
    :param cloud_service_name: Name of the cloud service.
    :type cloud_service_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: RoleInstanceView, or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2020_10_01_preview.models.RoleInstanceView
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RoleInstanceView"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Build, normalize and address the request.
    request = build_get_instance_view_request(
        role_instance_name=role_instance_name,
        resource_group_name=resource_group_name,
        cloud_service_name=cloud_service_name,
        subscription_id=self._config.subscription_id,
        template_url=self.get_instance_view.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in (200,):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('RoleInstanceView', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get_instance_view.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/instanceView'}  # type: ignore
@distributed_trace
def list(
    self,
    resource_group_name: str,
    cloud_service_name: str,
    expand: Optional[str] = "instanceView",
    **kwargs: Any
) -> Iterable["_models.RoleInstanceListResult"]:
    """Gets the list of all role instances in a cloud service. Use nextLink property in the response
    to get the next page of role instances. Do this till nextLink is null to fetch all the role
    instances.

    :param resource_group_name:
    :type resource_group_name: str
    :param cloud_service_name:
    :type cloud_service_name: str
    :param expand: The expand expression to apply to the operation. The default value is
     "instanceView".
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either RoleInstanceListResult or the result of
     cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2020_10_01_preview.models.RoleInstanceListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RoleInstanceListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page: format this operation's own URL template.
        if not next_link:
            request = build_list_request(
                resource_group_name=resource_group_name,
                cloud_service_name=cloud_service_name,
                subscription_id=self._config.subscription_id,
                expand=expand,
                template_url=self.list.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            # Subsequent pages: the service-provided nextLink is used as the URL.
            request = build_list_request(
                resource_group_name=resource_group_name,
                cloud_service_name=cloud_service_name,
                subscription_id=self._config.subscription_id,
                expand=expand,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Convert one page into (link to the next page or None, iterator of items).
        deserialized = self._deserialize("RoleInstanceListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch a single page, raising on any non-200 status.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    # ItemPaged drives get_next/extract_data lazily as the caller iterates.
    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances'}  # type: ignore
def _restart_initial(
    self,
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    **kwargs: Any
) -> None:
    """Send the initial POST request of the long-running restart operation."""
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Build, normalize and address the request.
    request = build_restart_request_initial(
        role_instance_name=role_instance_name,
        resource_group_name=resource_group_name,
        cloud_service_name=cloud_service_name,
        subscription_id=self._config.subscription_id,
        template_url=self._restart_initial.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202 are the accepted outcomes; anything else is an error.
    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_restart_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/restart'}  # type: ignore
@distributed_trace
def begin_restart(
    self,
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    **kwargs: Any
) -> LROPoller[None]:
    """The Reboot Role Instance asynchronous operation requests a reboot of a role instance in the
    cloud service.

    :param role_instance_name: Name of the role instance.
    :type role_instance_name: str
    :param resource_group_name:
    :type resource_group_name: str
    :param cloud_service_name:
    :type cloud_service_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # 'polling' may be True (default ARM poller), False (no polling), or a custom PollingMethod.
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: issue the initial restart POST now.  The lambda
        # returns the raw PipelineResponse for the poller to inspect.
        raw_result = self._restart_initial(
            role_instance_name=role_instance_name,
            resource_group_name=resource_group_name,
            cloud_service_name=cloud_service_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # error_map was consumed by the initial call; drop it before polling.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Final callback: the operation has no body, so only a custom
        # 'cls' produces a return value.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Rebuild the poller from the saved continuation token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/restart'}  # type: ignore
def _reimage_initial(
    self,
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    **kwargs: Any
) -> None:
    """Send the initial, non-polled reimage request for a role instance.

    Raises ~azure.core.exceptions.HttpResponseError unless the service
    answers 200 or 202.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    handled_errors = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    handled_errors.update(kwargs.pop('error_map', {}))

    request = build_reimage_request_initial(
        role_instance_name=role_instance_name,
        resource_group_name=resource_group_name,
        cloud_service_name=cloud_service_name,
        subscription_id=self._config.subscription_id,
        template_url=self._reimage_initial.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=handled_errors)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_reimage_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/reimage'}  # type: ignore
@distributed_trace
def begin_reimage(
    self,
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    **kwargs: Any
) -> LROPoller[None]:
    """The Reimage Role Instance asynchronous operation reinstalls the operating system on instances
    of web roles or worker roles.

    :param role_instance_name: Name of the role instance.
    :type role_instance_name: str
    :param resource_group_name: Name of the resource group.
    :type resource_group_name: str
    :param cloud_service_name: Name of the cloud service.
    :type cloud_service_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: issue the initial request now.  'cls' is
        # overridden so the raw pipeline response is returned to the poller.
        raw_result = self._reimage_initial(
            role_instance_name=role_instance_name,
            resource_group_name=resource_group_name,
            cloud_service_name=cloud_service_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Drop error_map so it is not forwarded to the polling method below.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # The operation returns no body; only the caller-supplied 'cls'
        # transform (if any) is applied to the final pipeline response.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started LRO from its continuation token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_reimage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/reimage'}  # type: ignore
def _rebuild_initial(
    self,
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    **kwargs: Any
) -> None:
    """Send the initial, non-polled rebuild request for a role instance.

    Raises ~azure.core.exceptions.HttpResponseError unless the service
    answers 200 or 202.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    handled_errors = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    handled_errors.update(kwargs.pop('error_map', {}))

    request = build_rebuild_request_initial(
        role_instance_name=role_instance_name,
        resource_group_name=resource_group_name,
        cloud_service_name=cloud_service_name,
        subscription_id=self._config.subscription_id,
        template_url=self._rebuild_initial.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=handled_errors)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_rebuild_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/rebuild'}  # type: ignore
@distributed_trace
def begin_rebuild(
    self,
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    **kwargs: Any
) -> LROPoller[None]:
    """The Rebuild Role Instance asynchronous operation reinstalls the operating system on instances
    of web roles or worker roles and initializes the storage resources that are used by them. If
    you do not want to initialize storage resources, you can use Reimage Role Instance.

    :param role_instance_name: Name of the role instance.
    :type role_instance_name: str
    :param resource_group_name: Name of the resource group.
    :type resource_group_name: str
    :param cloud_service_name: Name of the cloud service.
    :type cloud_service_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: issue the initial request now.  'cls' is
        # overridden so the raw pipeline response is returned to the poller.
        raw_result = self._rebuild_initial(
            role_instance_name=role_instance_name,
            resource_group_name=resource_group_name,
            cloud_service_name=cloud_service_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Drop error_map so it is not forwarded to the polling method below.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # The operation returns no body; only the caller-supplied 'cls'
        # transform (if any) is applied to the final pipeline response.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started LRO from its continuation token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_rebuild.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/rebuild'}  # type: ignore
@distributed_trace
def get_remote_desktop_file(
    self,
    role_instance_name: str,
    resource_group_name: str,
    cloud_service_name: str,
    **kwargs: Any
) -> IO:
    """Gets a remote desktop file for a role instance in a cloud service.

    :param role_instance_name: Name of the role instance.
    :type role_instance_name: str
    :param resource_group_name: Name of the resource group.
    :type resource_group_name: str
    :param cloud_service_name: Name of the cloud service.
    :type cloud_service_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: IO, or the result of cls(response)
    :rtype: IO
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[IO]
    handled_errors = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    handled_errors.update(kwargs.pop('error_map', {}))

    request = build_get_remote_desktop_file_request(
        role_instance_name=role_instance_name,
        resource_group_name=resource_group_name,
        cloud_service_name=cloud_service_name,
        subscription_id=self._config.subscription_id,
        template_url=self.get_remote_desktop_file.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    # Stream the response: the RDP file comes back as raw bytes.
    pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200,):
        map_error(status_code=response.status_code, response=response, error_map=handled_errors)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = response.stream_download(self._client._pipeline)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

get_remote_desktop_file.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/remoteDesktopFile'}  # type: ignore
|
|
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2014-2017 The Mocacoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
# Directory where per-node RPC coverage logs are written; None disables
# coverage logging (see enable_coverage / get_rpc_proxy).
COVERAGE_DIR = None

# Set Mocktime default to OFF.
# MOCKTIME is only needed for scripts that use the
# cached version of the blockchain.  If the cached
# version of the blockchain is used without MOCKTIME
# then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
    """Turn mocktime on, matching the timestamps baked into the block cache.

    For backward compatibility of the python scripts with previous versions
    of the cache, MOCKTIME is set to the regtest genesis time plus
    (201 * 156) seconds.
    """
    global MOCKTIME
    MOCKTIME = 1417713337 + (201 * 156)
def disable_mocktime():
    """Turn mocktime back off so nodes use real wall-clock time."""
    global MOCKTIME
    MOCKTIME = 0
def get_mocktime():
    """Return the current mocktime (0 when mocktime is disabled)."""
    return MOCKTIME
def enable_coverage(dirname):
    """Maintain a log of which RPC calls are made during testing.

    dirname: directory where per-node coverage files will be written.
    """
    global COVERAGE_DIR
    COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
    """
    Build an AuthServiceProxy for url, wrapped for optional coverage logging.

    Args:
        url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this calls to
    Kwargs:
        timeout (int): HTTP timeout in seconds
    Returns:
        AuthServiceProxy. convenience object for making RPC calls.
    """
    extra_kwargs = {'timeout': timeout} if timeout is not None else {}
    proxy = AuthServiceProxy(url, **extra_kwargs)
    proxy.url = url  # store URL on proxy for info
    logfile = None
    if COVERAGE_DIR:
        logfile = coverage.get_filename(COVERAGE_DIR, node_number)
    return coverage.AuthServiceProxyWrapper(proxy, logfile)
def get_mnsync_status(node):
    """Return True when node reports its masternode sync as complete."""
    status = node.mnsync("status")
    return status['IsSynced']
def wait_to_sync(node):
    """Poll every 0.5s until the node reports masternode sync complete."""
    while True:
        done = get_mnsync_status(node)
        # Sleep after every poll (including the successful one), matching
        # the historical pacing of this helper.
        time.sleep(0.5)
        if done:
            return
def p2p_port(n):
    """P2P listen port for node n, offset by pid so parallel runs don't clash."""
    pid_offset = os.getpid() % 999
    return 11000 + n + pid_offset
def rpc_port(n):
    """RPC listen port for node n, offset by pid so parallel runs don't clash."""
    pid_offset = os.getpid() % 999
    return 12000 + n + pid_offset
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(value)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Return the number of bytes encoded by hex_string (validates the hex)."""
    raw = bytearray.fromhex(hex_string)
    return len(raw)
def bytes_to_hex_str(byte_str):
    """Return the lowercase hex encoding of byte_str as a text string."""
    encoded = hexlify(byte_str)
    return encoded.decode('ascii')
def hex_str_to_bytes(hex_str):
    """Decode a hex text string into raw bytes."""
    ascii_bytes = hex_str.encode('ascii')
    return unhexlify(ascii_bytes)
def str_to_b64str(string):
    """UTF-8 encode string and return its base64 representation as text."""
    utf8_bytes = string.encode('utf-8')
    return b64encode(utf8_bytes).decode('ascii')
def sync_blocks(rpc_connections, wait=1):
    """
    Wait until everybody has the same block count
    """
    while True:
        heights = [conn.getblockcount() for conn in rpc_connections]
        # All connections agree when there is at most one distinct height.
        if len(set(heights)) <= 1:
            break
        time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    while True:
        reference = set(rpc_connections[0].getrawmempool())
        others = rpc_connections[1:]
        if all(set(conn.getrawmempool()) == reference for conn in others):
            break
        time.sleep(wait)
def sync_masternodes(rpc_connections):
    """Block until every connection reports masternode sync complete."""
    for conn in rpc_connections:
        wait_to_sync(conn)
# Map of node number -> subprocess.Popen handle for each running mocacoind.
bitcoind_processes = {}
def initialize_datadir(dirname, n):
    """Create dirname/nodeN with a regtest mocacoin.conf; return its path."""
    datadir = os.path.join(dirname, "node" + str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    conf_lines = [
        "regtest=1",
        "rpcuser=rt",
        "rpcpassword=rt",
        "port=" + str(p2p_port(n)),
        "rpcport=" + str(rpc_port(n)),
        "listenonion=0",
    ]
    with open(os.path.join(datadir, "mocacoin.conf"), 'w') as f:
        f.write("\n".join(conf_lines) + "\n")
    return datadir
def rpc_url(i, rpchost=None):
    """RPC endpoint URL for node i, defaulting to localhost."""
    host = rpchost or '127.0.0.1'
    return "http://rt:rt@%s:%d" % (host, rpc_port(i))
def wait_for_bitcoind_start(process, url, i):
    '''
    Wait for mocacoind to start. This means that RPC is accessible and fully initialized.
    Raise an exception if mocacoind exits during initialization.

    process: subprocess.Popen handle of the spawned daemon
    url: RPC URL to poll
    i: node number (used to label the coverage log)
    '''
    while True:
        # A non-None poll() result means the daemon died during startup.
        if process.poll() is not None:
            raise Exception('mocacoind exited with status %i during initialization' % process.returncode)
        try:
            rpc = get_rpc_proxy(url, i)
            blocks = rpc.getblockcount()  # any successful RPC call proves the server is up
            break # break out of loop on success
        except IOError as e:
            if e.errno != errno.ECONNREFUSED: # Port not yet open?
                raise # unknown IO error
        except JSONRPCException as e: # Initialization phase
            if e.error['code'] != -28: # RPC in warmup?
                raise # unknown JSON RPC exception
        time.sleep(0.25)
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
"""
if (not os.path.isdir(os.path.join("cache","node0"))
or not os.path.isdir(os.path.join("cache","node1"))
or not os.path.isdir(os.path.join("cache","node2"))
or not os.path.isdir(os.path.join("cache","node3"))):
#find and delete old cache directories if any exist
for i in range(4):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
# Create cache directories, run mocacoinds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("MOCACOIND", "mocacoind"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: mocacoind started, waiting for RPC to come up"
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: RPC succesfully started"
rpcs = []
for i in range(4):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 156 seconds apart
# starting from 31356 seconds in the past
enable_mocktime()
block_time = get_mocktime() - (201 * 156)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 156
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
disable_mocktime()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in mocacoin.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    for node_id in range(num_nodes):
        initialize_datadir(test_dir, node_id)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start a mocacoind and return RPC connection to it

    i: node number (selects datadir and ports)
    dirname: parent directory holding the per-node datadirs
    extra_args: optional extra command-line arguments for the daemon
    rpchost: optional host/IP for the RPC endpoint
    timewait: HTTP timeout (seconds) for the RPC proxy
    binary: daemon binary path (default: $MOCACOIND or "mocacoind")
    """
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("MOCACOIND", "mocacoind")
    # RPC tests still depend on free transactions
    args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-blockprioritysize=50000", "-mocktime="+str(get_mocktime()) ]
    if extra_args is not None: args.extend(extra_args)
    bitcoind_processes[i] = subprocess.Popen(args)
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: mocacoind started, waiting for RPC to come up"
    url = rpc_url(i, rpchost)
    # Block until the daemon's RPC server is fully initialized.
    wait_for_bitcoind_start(bitcoind_processes[i], url, i)
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: RPC succesfully started"
    proxy = get_rpc_proxy(url, i, timeout=timewait)

    if COVERAGE_DIR:
        coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)

    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
    """
    Start multiple mocacoinds, return RPC connections to them
    """
    if extra_args is None:
        extra_args = [None] * num_nodes
    if binary is None:
        binary = [None] * num_nodes
    rpcs = []
    try:
        for node_id in range(num_nodes):
            proxy = start_node(node_id, dirname, extra_args[node_id],
                               rpchost, binary=binary[node_id])
            rpcs.append(proxy)
    except:
        # If one node failed to start, stop the ones already running.
        stop_nodes(rpcs)
        raise
    return rpcs
def log_filename(dirname, n_node, logname):
    """Path of logname inside node n_node's regtest directory."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """Stop node i via RPC, wait for its process to exit, and forget it."""
    node.stop()
    bitcoind_processes[i].wait()
    del bitcoind_processes[i]
def stop_nodes(nodes):
    """Issue `stop` to every node, then drop all RPC connections."""
    for node in list(nodes):
        node.stop()
    nodes[:] = []  # emptying the list closes connections as a side effect
def set_node_times(nodes, t):
    """Set mocktime t on every node in nodes."""
    for conn in nodes:
        conn.setmocktime(t)
def wait_bitcoinds():
    """Wait for every spawned mocacoind to exit cleanly, then forget them."""
    for proc in bitcoind_processes.values():
        proc.wait()
    bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
    """Connect from_connection to node node_num and wait for the handshake."""
    ip_port = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")

    def handshake_pending():
        # A peer with version 0 has not completed the version handshake yet;
        # waiting avoids race conditions with transaction relaying.
        return any(p['version'] == 0 for p in from_connection.getpeerinfo())

    while handshake_pending():
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Create connections in both directions between nodes[a] and nodes[b]."""
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for index, txout in enumerate(txdata["vout"]):
        if txout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert confirmations_required >= 0
    candidates = from_node.listunspent(confirmations_required)
    random.shuffle(candidates)
    selected = []
    total_in = Decimal("0.00000000")
    while total_in < amount_needed and candidates:
        utxo = candidates.pop()
        total_in += utxo["amount"]
        selected.append({"txid": utxo["txid"], "vout": utxo["vout"], "address": utxo["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, selected)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs,
        # splitting the change in two while being careful of rounding.
        extra_address = from_node.getnewaddress()
        outputs[extra_address] = Decimal(change / 2).quantize(
            Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - spent - outputs[extra_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
    """
    # Create a send-to-self with confirmed inputs: the outer fee*2 budget
    # covers both the send-to-self and the follow-up spend below.
    self_address = from_node.getnewaddress()
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)

    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)

    # Locate the freshly created (unconfirmed) output paying ourselves.
    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }

    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)

    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (txid, txhex) = send_zeropri_transaction(sender, receiver, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)

    (total_in, inputs) = gather_inputs(sender, amount + fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[receiver.getnewaddress()] = float(amount)

    raw = sender.createrawtransaction(inputs, outputs)
    signed = sender.signrawtransaction(raw)
    txid = sender.sendrawtransaction(signed["hex"], True)

    return (txid, signed["hex"], fee)
def assert_equal(thing1, thing2):
    """Raise AssertionError unless thing1 == thing2."""
    if thing1 != thing2:
        message = "%s != %s"%(str(thing1),str(thing2))
        raise AssertionError(message)
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 > thing2."""
    if thing1 <= thing2:
        message = "%s <= %s"%(str(thing1),str(thing2))
        raise AssertionError(message)
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises an exception of type exc."""
    try:
        fun(*args, **kwds)
    except exc:
        # Expected exception type: success.
        return
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    raise AssertionError("No exception raised")
def assert_is_hex_string(string):
    """Raise AssertionError unless string parses as hexadecimal."""
    try:
        int(string, 16)
    except Exception as exc:
        message = "Couldn't interpret %r as hexadecimal; raised: %s" % (string, exc)
        raise AssertionError(message)
def assert_is_hash_string(string, length=64):
    """Raise AssertionError unless string looks like a lowercase hex hash
    of the given length (pass length=None to skip the length check)."""
    if not isinstance(string, basestring):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    If the should_not_find flag is true, to_match should not be found
    in object_array
    """
    if should_not_find == True:
        assert_equal(expected, { })
    num_matched = 0
    for item in object_array:
        # Skip items that do not fully satisfy the search criteria.
        if any(item[key] != value for key, value in to_match.items()):
            continue
        if should_not_find == True:
            num_matched = num_matched + 1
        # Every matched item must also carry all expected key/value pairs.
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
        num_matched = num_matched + 1
    if num_matched == 0 and should_not_find != True:
        raise AssertionError("No objects matched %s"%(str(to_match)))
    if num_matched > 0 and should_not_find == True:
        raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
    """Quantize amount down to eight decimal places (one satoshi)."""
    quantum = Decimal('0.00000001')
    return Decimal(amount).quantize(quantum, rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
    """Split existing UTXOs in half until node owns at least count of them,
    then mine until the mempool is empty so they are all confirmed."""
    # Mine enough blocks for spendable coinbases plus maturity (101).
    node.generate(int(0.5*count)+101)
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    for i in xrange(iterations):
        # Each pass consumes one utxo and produces two of half its value.
        t = utxos.pop()
        inputs = []
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = satoshi_round(send_value/2)
        outputs[addr2] = satoshi_round(send_value/2)
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransaction(raw_tx)["hex"]
        txid = node.sendrawtransaction(signed_tx)

    # Confirm all the split transactions.
    while (node.getmempoolinfo()['size'] > 0):
        node.generate(1)

    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Return hex for 128 large OP_RETURN txouts plus their count byte.

    Layout: "81" (compact-size 129: the 128 txouts below plus the caller's
    change output), then 128 copies of: 8 zero value bytes, "fd0402"
    (compact-size 516 = script length), and the 516-byte script itself.
    The script is OP_RETURN OP_PUSHDATA2 512 followed by 512 0x01 bytes.
    """
    # Replaces the old character-by-character concatenation loops (which
    # were quadratic) with equivalent constant-time string building; the
    # resulting hex is byte-for-byte identical.
    script_pubkey = "6a4d0200" + "01" * 512
    txout = "0000000000000000" + "fd0402" + script_pubkey
    return "81" + txout * 128
def create_tx(node, coinbase, to_address, amount):
    """Spend output 0 of coinbase to to_address; return the signed tx hex."""
    inputs = [{"txid": coinbase, "vout": 0}]
    outputs = {to_address: amount}
    raw = node.createrawtransaction(inputs, outputs)
    signed = node.signrawtransaction(raw)
    assert_equal(signed["complete"], True)
    return signed["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
    """Broadcast one padded transaction per utxo; return the txids."""
    addr = node.getnewaddress()
    txids = []
    for i in xrange(len(utxos)):
        t = utxos.pop()
        inputs = []
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr] = satoshi_round(send_value)
        rawtx = node.createrawtransaction(inputs, outputs)
        # Splice the big OP_RETURN outputs in after byte 46 of the raw tx
        # (hex offset 92), replacing the original one-byte output count.
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        # "NONE" sighash: the outputs we spliced in are not signed over.
        signresult = node.signrawtransaction(newtx, None, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def get_bip9_status(node, key):
    """Return the bip9_softforks entry whose id equals key.

    Raises IndexError when no entry matches.
    """
    info = node.getblockchaininfo()
    for entry in info['bip9_softforks']:
        if entry['id'] == key:
            return entry
    raise IndexError ('key:"%s" not found' % key)
|
|
from __future__ import absolute_import, unicode_literals
import sys
import warnings
from kombu import Connection
from kombu.exceptions import ResourceError, ChannelError
from kombu.transport import virtual
from kombu.utils import uuid
from kombu.compression import compress
from kombu.tests.case import Case, MagicMock, Mock, mock, patch
# True when running under Python 3.
PY3 = sys.version_info[0] == 3
# Fully-qualified name of the print builtin, for patching per Python version.
PRINT_FQDN = 'builtins.print' if PY3 else '__builtin__.print'
def client(**kwargs):
    """Return a Connection that uses the virtual transport under test."""
    transport_path = 'kombu.transport.virtual:Transport'
    return Connection(transport=transport_path, **kwargs)
def memory_client():
    """Return a Connection backed by the in-memory transport."""
    conn = Connection(transport='memory')
    return conn
class test_BrokerState(Case):
    """Tests for the virtual transport's shared broker state object."""

    def test_constructor(self):
        # Default construction exposes an ``exchanges`` attribute, and an
        # explicit value passed to the constructor is stored verbatim.
        default_state = virtual.BrokerState()
        self.assertTrue(hasattr(default_state, 'exchanges'))

        explicit_state = virtual.BrokerState(exchanges=16)
        self.assertEqual(explicit_state.exchanges, 16)
class test_QoS(Case):
    """Tests for the virtual transport's QoS (prefetch/unacked) bookkeeping."""

    def setup(self):
        # QoS with a prefetch window of 10 unacked messages.
        self.q = virtual.QoS(client().channel(), prefetch_count=10)

    def teardown(self):
        # Stop the periodic collect timer started by QoS.
        self.q._on_collect.cancel()

    def test_constructor(self):
        self.assertTrue(self.q.channel)
        self.assertTrue(self.q.prefetch_count)
        self.assertFalse(self.q._delivered.restored)
        self.assertTrue(self.q._on_collect)

    def test_restore_visible__interface(self):
        # Base implementation is a no-op; must not raise.
        qos = virtual.QoS(client().channel())
        qos.restore_visible()

    @mock.stdouts
    def test_can_consume(self, stdout, stderr):
        _restored = []

        class RestoreChannel(virtual.Channel):
            do_restore = True

            def _restore(self, message):
                _restored.append(message)

        # Fill the prefetch window one short of the limit; consuming is
        # allowed until the final append closes the window.
        self.assertTrue(self.q.can_consume())
        for i in range(self.q.prefetch_count - 1):
            self.q.append(i, uuid())
            self.assertTrue(self.q.can_consume())
        self.q.append(i + 1, uuid())
        self.assertFalse(self.q.can_consume())

        # Acking one message frees a slot ...
        tag1 = next(iter(self.q._delivered))
        self.q.ack(tag1)
        self.assertTrue(self.q.can_consume())

        # ... as does rejecting without requeue.
        tag2 = uuid()
        self.q.append(i + 2, tag2)
        self.assertFalse(self.q.can_consume())
        self.q.reject(tag2)
        self.assertTrue(self.q.can_consume())

        # Requeued/unacked messages are restored newest-first at shutdown.
        self.q.channel = RestoreChannel(self.q.channel.connection)
        tag3 = uuid()
        self.q.append(i + 3, tag3)
        self.q.reject(tag3, requeue=True)
        self.q._flush()
        self.q.restore_unacked_once()
        self.assertListEqual(_restored, [11, 9, 8, 7, 6, 5, 4, 3, 2, 1])
        self.assertTrue(self.q._delivered.restored)
        self.assertFalse(self.q._delivered)
        # Restoring again is a no-op; forcing restored=False and retrying
        # warns on stderr but must not print to stdout.
        self.q.restore_unacked_once()
        self.q._delivered.restored = False
        self.q.restore_unacked_once()
        self.assertTrue(stderr.getvalue())
        self.assertFalse(stdout.getvalue())

        # With restore_at_shutdown disabled nothing further happens.
        self.q.restore_at_shutdown = False
        self.q.restore_unacked_once()

    def test_get(self):
        self.q._delivered['foo'] = 1
        self.assertEqual(self.q.get('foo'), 1)
class test_Message(Case):
    """Tests for virtual.Message construction and round-tripping."""

    def test_create(self):
        channel = client().channel()
        payload = channel.prepare_message('the quick brown fox...')
        tag = payload['properties']['delivery_tag'] = uuid()
        message = channel.message_to_python(payload)
        self.assertIsInstance(message, virtual.Message)
        # Converting an already-decoded message is a no-op returning it.
        self.assertIs(message, channel.message_to_python(message))
        if message.errors:
            message._reraise_error()
        self.assertEqual(message.body,
                         'the quick brown fox...'.encode('utf-8'))
        self.assertTrue(message.delivery_tag, tag)

    def test_create_no_body(self):
        # Construction must tolerate a missing body.
        virtual.Message(Mock(), {
            'body': None,
            'properties': {'delivery_tag': 1}})

    def test_serializable(self):
        channel = client().channel()
        body, content_type = compress('the quick brown fox...', 'gzip')
        payload = channel.prepare_message(body, headers={'compression': content_type})
        tag = payload['properties']['delivery_tag'] = uuid()
        message = channel.message_to_python(payload)
        serialized = message.serializable()
        # serializable() hands back the decompressed payload and strips
        # the compression header.
        self.assertEqual(serialized['body'],
                         'the quick brown fox...'.encode('utf-8'))
        self.assertEqual(serialized['properties']['delivery_tag'], tag)
        self.assertNotIn('compression', serialized['headers'])
class test_AbstractChannel(Case):
    """AbstractChannel must raise for required backend hooks and provide
    sensible defaults for the optional ones."""

    def test_get(self):
        with self.assertRaises(NotImplementedError):
            virtual.AbstractChannel()._get('queue')

    def test_put(self):
        with self.assertRaises(NotImplementedError):
            virtual.AbstractChannel()._put('queue', 'm')

    def test_size(self):
        # Default size is zero rather than an error.
        self.assertEqual(virtual.AbstractChannel()._size('queue'), 0)

    def test_purge(self):
        with self.assertRaises(NotImplementedError):
            virtual.AbstractChannel()._purge('queue')

    def test_delete(self):
        with self.assertRaises(NotImplementedError):
            virtual.AbstractChannel()._delete('queue')

    def test_new_queue(self):
        # Creating a queue is optional; the default returns None.
        self.assertIsNone(virtual.AbstractChannel()._new_queue('queue'))

    def test_has_queue(self):
        # By default every queue is assumed to exist.
        self.assertTrue(virtual.AbstractChannel()._has_queue('queue'))

    def test_poll(self):
        fake_cycle = Mock(name='cycle')
        # _poll delegates to the cycle's get().
        self.assertTrue(virtual.AbstractChannel()._poll(fake_cycle))
        fake_cycle.get.assert_called()
class test_Channel(Case):
    """Tests for ``kombu.transport.virtual.Channel``."""

    def setup(self):
        self.channel = client().channel()

    def teardown(self):
        # Cancel the QoS collect timer so it does not leak between tests.
        if self.channel._qos is not None:
            self.channel._qos._on_collect.cancel()

    def test_exceeds_channel_max(self):
        c = client()
        t = c.transport
        avail = t._avail_channel_ids = Mock(name='_avail_channel_ids')
        # No channel ids left -> creating a channel must raise ResourceError.
        avail.pop.side_effect = IndexError()
        with self.assertRaises(ResourceError):
            virtual.Channel(t)

    def test_exchange_bind_interface(self):
        with self.assertRaises(NotImplementedError):
            self.channel.exchange_bind('dest', 'src', 'key')

    def test_exchange_unbind_interface(self):
        with self.assertRaises(NotImplementedError):
            self.channel.exchange_unbind('dest', 'src', 'key')

    def test_queue_unbind_interface(self):
        self.channel.queue_unbind('dest', 'ex', 'key')

    def test_management(self):
        m = self.channel.connection.client.get_manager()
        self.assertTrue(m)
        m.get_bindings()
        m.close()

    def test_exchange_declare(self):
        c = self.channel
        # passive=True on an unknown exchange must raise ChannelError.
        with self.assertRaises(ChannelError):
            c.exchange_declare('test_exchange_declare', 'direct',
                               durable=True, auto_delete=True, passive=True)
        c.exchange_declare('test_exchange_declare', 'direct',
                           durable=True, auto_delete=True)
        c.exchange_declare('test_exchange_declare', 'direct',
                           durable=True, auto_delete=True, passive=True)
        self.assertIn('test_exchange_declare', c.state.exchanges)
        # can declare again with same values
        c.exchange_declare('test_exchange_declare', 'direct',
                           durable=True, auto_delete=True)
        self.assertIn('test_exchange_declare', c.state.exchanges)
        # using different values raises NotEquivalentError
        with self.assertRaises(virtual.NotEquivalentError):
            c.exchange_declare('test_exchange_declare', 'direct',
                               durable=False, auto_delete=True)

    def test_exchange_delete(self, ex='test_exchange_delete'):

        class PurgeChannel(virtual.Channel):
            purged = []

            def _purge(self, queue):
                self.purged.append(queue)

        c = PurgeChannel(self.channel.connection)
        c.exchange_declare(ex, 'direct', durable=True, auto_delete=True)
        self.assertIn(ex, c.state.exchanges)
        self.assertFalse(c.state.has_binding(ex, ex, ex))  # no bindings yet
        c.exchange_delete(ex)
        self.assertNotIn(ex, c.state.exchanges)
        c.exchange_declare(ex, 'direct', durable=True, auto_delete=True)
        c.queue_declare(ex)
        c.queue_bind(ex, ex, ex)
        self.assertTrue(c.state.has_binding(ex, ex, ex))
        # Deleting the exchange must drop its bindings and purge the queue.
        c.exchange_delete(ex)
        self.assertFalse(c.state.has_binding(ex, ex, ex))
        self.assertIn(ex, c.purged)

    def test_queue_delete__if_empty(self, n='test_queue_delete__if_empty'):

        class PurgeChannel(virtual.Channel):
            purged = []
            size = 30

            def _purge(self, queue):
                self.purged.append(queue)

            def _size(self, queue):
                return self.size

        c = PurgeChannel(self.channel.connection)
        c.exchange_declare(n)
        c.queue_declare(n)
        c.queue_bind(n, n, n)
        # tests code path that returns if queue already bound.
        c.queue_bind(n, n, n)
        # Non-empty queue: if_empty=True must be a no-op.
        c.queue_delete(n, if_empty=True)
        self.assertTrue(c.state.has_binding(n, n, n))
        c.size = 0
        # Empty queue: now the delete must actually happen.
        c.queue_delete(n, if_empty=True)
        self.assertFalse(c.state.has_binding(n, n, n))
        self.assertIn(n, c.purged)

    def test_queue_purge(self, n='test_queue_purge'):

        class PurgeChannel(virtual.Channel):
            purged = []

            def _purge(self, queue):
                self.purged.append(queue)

        c = PurgeChannel(self.channel.connection)
        c.exchange_declare(n)
        c.queue_declare(n)
        c.queue_bind(n, n, n)
        c.queue_purge(n)
        self.assertIn(n, c.purged)

    def test_basic_publish__anon_exchange(self):
        # Publishing to the anonymous exchange routes straight to _put.
        c = memory_client().channel()
        msg = MagicMock(name='msg')
        c.encode_body = Mock(name='c.encode_body')
        c.encode_body.return_value = (1, 2)
        c._put = Mock(name='c._put')
        c.basic_publish(msg, None, 'rkey', kw=1)
        c._put.assert_called_with('rkey', msg, kw=1)

    def test_basic_publish_unique_delivery_tags(self, n='test_uniq_tag'):
        c1 = memory_client().channel()
        c2 = memory_client().channel()
        for c in (c1, c2):
            c.exchange_declare(n)
            c.queue_declare(n)
            c.queue_bind(n, n, n)
        m1 = c1.prepare_message('George Costanza')
        m2 = c2.prepare_message('Elaine Marie Benes')
        c1.basic_publish(m1, n, n)
        c2.basic_publish(m2, n, n)
        r1 = c1.message_to_python(c1.basic_get(n))
        r2 = c2.message_to_python(c2.basic_get(n))
        self.assertNotEqual(r1.delivery_tag, r2.delivery_tag)
        # Delivery tags are non-numeric strings here.
        with self.assertRaises(ValueError):
            int(r1.delivery_tag)
        with self.assertRaises(ValueError):
            int(r2.delivery_tag)

    def test_basic_publish__get__consume__restore(self,
                                                 n='test_basic_publish'):
        c = memory_client().channel()
        c.exchange_declare(n)
        c.queue_declare(n)
        c.queue_bind(n, n, n)
        c.queue_declare(n + '2')
        c.queue_bind(n + '2', n, n)
        m = c.prepare_message('nthex quick brown fox...')
        c.basic_publish(m, n, n)
        r1 = c.message_to_python(c.basic_get(n))
        self.assertTrue(r1)
        self.assertEqual(r1.body,
                         'nthex quick brown fox...'.encode('utf-8'))
        self.assertIsNone(c.basic_get(n))
        consumer_tag = uuid()
        c.basic_consume(n + '2', False,
                        consumer_tag=consumer_tag, callback=lambda *a: None)
        self.assertIn(n + '2', c._active_queues)
        r2, _ = c.drain_events()
        r2 = c.message_to_python(r2)
        self.assertEqual(r2.body,
                         'nthex quick brown fox...'.encode('utf-8'))
        self.assertEqual(r2.delivery_info['exchange'], n)
        self.assertEqual(r2.delivery_info['routing_key'], n)
        with self.assertRaises(virtual.Empty):
            c.drain_events()
        c.basic_cancel(consumer_tag)
        # An unacked message restored must become gettable again.
        c._restore(r2)
        r3 = c.message_to_python(c.basic_get(n))
        self.assertTrue(r3)
        self.assertEqual(r3.body, 'nthex quick brown fox...'.encode('utf-8'))
        self.assertIsNone(c.basic_get(n))

    def test_basic_ack(self):

        class MockQoS(virtual.QoS):
            was_acked = False

            def ack(self, delivery_tag):
                self.was_acked = True

        self.channel._qos = MockQoS(self.channel)
        self.channel.basic_ack('foo')
        self.assertTrue(self.channel._qos.was_acked)

    def test_basic_recover__requeue(self):

        class MockQoS(virtual.QoS):
            was_restored = False

            def restore_unacked(self):
                self.was_restored = True

        self.channel._qos = MockQoS(self.channel)
        self.channel.basic_recover(requeue=True)
        self.assertTrue(self.channel._qos.was_restored)

    def test_restore_unacked_raises_BaseException(self):
        q = self.channel.qos
        q._flush = Mock()
        q._delivered = {1: 1}
        q.channel._restore = Mock()
        q.channel._restore.side_effect = SystemExit
        # Even BaseException subclasses are collected, not propagated.
        errors = q.restore_unacked()
        self.assertIsInstance(errors[0][0], SystemExit)
        self.assertEqual(errors[0][1], 1)
        self.assertFalse(q._delivered)

    @patch('kombu.transport.virtual.emergency_dump_state')
    @patch(PRINT_FQDN)
    def test_restore_unacked_once_when_unrestored(self, print_,
                                                  emergency_dump_state):
        q = self.channel.qos
        q._flush = Mock()

        class State(dict):
            restored = False

        q._delivered = State({1: 1})
        ru = q.restore_unacked = Mock()
        exc = None
        try:
            raise KeyError()
        except KeyError as exc_:
            exc = exc_
        ru.return_value = [(exc, 1)]
        self.channel.do_restore = True
        q.restore_unacked_once()
        # Unrestored messages must be reported and emergency-dumped.
        print_.assert_called()
        emergency_dump_state.assert_called()

    def test_basic_recover(self):
        with self.assertRaises(NotImplementedError):
            self.channel.basic_recover(requeue=False)

    def test_basic_reject(self):

        class MockQoS(virtual.QoS):
            was_rejected = False

            def reject(self, delivery_tag, requeue=False):
                self.was_rejected = True

        self.channel._qos = MockQoS(self.channel)
        self.channel.basic_reject('foo')
        self.assertTrue(self.channel._qos.was_rejected)

    def test_basic_qos(self):
        self.channel.basic_qos(prefetch_count=128)
        self.assertEqual(self.channel._qos.prefetch_count, 128)

    def test_lookup__undeliverable(self, n='test_lookup__undeliverable'):
        warnings.resetwarnings()
        with warnings.catch_warnings(record=True) as log:
            # Unroutable messages fall back to the dead-letter queue
            # and emit a warning.
            self.assertListEqual(
                self.channel._lookup(n, n, 'ae.undeliver'),
                ['ae.undeliver'],
            )
            self.assertTrue(log)
            self.assertIn('could not be delivered', log[0].message.args[0])

    def test_context(self):
        x = self.channel.__enter__()
        self.assertIs(x, self.channel)
        x.__exit__()
        self.assertTrue(x.closed)

    def test_cycle_property(self):
        self.assertTrue(self.channel.cycle)

    def test_flow(self):
        with self.assertRaises(NotImplementedError):
            self.channel.flow(False)

    def test_close_when_no_connection(self):
        self.channel.connection = None
        self.channel.close()
        self.assertTrue(self.channel.closed)

    def test_drain_events_has_get_many(self):
        c = self.channel
        c._get_many = Mock()
        c._poll = Mock()
        c._consumers = [1]
        c._qos = Mock()
        c._qos.can_consume.return_value = True
        # When the channel defines _get_many it is preferred over _poll.
        c.drain_events(timeout=10.0)
        c._get_many.assert_called_with(c._active_queues, timeout=10.0)

    def test_get_exchanges(self):
        self.channel.exchange_declare(exchange='foo')
        self.assertTrue(self.channel.get_exchanges())

    def test_basic_cancel_not_in_active_queues(self):
        c = self.channel
        c._consumers.add('x')
        c._tag_to_queue['x'] = 'foo'
        c._active_queues = Mock()
        # Queue already removed from _active_queues must not break cancel.
        c._active_queues.remove.side_effect = ValueError()
        c.basic_cancel('x')
        c._active_queues.remove.assert_called_with('foo')

    def test_basic_cancel_unknown_ctag(self):
        self.assertIsNone(self.channel.basic_cancel('unknown-tag'))

    def test_list_bindings(self):
        c = self.channel
        c.exchange_declare(exchange='foo')
        c.queue_declare(queue='q')
        c.queue_bind(queue='q', exchange='foo', routing_key='rk')
        self.assertIn(('q', 'foo', 'rk'), list(c.list_bindings()))

    def test_after_reply_message_received(self):
        c = self.channel
        c.queue_delete = Mock()
        c.after_reply_message_received('foo')
        c.queue_delete.assert_called_with('foo')

    def test_queue_delete_unknown_queue(self):
        self.assertIsNone(self.channel.queue_delete('xiwjqjwel'))

    def test_queue_declare_passive(self):
        has_queue = self.channel._has_queue = Mock()
        has_queue.return_value = False
        with self.assertRaises(ChannelError):
            self.channel.queue_declare(queue='21wisdjwqe', passive=True)

    def test_get_message_priority(self):

        def _message(priority):
            return self.channel.prepare_message(
                'the message with priority', priority=priority,
            )

        self.assertEqual(
            self.channel._get_message_priority(_message(5)), 5,
        )
        # Out-of-range priorities are clamped to [min_priority, max_priority].
        self.assertEqual(
            self.channel._get_message_priority(
                _message(self.channel.min_priority - 10),
            ),
            self.channel.min_priority,
        )
        self.assertEqual(
            self.channel._get_message_priority(
                _message(self.channel.max_priority + 10),
            ),
            self.channel.max_priority,
        )
        # Non-numeric priorities fall back to the default priority.
        self.assertEqual(
            self.channel._get_message_priority(_message('foobar')),
            self.channel.default_priority,
        )
        self.assertEqual(
            self.channel._get_message_priority(_message(2), reverse=True),
            self.channel.max_priority - 2,
        )
class test_Transport(Case):
    """Tests for ``kombu.transport.virtual.Transport``."""

    def setup(self):
        self.transport = client().transport

    def test_custom_polling_interval(self):
        x = client(transport_options=dict(polling_interval=32.3))
        self.assertEqual(x.transport.polling_interval, 32.3)

    def test_close_connection(self):
        c1 = self.transport.create_channel(self.transport)
        c2 = self.transport.create_channel(self.transport)
        self.assertEqual(len(self.transport.channels), 2)
        # Closing the connection must release all of its channels.
        self.transport.close_connection(self.transport)
        self.assertFalse(self.transport.channels)
        del(c1)  # so pyflakes doesn't complain
        del(c2)

    def test_drain_channel(self):
        channel = self.transport.create_channel(self.transport)
        with self.assertRaises(virtual.Empty):
            self.transport._drain_channel(channel)

    def test__deliver__no_queue(self):
        with self.assertRaises(KeyError):
            self.transport._deliver(Mock(name='msg'), queue=None)

    def test__reject_inbound_message(self):
        channel = Mock(name='channel')
        self.transport.channels = [None, channel]
        # Rejection wraps the raw payload in a Message, tracks it in QoS,
        # then requeues it via basic_reject.
        self.transport._reject_inbound_message({'foo': 'bar'})
        channel.Message.assert_called_with(channel, {'foo': 'bar'})
        channel.qos.append.assert_called_with(
            channel.Message(), channel.Message().delivery_tag,
        )
        channel.basic_reject.assert_called_with(
            channel.Message().delivery_tag, requeue=True,
        )

    def test_on_message_ready(self):
        channel = Mock(name='channel')
        msg = Mock(name='msg')
        callback = Mock(name='callback')
        self.transport._callbacks = {'q1': callback}
        self.transport.on_message_ready(channel, msg, queue='q1')
        callback.assert_called_with(msg)

    def test_on_message_ready__no_queue(self):
        with self.assertRaises(KeyError):
            self.transport.on_message_ready(
                Mock(name='channel'), Mock(name='msg'), queue=None)

    def test_on_message_ready__no_callback(self):
        self.transport._callbacks = {}
        with self.assertRaises(KeyError):
            self.transport.on_message_ready(
                Mock(name='channel'), Mock(name='msg'), queue='q1')
|
|
# xmlrpc_registry.py - A method registry for use with xmlrpclib
# Copyright (C) 2001 by Eric Kidd. All rights reserved.
# Modified by Daniel J. Pearson on 24 Feb 2003
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import sys
import xmlrpclib
# Some type names for use in method signatures.
INT = "int"
BOOLEAN = "boolean"
DOUBLE = "double"
STRING = "string"
DATETIME = "dateTime.iso8601"
BASE64 = "base64"
ARRAY = "array"
STRUCT = "struct"

# Some error codes, borrowed from xmlrpc-c.
INTERNAL_ERROR = -500
TYPE_ERROR = -501
INDEX_ERROR = -502
PARSE_ERROR = -503
NETWORK_ERROR = -504
TIMEOUT_ERROR = -505
NO_SUCH_METHOD_ERROR = -506
REQUEST_REFUSED_ERROR = -507
INTROSPECTION_DISABLED_ERROR = -508
LIMIT_EXCEEDED_ERROR = -509
INVALID_UTF8_ERROR = -510


class Registry:
    """A registry for XML-RPC methods supported by a server.
    This is based on the method registry in xmlrpc-c."""

    def __init__(self, allow_introspection=1):
        """Create a new method registry."""
        self._allow_introspection = allow_introspection
        self._methods = {}      # name -> callable
        self._signatures = {}   # name -> signature list, or 'undef'
        self._help = {}         # name -> help string
        self._default_method = None
        self._install_system_methods()

    def _install_system_methods(self):
        """Install the built-in methods in the system.* suite."""
        self.add_method('system.listMethods',
                        self.system_listMethods,
                        [[ARRAY]])
        self.add_method('system.methodSignature',
                        self.system_methodSignature,
                        [[ARRAY, STRING]])
        self.add_method('system.methodHelp',
                        self.system_methodHelp,
                        [[STRING, STRING]])
        self.add_method('system.multicall',
                        self.system_multicall,
                        [[ARRAY, ARRAY]])

    def add_method(self, name, method, signature=None, help=None):
        """Add a method to the registry, with optional documentation
        and signature."""
        # Try to default things sensibly.
        if help is None:
            help = method.__doc__
        if help is None:
            help = ''
        if signature is None:
            signature = 'undef'
        # Install our new method.
        self._methods[name] = method
        self._signatures[name] = signature
        self._help[name] = help

    def register(self, func, signature=None, name=None, help=None):
        """Register *func*, defaulting the name and signature from it."""
        if name is None:
            name = func.__name__
        if signature is None and hasattr(func, 'signature'):
            signature = func.signature
        self.add_method(name, func, signature, help)

    def set_default_method(self, method):
        """Set a default method to handle otherwise unsupported requests."""
        self._default_method = method

    def dispatch_call(self, name, params):
        """Dispatch an XML-RPC request, and return the result.

        Well-known fault codes raised by the method are translated into
        the corresponding native Python exceptions.
        """
        try:
            # Try to find our method, falling back to the default handler.
            # (dict.has_key() was removed in Python 3; .get() is portable.)
            method = self._methods.get(name, self._default_method)
            if method is None:
                return self._no_such_method(name)
            # Call our method and return the result.
            # (apply() was removed in Python 3; *-unpacking is equivalent.)
            return method(*params)
        except xmlrpclib.Fault as f:
            if f.faultCode == TYPE_ERROR:
                raise TypeError(f.faultString)
            elif f.faultCode == INDEX_ERROR:
                raise IndexError(f.faultString)
            elif f.faultCode == NO_SUCH_METHOD_ERROR:
                raise AttributeError(f.faultString)
            elif f.faultCode == INVALID_UTF8_ERROR:
                raise UnicodeError(f.faultString)
            else:
                raise RuntimeError(f.faultString)

    _dispatch = dispatch_call

    def _no_such_method(self, name):
        """Raise a no-such-method error."""
        raise xmlrpclib.Fault(NO_SUCH_METHOD_ERROR,
                              "Method '%s' not found" % (name))

    def _introspection_check(self):
        """Raise an error if introspection is disabled."""
        if not self._allow_introspection:
            raise xmlrpclib.Fault(INTROSPECTION_DISABLED_ERROR,
                                  ("Introspection has been disabled on this " +
                                   "server, probably for security reasons."))

    def system_listMethods(self):
        """Return an array of all available XML-RPC methods on this server.
        """
        self._introspection_check()
        # list() keeps the Python 2 list semantics under Python 3's views.
        return list(self._methods.keys())

    def system_methodSignature(self, name):
        """Given the name of a method, return an array of legal signatures. Each
        signature is an array of strings. The first item of each signature is
        the return type, and any others items are parameter types.
        """
        self._introspection_check()
        if name in self._signatures:
            return self._signatures[name]
        else:
            self._no_such_method(name)

    def system_methodHelp(self, name):
        """Given the name of a method, return a help string.
        """
        self._introspection_check()
        if name in self._help:
            return self._help[name]
        else:
            self._no_such_method(name)

    def system_multicall(self, calls):
        """Process an array of calls, and return an array of results. Calls
        should be structs of the form {'methodName': string, 'params': array}.
        Each result will either be a single-item array containing the result
        value, or a struct of the form {'faultCode': int, 'faultString':
        string}. This is useful when you need to make lots of small calls
        without lots of round trips.
        """
        results = []
        for call in calls:
            try:
                name = call['methodName']
                params = call['params']
                if name == 'system.multicall':
                    errmsg = "Recursive system.multicall forbidden"
                    raise xmlrpclib.Fault(REQUEST_REFUSED_ERROR, errmsg)
                result = [self.dispatch_call(name, params)]
            except xmlrpclib.Fault as fault:
                result = {'faultCode': fault.faultCode,
                          'faultString': fault.faultString}
            except Exception:
                # sys.exc_type/exc_value were removed in Python 3;
                # sys.exc_info() is the portable spelling.
                exc_type, exc_value = sys.exc_info()[:2]
                errmsg = "%s:%s" % (exc_type, exc_value)
                result = {'faultCode': 1, 'faultString': errmsg}
            results.append(result)
        return results
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers that act as activation functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import InputSpec
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.LeakyReLU')
class LeakyReLU(Layer):
    """Leaky version of a Rectified Linear Unit.

    Allows a small, non-zero gradient when the unit is inactive:
    `f(x) = alpha * x for x < 0`,
    `f(x) = x for x >= 0`.

    Input shape:
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.

    Output shape:
        Same shape as the input.

    Arguments:
        alpha: float >= 0. Negative slope coefficient.
    """

    def __init__(self, alpha=0.3, **kwargs):
        super(LeakyReLU, self).__init__(**kwargs)
        self.supports_masking = True
        self.alpha = K.cast_to_floatx(alpha)

    def call(self, inputs):
        return K.relu(inputs, alpha=self.alpha)

    @tf_utils.shape_type_conversion
    def compute_output_shape(self, input_shape):
        # Element-wise activation: the shape is unchanged.
        return input_shape

    def get_config(self):
        merged = dict(super(LeakyReLU, self).get_config())
        merged['alpha'] = float(self.alpha)
        return merged
@tf_export('keras.layers.PReLU')
class PReLU(Layer):
    """Parametric Rectified Linear Unit.

    It follows:
    `f(x) = alpha * x for x < 0`,
    `f(x) = x for x >= 0`,
    where `alpha` is a learned array with the same shape as x.

    Input shape:
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.

    Output shape:
        Same shape as the input.

    Arguments:
        alpha_initializer: initializer function for the weights.
        alpha_regularizer: regularizer for the weights.
        alpha_constraint: constraint for the weights.
        shared_axes: the axes along which to share learnable
            parameters for the activation function.
            For example, if the incoming feature maps
            are from a 2D convolution
            with output shape `(batch, height, width, channels)`,
            and you wish to share parameters across space
            so that each filter only has one set of parameters,
            set `shared_axes=[1, 2]`.
    """

    def __init__(self,
                 alpha_initializer='zeros',
                 alpha_regularizer=None,
                 alpha_constraint=None,
                 shared_axes=None,
                 **kwargs):
        super(PReLU, self).__init__(**kwargs)
        self.supports_masking = True
        self.alpha_initializer = initializers.get(alpha_initializer)
        self.alpha_regularizer = regularizers.get(alpha_regularizer)
        self.alpha_constraint = constraints.get(alpha_constraint)
        # Normalize shared_axes to either None or a list of axis indices.
        if shared_axes is None:
            self.shared_axes = None
        elif not isinstance(shared_axes, (list, tuple)):
            self.shared_axes = [shared_axes]
        else:
            self.shared_axes = list(shared_axes)

    @tf_utils.shape_type_conversion
    def build(self, input_shape):
        # One alpha per input unit; along shared axes the parameter
        # dimension collapses to 1 and is broadcast at call time.
        param_shape = list(input_shape[1:])
        self.param_broadcast = [False] * len(param_shape)
        if self.shared_axes is not None:
            for i in self.shared_axes:
                # shared_axes indices count the batch axis (axis 0), while
                # param_shape does not — hence the ``i - 1`` offset.
                param_shape[i - 1] = 1
                self.param_broadcast[i - 1] = True
        self.alpha = self.add_weight(
            shape=param_shape,
            name='alpha',
            initializer=self.alpha_initializer,
            regularizer=self.alpha_regularizer,
            constraint=self.alpha_constraint)
        # Set input spec
        axes = {}
        if self.shared_axes:
            for i in range(1, len(input_shape)):
                if i not in self.shared_axes:
                    axes[i] = input_shape[i]
        self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
        self.built = True

    def call(self, inputs, mask=None):
        pos = K.relu(inputs)
        if K.backend() == 'theano':
            # Theano needs the broadcast pattern made explicit for the
            # collapsed (shared) parameter axes.
            neg = (
                K.pattern_broadcast(self.alpha, self.param_broadcast) *
                (inputs - math_ops.abs(inputs)) * 0.5)
        else:
            neg = -self.alpha * K.relu(-inputs)
        return pos + neg

    def get_config(self):
        config = {
            'alpha_initializer': initializers.serialize(self.alpha_initializer),
            'alpha_regularizer': regularizers.serialize(self.alpha_regularizer),
            'alpha_constraint': constraints.serialize(self.alpha_constraint),
            'shared_axes': self.shared_axes
        }
        base_config = super(PReLU, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    @tf_utils.shape_type_conversion
    def compute_output_shape(self, input_shape):
        return input_shape
@tf_export('keras.layers.ELU')
class ELU(Layer):
    """Exponential Linear Unit.

    It follows:
    `f(x) = alpha * (exp(x) - 1.) for x < 0`,
    `f(x) = x for x >= 0`.

    Input shape:
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.

    Output shape:
        Same shape as the input.

    Arguments:
        alpha: scale for the negative factor.
    """

    def __init__(self, alpha=1.0, **kwargs):
        super(ELU, self).__init__(**kwargs)
        self.supports_masking = True
        self.alpha = K.cast_to_floatx(alpha)

    def call(self, inputs):
        return K.elu(inputs, self.alpha)

    @tf_utils.shape_type_conversion
    def compute_output_shape(self, input_shape):
        # Element-wise activation: the shape is unchanged.
        return input_shape

    def get_config(self):
        merged = dict(super(ELU, self).get_config())
        merged['alpha'] = float(self.alpha)
        return merged
@tf_export('keras.layers.ThresholdedReLU')
class ThresholdedReLU(Layer):
    """Thresholded Rectified Linear Unit.

    It follows:
    `f(x) = x for x > theta`,
    `f(x) = 0 otherwise`.

    Input shape:
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.

    Output shape:
        Same shape as the input.

    Arguments:
        theta: float >= 0. Threshold location of activation.
    """

    def __init__(self, theta=1.0, **kwargs):
        super(ThresholdedReLU, self).__init__(**kwargs)
        self.supports_masking = True
        self.theta = K.cast_to_floatx(theta)

    def call(self, inputs, mask=None):
        # Zero out everything at or below the threshold via a 0/1 mask.
        above_theta = math_ops.cast(
            math_ops.greater(inputs, self.theta), K.floatx())
        return inputs * above_theta

    @tf_utils.shape_type_conversion
    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        merged = dict(super(ThresholdedReLU, self).get_config())
        merged['theta'] = float(self.theta)
        return merged
@tf_export('keras.layers.Softmax')
class Softmax(Layer):
    """Softmax activation function.

    Input shape:
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.

    Output shape:
        Same shape as the input.

    Arguments:
        axis: Integer, axis along which the softmax normalization is applied.
    """

    def __init__(self, axis=-1, **kwargs):
        super(Softmax, self).__init__(**kwargs)
        self.supports_masking = True
        self.axis = axis

    def call(self, inputs):
        return activations.softmax(inputs, axis=self.axis)

    @tf_utils.shape_type_conversion
    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        merged = dict(super(Softmax, self).get_config())
        merged['axis'] = self.axis
        return merged
@tf_export('keras.layers.ReLU')
class ReLU(Layer):
    """Rectified Linear Unit activation function.

    Input shape:
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.

    Output shape:
        Same shape as the input.

    Arguments:
        max_value: float >= 0. Maximum activation value.
            `None` (the default) means the activation is unbounded.
    """

    def __init__(self, max_value=None, **kwargs):
        super(ReLU, self).__init__(**kwargs)
        # Validate *before* casting: with the default max_value=None the old
        # code ran K.cast_to_floatx(None) / compared None < 0., both of which
        # raise at construction time.
        if max_value is not None and max_value < 0.:
            raise ValueError('max_value of Relu layer '
                             'cannot be negative value: ' + str(max_value))
        # 'supports_masking' (plural) is the attribute the Keras masking
        # machinery reads; the previous 'support_masking' was a silent typo
        # (compare the sibling layers in this file).
        self.supports_masking = True
        # Keep None (unbounded) as-is; only numeric bounds are cast.
        self.max_value = (
            K.cast_to_floatx(max_value) if max_value is not None else None)

    def call(self, inputs):
        return activations.relu(inputs, max_value=self.max_value)

    def get_config(self):
        config = {'max_value': self.max_value}
        base_config = super(ReLU, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    @tf_utils.shape_type_conversion
    def compute_output_shape(self, input_shape):
        return input_shape
|
|
# Copyright 2013 Rackspace Hosting Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import cProfile as profiler
import gc
import sys
import time
try:
import pstats
except Exception:
# Don't want to force pstats into the venv if it's not always used
pass
from neutron.api.v2 import attributes
from neutron_lib import exceptions as n_exc
from oslo_log import log as logging
import webob
LOG = logging.getLogger(__name__)
def filter_body(context, body, admin_only=None, always_filter=None):
    """Strip admin-only params (for non-admins) and always-filtered params."""
    to_remove = []
    if admin_only and not context.is_admin:
        to_remove.extend(admin_only)
    if always_filter:
        to_remove.extend(always_filter)
    for attr in to_remove:
        pop_param(body, attr)
def attr_specified(param):
    """True unless *param* is neutron's ATTR_NOT_SPECIFIED sentinel."""
    unspecified = attributes.ATTR_NOT_SPECIFIED
    return param is not unspecified
def timed(fn):
    """Decorator: log the wall-clock duration of each call to *fn*."""
    def _wrapped(*args, **kwargs):
        began = time.time()
        res = fn(*args, **kwargs)
        elapsed = time.time() - began
        # Lazy %-style args: the string is only built if INFO is enabled.
        LOG.info("Time for %s = %s", fn, elapsed)
        return res
    return _wrapped
def profile(output):
    """Decorator factory: run each call of the wrapped function under
    cProfile, dumping stats to *output*."""
    def _inner(fn):
        def _wrapped(*args, **kw):
            # uncomment this to see who's calling what
            # stats.print_callers()
            return _profile(output, fn, *args, **kw)
        return _wrapped
    return _inner
def live_profile(fn):
    """Decorator: profile *fn* on every call and print cumulative stats."""
    def _wrapped(*args, **kw):
        elapsed, load_stats, outcome = _live_profile(fn, *args, **kw)
        report = load_stats()
        report.sort_stats('cumulative')
        report.print_stats()
        # uncomment this to see who's calling what
        # report.print_callers()
        return outcome
    return _wrapped
def exc_wrapper(func):
    """Map neutron exceptions raised by *func* onto webob HTTP errors.

    Every translated exception is logged (with traceback) before being
    re-raised as the corresponding HTTP error; unexpected exceptions
    become a 500.

    Fix: the old ``finally: if not e: LOG.exception(str(e))`` was inverted —
    it logged ``str(None)`` on *success* and never logged real failures
    (and ``e`` is unbound after the except block on Python 3).
    """
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except n_exc.NotFound as e:
            LOG.exception(str(e))
            raise webob.exc.HTTPNotFound(e)
        except n_exc.Conflict as e:
            LOG.exception(str(e))
            raise webob.exc.HTTPConflict(e)
        except n_exc.BadRequest as e:
            LOG.exception(str(e))
            raise webob.exc.HTTPBadRequest(e)
        except Exception as e:
            LOG.exception(str(e))
            raise webob.exc.HTTPInternalServerError(e)
    return wrapped
def _profile(filename, fn, *args, **kw):
    """Run ``fn(*args, **kw)`` under cProfile, dumping stats to *filename*.

    ``runctx`` executes the statement inside the given namespaces, so the
    call's return value is recovered from ``locals()`` afterwards.
    """
    gc.collect()
    profiler.runctx('result = fn(*args, **kw)', globals(), locals(),
                    filename=filename)
    return locals()['result']
def _live_profile(fn, *args, **kw):
    """Profile ``fn(*args, **kw)`` in-process.

    Returns a 3-tuple ``(elapsed_seconds, stats_loader, result)`` where
    *stats_loader* is a zero-argument callable building a pstats.Stats,
    deferred so the caller decides when to materialize the report.
    NOTE(review): ``pstats.Stats()`` is created with no profile argument, so
    it may not contain the data from this run — verify against callers.
    """
    load_stats = lambda: pstats.Stats()
    gc.collect()
    began = time.time()
    profiler.runctx('result = fn(*args, **kw)', globals(), locals())
    ended = time.time()
    return ended - began, load_stats, locals()['result']
def pop_param(attrs, param, default=None):
    """Pop *param* from *attrs*, treating ATTR_NOT_SPECIFIED like absence."""
    value = attrs.pop(param, default)
    return value if attr_specified(value) else default
class Command(object):
    """Wrap a callable, remembering whether and with what result it ran."""

    def __init__(self, func):
        self.func = func
        self.result = None        # value returned by the most recent call
        self.called = False       # True once __call__ has run
        self.is_rollback = False  # forwarded as the ``rollback`` kwarg

    def __call__(self, *args, **kwargs):
        self.called = True
        kwargs['rollback'] = self.is_rollback
        outcome = self.func(*args, **kwargs)
        self.result = outcome
        return outcome
class CommandManager(object):
    """Track paired do/undo commands and roll back executed ones on failure."""

    def __init__(self):
        self.do_commands = []
        self.undo_commands = []

    @contextlib.contextmanager
    def execute(self, exc=None):
        """Context manager: on any exception, roll back and re-raise."""
        try:
            yield self
        except Exception:
            exc_info = sys.exc_info()
            LOG.exception("Exception in transaction", exc_info=exc_info)
            self.rollback()
            raise exc_info[1]

    def do(self, func):
        """Register (and return) a forward command."""
        cmd = Command(func)
        self.do_commands.append(cmd)
        return cmd

    def undo(self, func):
        """Register (and return) the rollback command for the matching do."""
        cmd = Command(func)
        cmd.is_rollback = True
        self.undo_commands.append(cmd)
        return cmd

    def rollback(self):
        """Run undo commands in reverse, skipping dos that never ran."""
        do_commands = reversed(self.do_commands)
        for cmd in reversed(self.undo_commands):
            # Fix: do_commands.next() was Python-2-only; the builtin next()
            # works on both Python 2.6+ and Python 3.
            do = next(do_commands)
            if not do.called:
                continue
            try:
                cmd(do.result)
            except Exception:
                LOG.exception("Rollback failed and wasn't caught!")
class retry_loop(object):
    """Decorator: retry the wrapped callable on any exception.

    :param retry_times: total number of attempts before giving up.
    :param delay: seconds to sleep between attempts (0 disables sleeping).
    :param backoff: multiplier applied to the delay after each failure.
    """

    def __init__(self, retry_times, delay=0, backoff=1):
        self._retry_times = retry_times
        self._delay = delay
        self._backoff = backoff

    def __call__(self, f):
        def wrapped_f(*args, **kwargs):
            level = self._retry_times
            current_delay = self._delay
            while level > 0:
                try:
                    return f(*args, **kwargs)
                except Exception:
                    level = level - 1
                    if level > 0:
                        if current_delay > 0:
                            time.sleep(current_delay)
                        if self._backoff > 0:
                            current_delay *= self._backoff
                        # Fix: f.func_name was Python-2-only; __name__ is
                        # the portable attribute.
                        LOG.debug("Retrying `%s` %d more times...",
                                  f.__name__, level)
                    else:
                        # Attempts exhausted: propagate the last failure.
                        raise
        return wrapped_f
def pretty_kwargs(**kwargs):
    """Render keyword arguments as a comma-separated ``key=value`` string."""
    pieces = ["%s=%s" % pair for pair in kwargs.items()]
    return ', '.join(pieces)
|
|
"""Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <dauphiya@iro.umontreal.ca>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import atleast2d_or_csr, check_arrays
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
class BernoulliRBM(BaseEstimator, TransformerMixin):
    """Bernoulli Restricted Boltzmann Machine (RBM).

    A Restricted Boltzmann Machine with binary visible units and
    binary hiddens. Parameters are estimated using Stochastic Maximum
    Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
    [2].

    The time complexity of this implementation is ``O(d ** 2)`` assuming
    d ~ n_features ~ n_components.

    Parameters
    ----------
    n_components : int, optional
        Number of binary hidden units.

    learning_rate : float, optional
        The learning rate for weight updates. It is *highly* recommended
        to tune this hyper-parameter. Reasonable values are in the
        10**[0., -3.] range.

    batch_size : int, optional
        Number of examples per minibatch.

    n_iter : int, optional
        Number of iterations/sweeps over the training dataset to perform
        during training.

    verbose : int, optional
        The verbosity level. The default, zero, means silent mode.

    random_state : integer or numpy.RandomState, optional
        A random number generator instance to define the state of the
        random permutations generator. If an integer is given, it fixes the
        seed. Defaults to the global numpy random number generator.

    Attributes
    ----------
    `intercept_hidden_` : array-like, shape (n_components,)
        Biases of the hidden units.

    `intercept_visible_` : array-like, shape (n_features,)
        Biases of the visible units.

    `components_` : array-like, shape (n_components, n_features)
        Weight matrix, where n_features in the number of
        visible units and n_components is the number of hidden units.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.neural_network import BernoulliRBM
    >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    >>> model = BernoulliRBM(n_components=2)
    >>> model.fit(X)
    BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
           random_state=None, verbose=0)

    References
    ----------
    [1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
        deep belief nets. Neural Computation 18, pp 1527-1554.
        http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf

    [2] Tieleman, T. Training Restricted Boltzmann Machines using
        Approximations to the Likelihood Gradient. International Conference
        on Machine Learning (ICML) 2008
    """
    def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
                 n_iter=10, verbose=0, random_state=None):
        self.n_components = n_components
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.n_iter = n_iter
        self.verbose = verbose
        self.random_state = random_state

    def transform(self, X):
        """Compute the hidden layer activation probabilities, P(h=1|v=X).

        Parameters
        ----------
        X : {array-like, sparse matrix} shape (n_samples, n_features)
            The data to be transformed.

        Returns
        -------
        h : array, shape (n_samples, n_components)
            Latent representations of the data.
        """
        # Builtin float instead of the deprecated np.float alias (removed in
        # NumPy 1.24); the two were always identical.
        X, = check_arrays(X, sparse_format='csr', dtype=float)
        return self._mean_hiddens(X)

    def _mean_hiddens(self, v):
        """Computes the probabilities P(h=1|v).

        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.

        Returns
        -------
        h : array-like, shape (n_samples, n_components)
            Corresponding mean field values for the hidden layer.
        """
        p = safe_sparse_dot(v, self.components_.T)
        p += self.intercept_hidden_
        return expit(p, out=p)

    def _sample_hiddens(self, v, rng):
        """Sample from the distribution P(h|v).

        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer to sample from.

        rng : RandomState
            Random number generator to use.

        Returns
        -------
        h : array-like, shape (n_samples, n_components)
            Values of the hidden layer.
        """
        p = self._mean_hiddens(v)
        return (rng.random_sample(size=p.shape) < p)

    def _sample_visibles(self, h, rng):
        """Sample from the distribution P(v|h).

        Parameters
        ----------
        h : array-like, shape (n_samples, n_components)
            Values of the hidden layer to sample from.

        rng : RandomState
            Random number generator to use.

        Returns
        -------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.
        """
        p = np.dot(h, self.components_)
        p += self.intercept_visible_
        expit(p, out=p)
        return (rng.random_sample(size=p.shape) < p)

    def _free_energy(self, v):
        """Computes the free energy F(v) = - log sum_h exp(-E(v,h)).

        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.

        Returns
        -------
        free_energy : array-like, shape (n_samples,)
            The value of the free energy.
        """
        return (- safe_sparse_dot(v, self.intercept_visible_)
                - np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
                               + self.intercept_hidden_).sum(axis=1))

    def gibbs(self, v):
        """Perform one Gibbs sampling step.

        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer to start from.

        Returns
        -------
        v_new : array-like, shape (n_samples, n_features)
            Values of the visible layer after one Gibbs step.
        """
        rng = check_random_state(self.random_state)
        h_ = self._sample_hiddens(v, rng)
        v_ = self._sample_visibles(h_, rng)
        return v_

    def partial_fit(self, X):
        """Fit the model to the data X which should contain a partial
        segment of the data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        self : BernoulliRBM
            The fitted model.
        """
        X, = check_arrays(X, sparse_format='csr', dtype=float)
        if not hasattr(self, 'random_state_'):
            self.random_state_ = check_random_state(self.random_state)
        if not hasattr(self, 'components_'):
            self.components_ = np.asarray(
                self.random_state_.normal(
                    0,
                    0.01,
                    (self.n_components, X.shape[1])
                ),
                # Canonical 'F' spelling of Fortran (column-major) order.
                order='F')
        if not hasattr(self, 'intercept_hidden_'):
            self.intercept_hidden_ = np.zeros(self.n_components, )
        if not hasattr(self, 'intercept_visible_'):
            self.intercept_visible_ = np.zeros(X.shape[1], )
        if not hasattr(self, 'h_samples_'):
            self.h_samples_ = np.zeros((self.batch_size, self.n_components))
        self._fit(X, self.random_state_)

    def _fit(self, v_pos, rng):
        """Inner fit for one mini-batch.

        Adjust the parameters to maximize the likelihood of v using
        Stochastic Maximum Likelihood (SML).

        Parameters
        ----------
        v_pos : array-like, shape (n_samples, n_features)
            The data to use for training.

        rng : RandomState
            Random number generator to use for sampling.
        """
        h_pos = self._mean_hiddens(v_pos)
        # Negative phase starts from the persistent fantasy particles (PCD).
        v_neg = self._sample_visibles(self.h_samples_, rng)
        h_neg = self._mean_hiddens(v_neg)

        lr = float(self.learning_rate) / v_pos.shape[0]
        update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
        update -= np.dot(h_neg.T, v_neg)
        self.components_ += lr * update
        self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
        self.intercept_visible_ += lr * (np.asarray(
                                         v_pos.sum(axis=0)).squeeze() -
                                         v_neg.sum(axis=0))

        h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0  # sample binomial
        self.h_samples_ = np.floor(h_neg, h_neg)

    def score_samples(self, X):
        """Compute the pseudo-likelihood of X.

        Parameters
        ----------
        X : {array-like, sparse matrix} shape (n_samples, n_features)
            Values of the visible layer. Must be all-boolean (not checked).

        Returns
        -------
        pseudo_likelihood : array-like, shape (n_samples,)
            Value of the pseudo-likelihood (proxy for likelihood).

        Notes
        -----
        This method is not deterministic: it computes a quantity called the
        free energy on X, then on a randomly corrupted version of X, and
        returns the log of the logistic function of the difference.
        """
        v = atleast2d_or_csr(X)
        rng = check_random_state(self.random_state)

        # Randomly corrupt one feature in each sample in v.
        ind = (np.arange(v.shape[0]),
               rng.randint(0, v.shape[1], v.shape[0]))
        if issparse(v):
            data = -2 * v[ind] + 1
            v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
        else:
            v_ = v.copy()
            v_[ind] = 1 - v_[ind]

        fe = self._free_energy(v)
        fe_ = self._free_energy(v_)
        return v.shape[1] * log_logistic(fe_ - fe)

    def fit(self, X, y=None):
        """Fit the model to the data X.

        Parameters
        ----------
        X : {array-like, sparse matrix} shape (n_samples, n_features)
            Training data.

        Returns
        -------
        self : BernoulliRBM
            The fitted model.
        """
        X, = check_arrays(X, sparse_format='csr', dtype=float)
        n_samples = X.shape[0]
        rng = check_random_state(self.random_state)

        self.components_ = np.asarray(
            rng.normal(0, 0.01, (self.n_components, X.shape[1])),
            # Canonical 'F' spelling of Fortran (column-major) order.
            order='F')
        self.intercept_hidden_ = np.zeros(self.n_components, )
        self.intercept_visible_ = np.zeros(X.shape[1], )
        self.h_samples_ = np.zeros((self.batch_size, self.n_components))

        n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        batch_slices = list(gen_even_slices(n_batches * self.batch_size,
                                            n_batches, n_samples))
        verbose = self.verbose
        begin = time.time()
        for iteration in xrange(1, self.n_iter + 1):
            for batch_slice in batch_slices:
                self._fit(X[batch_slice], rng)

            if verbose:
                end = time.time()
                print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
                      " time = %.2fs"
                      % (type(self).__name__, iteration,
                         self.score_samples(X).mean(), end - begin))
                begin = end

        return self
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------
# Copyright (c) 2016 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/dib-lab/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
import filecmp
import glob
import json
import pytest
import re
from tempfile import NamedTemporaryFile, mkdtemp
import screed
from shutil import rmtree
import sys
import kevlar
from kevlar.tests import data_file, data_glob
from khmer import Counttable
def test_novel_banding_args():
    """Banding parameters must be supplied together and within range."""
    cases = [
        (dict(numbands=4), 'Must specify `numbands` and `band` together'),
        (dict(band=0), 'Must specify `numbands` and `band` together'),
        (dict(numbands=4, band=-1), '`band` must be a value between 0 and 3'),
    ]
    for kwargs, message in cases:
        with pytest.raises(ValueError) as ve:
            list(kevlar.novel.novel(None, [], [], **kwargs))
        assert message in str(ve)
def test_cli():
    """Check `novel` subcommand argument parsing: defaults and banding."""
    # An explicit -k overrides the default k-mer size; banding is off by
    # default (num_bands/band both None).
    args = kevlar.cli.parser().parse_args([
        'novel', '--case', 'case1.fq', '--control', 'cntl1.fq', '--control',
        'cntl2.fq', '-k', '17',
    ])
    assert args.ksize == 17
    assert args.case_min == 6
    assert args.ctrl_max == 1
    assert args.num_bands is None
    assert args.band is None
    # Without -k the default k-mer size (31) applies; banding args are parsed.
    args = kevlar.cli.parser().parse_args([
        'novel', '--num-bands', '8', '--band', '1', '--case', 'case1.fq',
        '--control', 'cntl1.fq', '--control', 'cntl2.fq',
    ])
    assert args.ksize == 31
    assert args.case_min == 6
    assert args.ctrl_max == 1
    assert args.num_bands == 8
    assert args.band == 1
    # Supplying --band without --num-bands is rejected at run time (main),
    # not at parse time.
    with pytest.raises(ValueError) as ve:
        args = kevlar.cli.parser().parse_args([
            'novel', '--case', 'case1.fq', '--control', 'cntl1.fq',
            '--band', '1'
        ])
        kevlar.novel.main(args)
    assert 'Must specify --num-bands and --band together' in str(ve)
@pytest.mark.parametrize('kmer', [
    ('ACCGTACAA' * 3),
    ('TTATAATAG' * 3),
    ('CGAAAAATT' * 3),
])
def test_assumptions(kmer):
    """Hashing is strand-neutral: a k-mer and its reverse complement
    collapse to the same hash value."""
    counts = Counttable(27, 1e5, 2)
    revcomp = kevlar.revcom(kmer)
    assert counts.hash(kmer) == counts.hash(revcomp)
    assert counts.get_kmer_hashes(kmer)[0] == counts.get_kmer_hashes(revcomp)[0]
@pytest.mark.parametrize('case,ctrl', [
    ('microtrios/trio-li-proband.fq.gz', 'microtrios/trio-li-??ther.fq.gz'),
    ('microtrios/trio-na-proband.fq.gz', 'microtrios/trio-na-??ther.fq.gz'),
    ('microtrios/trio-k-proband.fq.gz', 'microtrios/trio-k-??ther.fq.gz'),
])
def test_novel_single_mutation(case, ctrl, capsys):
    """Every reported novel k-mer must satisfy the case/control abundance
    thresholds passed on the command line."""
    casestr = data_file(case)
    ctrls = kevlar.tests.data_glob(ctrl)
    args = kevlar.cli.parser().parse_args([
        'novel', '--case', casestr, '--ksize', '25', '--case-min', '7',
        '--control', ctrls[0], '--control', ctrls[1],
        '--num-bands', '2', '--band', '2',
        '--ctrl-max', '0', '--memory', '500K',
    ])
    kevlar.novel.main(args)
    out, err = capsys.readouterr()
    for line in out.split('\n'):
        # Only augmented annotation lines (ending in '#') carry abundances;
        # mate-sequence lines are skipped.
        if not line.endswith('#') or line.startswith('#mateseq'):
            continue
        match = re.search(r'(\d+) (\d+) (\d+)#$', line)
        assert match, line
        case_abund, ctl1_abund, ctl2_abund = (int(g) for g in match.groups())
        assert case_abund >= 7, line
        assert ctl1_abund == 0 and ctl2_abund == 0, line
def test_novel_two_cases(capsys):
    """End-to-end novel discovery with two case samples and pre-computed
    count tables for all four samples."""
    cases = kevlar.tests.data_glob('trio1/case6*.fq')
    controls = kevlar.tests.data_glob('trio1/ctrl[5,6].fq')
    # Four temporary count tables: one per case/control sample; `zip` below
    # relies on counttables and seqfiles being in the same order.
    with NamedTemporaryFile(suffix='.ct') as case1ct, \
            NamedTemporaryFile(suffix='.ct') as case2ct, \
            NamedTemporaryFile(suffix='.ct') as ctrl1ct, \
            NamedTemporaryFile(suffix='.ct') as ctrl2ct:
        counttables = [case1ct, case2ct, ctrl1ct, ctrl2ct]
        seqfiles = cases + controls
        # Build each count table with `kevlar count`.
        for ct, seqfile in zip(counttables, seqfiles):
            arglist = ['count', '--ksize', '19', '--memory', '1e7', ct.name,
                       seqfile]
            print(arglist)
            args = kevlar.cli.parser().parse_args(arglist)
            kevlar.count.main(args)
        # Run `kevlar novel` against the pre-computed tables.
        arglist = ['novel', '--ksize', '19', '--memory', '1e7',
                   '--ctrl-max', '1', '--case-min', '7',
                   '--case', cases[0], '--case', cases[1],
                   '--case-counts', case1ct.name, case2ct.name,
                   '--control-counts', ctrl1ct.name, ctrl2ct.name]
        args = kevlar.cli.parser().parse_args(arglist)
        kevlar.novel.main(args)
    out, err = capsys.readouterr()
    assert out.strip() != ''
    # Annotation lines end in '#' and list per-sample k-mer abundances:
    # two case columns followed by two control columns.
    for line in out.split('\n'):
        if not line.endswith('#') or line.startswith('#mateseq'):
            continue
        abundmatch = re.search(r'(\d+) (\d+) (\d+) (\d+)#$', line)
        assert abundmatch, line
        case1 = int(abundmatch.group(1))
        case2 = int(abundmatch.group(2))
        ctl1 = int(abundmatch.group(3))
        ctl2 = int(abundmatch.group(4))
        assert case1 >= 7 and case2 >= 7
        assert ctl1 <= 1 and ctl2 <= 1
def test_kmer_rep_in_read(capsys):
    """Annotating the same k-mer at two offsets must not corrupt the read
    sequence in the augmented Fastx output."""
    from sys import stdout
    read = ('AGGATGAGGATGAGGATGAGGATGAGGATGAGGATGAGGATGAGGATGAGGATGAGGATGAGGAT'
            'GAGGATGAGGATGAGGAT')
    record = kevlar.sequence.Record(name='reqseq', sequence=read)
    for offset in (2, 8):
        record.annotate('GATGAGGATGAGGATGAGGATGAGG', offset, (11, 1, 0))
    kevlar.print_augmented_fastx(record, stdout)
    out, err = capsys.readouterr()
    assert read in out
def test_iter_read_multi_file():
    """Iterating several input files yields the records from each of them."""
    infiles = kevlar.tests.data_glob('bogus-genome/mask-chr[1,2].fa')
    print(infiles)
    records = list(kevlar.multi_file_iter_khmer(infiles))
    assert len(records) == 4
def test_novel_abund_screen(capsys):
    """Low-abundance (sequencing-error) sequences are dropped by the
    abundance screen."""
    arglist = [
        'novel', '--ksize', '25', '--ctrl-max', '1', '--case-min', '8',
        '--case', data_file('screen-case.fa'),
        '--control', data_file('screen-ctrl.fa'),
        '--abund-screen', '3',
    ]
    kevlar.novel.main(kevlar.cli.parser().parse_args(arglist))
    out, err = capsys.readouterr()
    assert '>seq_error' not in out
def test_skip_until(capsys):
    """`--skip-until` fast-forwards past reads until the named read is found;
    an unknown name effectively skips the whole input."""
    readname = 'bogus-genome-chr1_115_449_0:0:0_0:0:0_1f4/1'
    case = data_file('trio1/case1.fq')
    ctrls = kevlar.tests.data_glob('trio1/ctrl[1,2].fq')
    arglist = [
        'novel', '--ctrl-max', '0', '--case-min', '6', '--case', case,
        '--control', ctrls[0], '--control', ctrls[1], '--skip-until', readname
    ]
    args = kevlar.cli.parser().parse_args(arglist)
    # Temporarily point kevlar's log stream at stderr so capsys can see it;
    # the swap is undone after the second run below.
    kevlar.logstream, logstream = sys.stderr, kevlar.logstream
    kevlar.novel.main(args)
    out, err = capsys.readouterr()
    message = ('Found read bogus-genome-chr1_115_449_0:0:0_0:0:0_1f4/1 '
               '(skipped 1001 reads)')
    assert message in err
    assert '29 unique novel kmers in 14 reads' in err
    # A read name that never appears: everything is skipped, nothing found.
    readname = 'BOGUSREADNAME'
    arglist = [
        'novel', '--ctrl-max', '0', '--case-min', '6', '--case', case,
        '--control', ctrls[0], '--control', ctrls[1], '--skip-until', readname
    ]
    args = kevlar.cli.parser().parse_args(arglist)
    kevlar.novel.main(args)
    # Restore the original log stream.
    kevlar.logstream = logstream
    out, err = capsys.readouterr()
    assert 'Found read' not in err
    assert '(skipped ' not in err
    assert 'Found 0 instances of 0 unique novel kmers in 0 reads' in err
def test_novel_save_counts():
    """Count tables saved by `kevlar novel` must be byte-identical to ones
    produced directly by `kevlar count`."""
    outdir = mkdtemp()
    try:
        # Build reference count tables with `kevlar count`.
        for individual in ('father', 'mother', 'proband'):
            countfile = '{:s}/{:s}.ct'.format(outdir, individual)
            readfile = data_file(
                'microtrios/trio-na-{:s}.fq.gz'.format(individual))
            countargs = kevlar.cli.parser().parse_args(
                ['count', '--ksize', '27', '--memory', '500K', countfile,
                 readfile])
            kevlar.count.main(countargs)
        # Run `kevlar novel`, asking it to save its own count tables.
        novelargs = kevlar.cli.parser().parse_args([
            'novel', '--ksize', '27', '--out', outdir + '/novel.augfastq.gz',
            '--save-case-counts', outdir + '/kid.ct', '--save-ctrl-counts',
            outdir + '/mom.ct', outdir + '/dad.ct', '--case',
            data_file('microtrios/trio-na-proband.fq.gz'),
            '--control', data_file('microtrios/trio-na-mother.fq.gz'),
            '--control', data_file('microtrios/trio-na-father.fq.gz'),
            '--memory', '500K',
        ])
        kevlar.novel.main(novelargs)
        # Compare each reference table against the one novel saved.
        for refname, testname in zip(('father', 'mother', 'proband'),
                                     ('dad', 'mom', 'kid')):
            reffile = '{:s}/{:s}.ct'.format(outdir, refname)
            testfile = '{:s}/{:s}.ct'.format(outdir, testname)
            assert filecmp.cmp(reffile, testfile)
    finally:
        rmtree(outdir)
def test_novel_save_counts_mismatch(capsys):
    """Mismatched counts of --save-ctrl-counts files versus --control inputs
    (three save paths, two controls) must be refused with a warning."""
    outdir = mkdtemp()
    try:
        arglist = [
            'novel', '--ksize', '27', '--out', outdir + '/novel.augfastq.gz',
            '--save-case-counts', outdir + '/kid.ct', '--save-ctrl-counts',
            outdir + '/mom.ct', outdir + '/dad.ct', outdir + '/sibling.ct',
            '--case', data_file('microtrios/trio-k-proband.fq.gz'),
            '--control', data_file('microtrios/trio-k-mother.fq.gz'),
            '--control', data_file('microtrios/trio-k-father.fq.gz'),
            '--memory', '500K'
        ]
        args = kevlar.cli.parser().parse_args(arglist)
        # Redirect kevlar's log stream to stderr so capsys captures it.
        kevlar.logstream, logstream = sys.stderr, kevlar.logstream
        kevlar.novel.main(args)
        kevlar.logstream = logstream
    finally:
        rmtree(outdir)
    out, err = capsys.readouterr()
    assert 'stubbornly refusing to save k-mer counts' in err
def test_novel_load_counts(capsys):
    """Pre-computed count tables can be supplied in place of raw reads."""
    case_reads = data_file('simple-genome-case-reads.fa.gz')
    ambig = data_file('ambig.fasta')
    case_counts = data_file('simple-genome-case.ct')
    ctrl_counts1, ctrl_counts2 = data_glob('simple-genome-ctrl?.ct')
    args = kevlar.cli.parser().parse_args([
        'novel', '-k', '25',
        '--case', case_reads, ambig, '--case-counts', case_counts,
        '--control-counts', ctrl_counts1, ctrl_counts2,
    ])
    # Redirect kevlar's log stream to stderr so capsys captures it.
    kevlar.logstream, logstream = sys.stderr, kevlar.logstream
    kevlar.novel.main(args)
    kevlar.logstream = logstream
    out, err = capsys.readouterr()
    assert 'counttables for 2 sample(s) provided' in err
|
|
# Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# This is a system emulation script with Aladdin accelerators.
#
# "m5 test.py"
import ConfigParser
import optparse
import sys
import os
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('../common')
addToPath('../ruby')
addToPath('../topologies')
import Options
import Ruby
import Simulation
import CacheConfig
import MemConfig
from Caches import *
from cpu2000 import *
def get_processes(options):
    """Interpret workload options and build one LiveProcess per command.

    Returns (processes, numThreads): numThreads equals the workload count
    in SMT mode, otherwise 1.
    """
    workloads = options.cmd.split(',')
    inputs = options.input.split(';') if options.input != "" else []
    outputs = options.output.split(';') if options.output != "" else []
    errouts = options.errout.split(';') if options.errout != "" else []
    pargs = options.options.split(',') if options.options != "" else []
    multiprocesses = []
    for idx, workload in enumerate(workloads):
        process = LiveProcess()
        process.executable = workload
        process.cwd = os.getcwd()
        if len(pargs) > idx:
            process.cmd = [workload] + pargs[idx].split()
        else:
            process.cmd = [workload]
        # Per-process stdio redirection, when provided.
        if len(inputs) > idx:
            process.input = inputs[idx]
        if len(outputs) > idx:
            process.output = outputs[idx]
        if len(errouts) > idx:
            process.errout = errouts[idx]
        multiprocesses.append(process)
    if options.smt:
        # SMT is only modeled by the detailed and inorder CPU models.
        assert(options.cpu_type == "detailed" or options.cpu_type == "inorder")
        return multiprocesses, len(multiprocesses)
    else:
        return multiprocesses, 1
# Build the command-line option parser; Ruby options are only registered
# when --ruby is requested. This script takes no positional arguments.
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addSEOptions(parser)
if '--ruby' in sys.argv:
    Ruby.define_options(parser)
(options, args) = parser.parse_args()
if args:
    print "Error: script doesn't take any positional arguments"
    sys.exit(1)
# Resolve the workloads to simulate: either SPEC-style benchmarks named via
# --bench (one per CPU), or arbitrary commands via --cmd.
multiprocesses = []
numThreads = 1
# NOTE: `np` here is the CPU count, shadowing nothing in this script but an
# unfortunate name collision with the conventional numpy alias.
np = options.num_cpus
if np > 0:
    if options.bench:
        apps = options.bench.split("-")
        if len(apps) != options.num_cpus:
            print "number of benchmarks not equal to set num_cpus!"
            sys.exit(1)
        for app in apps:
            try:
                # cpu2000 module defines one factory per benchmark name;
                # exec resolves the name dynamically.
                if buildEnv['TARGET_ISA'] == 'alpha':
                    exec("workload = %s('alpha', 'tru64', 'ref')" % app)
                else:
                    exec("workload = %s(buildEnv['TARGET_ISA'], 'linux', 'ref')" % app)
                multiprocesses.append(workload.makeLiveProcess())
            except:
                print >>sys.stderr, "Unable to find workload for %s: %s" % (buildEnv['TARGET_ISA'], app)
                sys.exit(1)
    elif options.cmd:
        multiprocesses, numThreads = get_processes(options)
    else:
        print >> sys.stderr, "No workload specified. Exiting!\n"
        sys.exit(1)
# Pick CPU and memory models from the options.
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
#print "CPUClass:%s, test_mem_mode:%s, FutureClass:%s" % (CPUClass, test_mem_mode, FutureClass)
CPUClass.numThreads = numThreads
MemClass = Simulation.setMemClass(options)
# Check -- do not allow SMT with multiple CPUs
if options.smt and options.num_cpus > 1:
    fatal("You cannot use SMT with multiple CPUs!")
# Build the simulated system: memory range, clock/voltage domains, and CPUs.
system = System(mem_mode = test_mem_mode,
                mem_ranges = [AddrRange(options.mem_size)],
                cache_line_size = options.cacheline_size)
# Create a top-level voltage domain
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
# Create a source clock for the system and set the clock period
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
                                   voltage_domain = system.voltage_domain)
# Create a CPU voltage domain
system.cpu_voltage_domain = VoltageDomain()
# Create a separate clock domain for the CPUs
system.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
                                       voltage_domain =
                                       system.cpu_voltage_domain)
if np > 0:
    # NOTE(review): TLBs are hard-coded to 16-entry X86TLBs here, which ties
    # this script to the x86 target — confirm intended for other ISAs.
    system.cpu = [CPUClass(cpu_id=i, dtb=X86TLB(size=16), itb=X86TLB(size=16)) for i in xrange(np)]
    # All cpus belong to a common cpu_clk_domain, therefore running at a common
    # frequency.
    for cpu in system.cpu:
        cpu.clk_domain = system.cpu_clk_domain
        cpu.dtb.size = 16
if options.accel_cfg_file:
    # One Aladdin accelerator datapath is built per section of the config
    # file; each section supplies its own clocking and memory parameters.
    config = ConfigParser.SafeConfigParser()
    config.read(options.accel_cfg_file)
    accels = config.sections()
    if not accels:
        fatal("No accelerators were specified!")
    datapaths = []
    for accel in accels:
        memory_type = config.get(accel, 'memory_type').lower()
        # Accelerators need their own clock domain! cycle_time is an integer
        # (nanoseconds), so use true division: the original `1/cycleTime`
        # truncated to 0 under Python 2 for any cycle time > 1ns, yielding a
        # bogus "0.000GHz" clock.
        cycleTime = config.getint(accel, "cycle_time")
        clock = "%1.3fGHz" % (1.0 / cycleTime)
        clk_domain = SrcClockDomain(
            clock = clock, voltage_domain = system.cpu_voltage_domain)
        # Set the globally required parameters.
        datapath = HybridDatapath(
            clk_domain = clk_domain,
            benchName = config.get(accel, "bench_name"),
            outputPrefix = config.get(accel, "bench_name"),
            traceFileName = config.get(accel, "trace_file_name"),
            configFileName = config.get(accel, "config_file_name"),
            acceleratorName = "datapath%d" % config.getint(accel, "accelerator_id"),
            acceleratorId = config.getint(accel, "accelerator_id"),
            cycleTime = cycleTime,
            useDb = config.getboolean(accel, "use_db"),
            experimentName = config.get(accel, "experiment_name"),
            enableStatsDump = options.enable_stats_dump,
            # With no CPUs the accelerators run standalone.
            executeStandalone = (np == 0))
        # DMA behavior knobs.
        datapath.dmaSetupOverhead = config.getint(accel, "dma_setup_overhead")
        datapath.maxDmaRequests = options.dma_outstanding_requests
        datapath.multiChannelDMA = config.getboolean(accel, "dma_multi_channel")
        datapath.dmaChunkSize = config.getint(accel, "dma_chunk_size")
        datapath.pipelinedDma = config.getboolean(accel, "pipelined_dma")
        datapath.ignoreCacheFlush = config.getboolean(accel, "ignore_cache_flush")
        datapath.invalidateOnDmaStore = config.getboolean(accel, "invalidate_on_dma_store")
        datapath.dmaFetchFromDRAM = config.getboolean(accel, "dma_fetch_from_dram")
        datapath.isPerfectTranslation = config.getboolean(accel, "is_perfect_translation")
        datapath.cacheForwarding = config.getboolean(accel, "cache_forwarding")
        datapath.hostPageWalk = config.getboolean(accel, "host_page_walk")
        if memory_type == "cache":
            # Cache-backed accelerators also need cache and TLB geometry.
            datapath.cacheSize = config.get(accel, "cache_size")
            datapath.cacheBandwidth = config.get(accel, "cache_bandwidth")
            datapath.cacheQueueSize = config.get(accel, "cache_queue_size")
            datapath.cacheAssoc = config.getint(accel, "cache_assoc")
            datapath.cacheHitLatency = config.getint(accel, "cache_hit_latency")
            datapath.cacheLineSize = options.cacheline_size
            datapath.cactiCacheConfig = config.get(accel, "cacti_cache_config")
            datapath.tlbEntries = config.getint(accel, "tlb_entries")
            datapath.tlbAssoc = config.getint(accel, "tlb_assoc")
            datapath.tlbAccessLatency = config.getint(accel, "tlb_access_latency")
            datapath.tlbHitLatency = config.getint(accel, "tlb_hit_latency")
            datapath.tlbMissLatency = config.getint(accel, "tlb_miss_latency")
            datapath.tlbCactiConfig = config.get(accel, "cacti_tlb_config")
            datapath.tlbPageBytes = config.getint(accel, "tlb_page_size")
            datapath.numOutStandingWalks = config.getint(
                accel, "tlb_max_outstanding_walks")
            datapath.tlbBandwidth = config.getint(accel, "tlb_bandwidth")
        if (memory_type != "cache" and memory_type != "spad"):
            fatal("Aladdin configuration file specified invalid memory type %s for "
                  "accelerator %s." % (memory_type, accel))
        datapaths.append(datapath)
    system.datapaths = datapaths
# Sanity check
if options.fastmem:
    if CPUClass != AtomicSimpleCPU:
        fatal("Fastmem can only be used with atomic CPU!")
    if (options.caches or options.l2cache):
        fatal("You cannot use fastmem in combination with caches!")
if options.simpoint_profile:
    if not options.fastmem:
        # Atomic CPU checked with fastmem option already
        fatal("SimPoint generation should be done with atomic cpu and fastmem")
    if np > 1:
        fatal("SimPoint generation not supported with more than one CPUs")
# Assign workloads to CPUs: all of them in SMT mode, the single workload to
# every CPU when only one was given, otherwise one workload per CPU.
for i in xrange(np):
    if options.smt:
        system.cpu[i].workload = multiprocesses
    elif len(multiprocesses) == 1:
        system.cpu[i].workload = multiprocesses[0]
    else:
        system.cpu[i].workload = multiprocesses[i]
    if options.fastmem:
        system.cpu[i].fastmem = True
    if options.simpoint_profile:
        system.cpu[i].simpoint_profile = True
        system.cpu[i].simpoint_interval = options.simpoint_interval
    if options.checker:
        system.cpu[i].addCheckerCpu()
    system.cpu[i].createThreads()
# Wire up the memory system (Ruby or classic crossbar+caches) and run.
if options.ruby:
    if not (options.cpu_type == "detailed" or options.cpu_type == "timing"):
        print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
        sys.exit(1)
    options.use_map = True
    # Manually extract spad_port of accelerator
    # In order to fit the Ruby system
    dma_ports = []
    for datapath in system.datapaths:
        dma_ports.append(datapath.spad_port)
    Ruby.create_system(options, False, system, None, dma_ports)
    # Ruby must have produced one port per CPU and per accelerator.
    assert(options.num_cpus == len(system.ruby._cpu_ruby_ports))
    assert(len(system.datapaths) == len(system.ruby._accel_ruby_ports))
    for i in xrange(np):
        ruby_port = system.ruby._cpu_ruby_ports[i]
        # Create the interrupt controller and connect its ports to Ruby
        # Note that the interrupt controller is always present but only
        # in x86 does it have message ports that need to be connected
        system.cpu[i].createInterruptController()
        # Connect the cpu's cache ports to Ruby
        system.cpu[i].icache_port = ruby_port.slave
        system.cpu[i].dcache_port = ruby_port.slave
        if buildEnv['TARGET_ISA'] == 'x86':
            system.cpu[i].interrupts.pio = ruby_port.master
            system.cpu[i].interrupts.int_master = ruby_port.slave
            system.cpu[i].interrupts.int_slave = ruby_port.master
            # Page-table walkers also go through Ruby on x86.
            system.cpu[i].itb.walker.port = ruby_port.slave
            system.cpu[i].dtb.walker.port = ruby_port.slave
    for i in xrange(len(system.datapaths)):
        ruby_port = system.ruby._accel_ruby_ports[i]
        system.datapaths[i].cache_port = ruby_port.slave
else:
    # Classic memory system: a system crossbar plus configured caches/DRAM.
    system.membus = SystemXBar(is_perfect_bus=options.is_perfect_bus,
                               width=options.xbar_width)
    system.system_port = system.membus.slave
    CacheConfig.config_cache(options, system)
    MemConfig.config_mem(options, system)
# Instantiate the root object and hand control to the simulation loop.
root = Root(full_system = False, system = system)
Simulation.run(options, root, system, FutureClass)
|
|
import pytest
import importlib
from unittest import mock
from google.protobuf import json_format
from google.protobuf import struct_pb2
from google.cloud import aiplatform
from google.cloud.aiplatform import base
from google.cloud.aiplatform import datasets
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform import models
from google.cloud.aiplatform import schema
from google.cloud.aiplatform import training_jobs
from google.cloud.aiplatform_v1.services.model_service import (
client as model_service_client,
)
from google.cloud.aiplatform_v1.services.pipeline_service import (
client as pipeline_service_client,
)
from google.cloud.aiplatform_v1.types import (
dataset as gca_dataset,
encryption_spec as gca_encryption_spec,
model as gca_model,
pipeline_state as gca_pipeline_state,
training_pipeline as gca_training_pipeline,
)
# Test constants shared by the AutoML Image training-job tests below.
# NOTE: the duplicate second definition of _TEST_DATASET_NAME (same value)
# has been removed.
_TEST_PROJECT = "test-project"
_TEST_LOCATION = "us-central1"
_TEST_DATASET_DISPLAY_NAME = "test-dataset-display-name"
_TEST_DATASET_NAME = "test-dataset-name"
_TEST_DISPLAY_NAME = "test-display-name"
_TEST_METADATA_SCHEMA_URI_IMAGE = schema.dataset.metadata.image
_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS = 7500
_TEST_TRAINING_DISABLE_EARLY_STOPPING = True
_TEST_MODEL_TYPE_ICN = "CLOUD"  # Image Classification default
_TEST_MODEL_TYPE_IOD = "CLOUD_HIGH_ACCURACY_1"  # Image Object Detection default
_TEST_MODEL_TYPE_MOBILE = "MOBILE_TF_LOW_LATENCY_1"
_TEST_PREDICTION_TYPE_ICN = "classification"
_TEST_PREDICTION_TYPE_IOD = "object_detection"
_TEST_MODEL_DISPLAY_NAME = "model-display-name"
_TEST_MODEL_ID = "98777645321"
_TEST_LABELS = {"key": "value"}
_TEST_MODEL_LABELS = {"model_key": "model_value"}
# Training-task input payloads, as protobuf Value structs.
_TEST_TRAINING_TASK_INPUTS = json_format.ParseDict(
    {
        "modelType": "CLOUD",
        "budgetMilliNodeHours": _TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
        "multiLabel": False,
        "disableEarlyStopping": _TEST_TRAINING_DISABLE_EARLY_STOPPING,
    },
    struct_pb2.Value(),
)
_TEST_TRAINING_TASK_INPUTS_WITH_BASE_MODEL = json_format.ParseDict(
    {
        "modelType": "CLOUD",
        "budgetMilliNodeHours": _TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
        "multiLabel": False,
        "disableEarlyStopping": _TEST_TRAINING_DISABLE_EARLY_STOPPING,
        "baseModelId": _TEST_MODEL_ID,
    },
    struct_pb2.Value(),
)
# Dataset split configurations exercised by the tests.
_TEST_FRACTION_SPLIT_TRAINING = 0.6
_TEST_FRACTION_SPLIT_VALIDATION = 0.2
_TEST_FRACTION_SPLIT_TEST = 0.2
_TEST_FILTER_SPLIT_TRAINING = "train"
_TEST_FILTER_SPLIT_VALIDATION = "validate"
_TEST_FILTER_SPLIT_TEST = "test"
_TEST_MODEL_NAME = (
    f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/models/{_TEST_MODEL_ID}"
)
_TEST_PIPELINE_RESOURCE_NAME = (
    f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/trainingPipelines/12345"
)
# CMEK encryption
_TEST_DEFAULT_ENCRYPTION_KEY_NAME = "key_default"
_TEST_DEFAULT_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
    kms_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME
)
_TEST_PIPELINE_ENCRYPTION_KEY_NAME = "key_pipeline"
_TEST_PIPELINE_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
    kms_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME
)
_TEST_MODEL_ENCRYPTION_KEY_NAME = "key_model"
_TEST_MODEL_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
    kms_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME
)
@pytest.fixture
def mock_pipeline_service_create():
    """Patch PipelineServiceClient.create_training_pipeline to succeed at once."""
    succeeded_pipeline = gca_training_pipeline.TrainingPipeline(
        name=_TEST_PIPELINE_RESOURCE_NAME,
        state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
        model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
    )
    with mock.patch.object(
        pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
    ) as patched_create:
        patched_create.return_value = succeeded_pipeline
        yield patched_create
@pytest.fixture
def mock_pipeline_service_get():
    """Patch PipelineServiceClient.get_training_pipeline to report success."""
    succeeded_pipeline = gca_training_pipeline.TrainingPipeline(
        name=_TEST_PIPELINE_RESOURCE_NAME,
        state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
        model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
    )
    with mock.patch.object(
        pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
    ) as patched_get:
        patched_get.return_value = succeeded_pipeline
        yield patched_get
@pytest.fixture
def mock_pipeline_service_create_and_get_with_fail():
    """Patch create to return a RUNNING pipeline and get to report FAILED."""
    running_pipeline = gca_training_pipeline.TrainingPipeline(
        name=_TEST_PIPELINE_RESOURCE_NAME,
        state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
    )
    failed_pipeline = gca_training_pipeline.TrainingPipeline(
        name=_TEST_PIPELINE_RESOURCE_NAME,
        state=gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED,
    )
    with mock.patch.object(
        pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
    ) as patched_create, mock.patch.object(
        pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
    ) as patched_get:
        patched_create.return_value = running_pipeline
        patched_get.return_value = failed_pipeline
        yield patched_create, patched_get
@pytest.fixture
def mock_model_service_get():
    """Patch ModelServiceClient.get_model to return an empty Model proto."""
    with mock.patch.object(
        model_service_client.ModelServiceClient, "get_model"
    ) as patched_get_model:
        patched_get_model.return_value = gca_model.Model()
        yield patched_get_model
@pytest.fixture
def mock_dataset_image():
    """An ImageDataset mock carrying the minimal attributes the job reads."""
    dataset_mock = mock.MagicMock(datasets.ImageDataset)
    dataset_mock.name = _TEST_DATASET_NAME
    dataset_mock.metadata_schema_uri = _TEST_METADATA_SCHEMA_URI_IMAGE
    dataset_mock._latest_future = None
    dataset_mock._exception = None
    dataset_mock._gca_resource = gca_dataset.Dataset(
        display_name=_TEST_DATASET_DISPLAY_NAME,
        metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_IMAGE,
        labels={},
        name=_TEST_DATASET_NAME,
        metadata={},
    )
    return dataset_mock
@pytest.fixture
def mock_model():
    """A Model mock exposing the attributes read when used as a base model."""
    model_mock = mock.MagicMock(models.Model)
    model_mock.name = _TEST_MODEL_ID
    model_mock._latest_future = None
    model_mock._exception = None
    model_mock._gca_resource = gca_model.Model(
        display_name=_TEST_MODEL_DISPLAY_NAME,
        description="This is the mock Model's description",
        name=_TEST_MODEL_NAME,
    )
    yield model_mock
class TestAutoMLImageTrainingJob:
    """Unit tests for ``training_jobs.AutoMLImageTrainingJob``.

    All service calls are mocked via the module fixtures; the tests verify
    the TrainingPipeline payloads the job builds and the state it exposes.
    """

    def setup_method(self):
        # Reload so each test starts from a fresh global SDK configuration.
        importlib.reload(initializer)
        importlib.reload(aiplatform)

    def teardown_method(self):
        # Drain any async work started by sync=False runs.
        initializer.global_pool.shutdown(wait=True)

    def test_init_all_parameters(self, mock_model):
        """Ensure all private members are set correctly at initialization"""
        aiplatform.init(project=_TEST_PROJECT)
        job = training_jobs.AutoMLImageTrainingJob(
            display_name=_TEST_DISPLAY_NAME,
            prediction_type=_TEST_PREDICTION_TYPE_ICN,
            model_type=_TEST_MODEL_TYPE_MOBILE,
            base_model=mock_model,
            multi_label=True,
        )
        assert job._display_name == _TEST_DISPLAY_NAME
        assert job._model_type == _TEST_MODEL_TYPE_MOBILE
        assert job._prediction_type == _TEST_PREDICTION_TYPE_ICN
        assert job._multi_label is True
        assert job._base_model == mock_model

    def test_init_wrong_parameters(self, mock_model):
        """Ensure correct exceptions are raised when initializing with invalid args"""
        aiplatform.init(project=_TEST_PROJECT)
        with pytest.raises(ValueError, match=r"not a supported prediction type"):
            training_jobs.AutoMLImageTrainingJob(
                display_name=_TEST_DISPLAY_NAME, prediction_type="abcdefg",
            )
        with pytest.raises(ValueError, match=r"not a supported model_type for"):
            training_jobs.AutoMLImageTrainingJob(
                display_name=_TEST_DISPLAY_NAME,
                prediction_type="classification",
                model_type=_TEST_MODEL_TYPE_IOD,
            )
        with pytest.raises(ValueError, match=r"`base_model` is only supported"):
            training_jobs.AutoMLImageTrainingJob(
                display_name=_TEST_DISPLAY_NAME,
                prediction_type=_TEST_PREDICTION_TYPE_IOD,
                base_model=mock_model,
            )

    @pytest.mark.parametrize("sync", [True, False])
    def test_run_call_pipeline_service_create(
        self,
        mock_pipeline_service_create,
        mock_pipeline_service_get,
        mock_dataset_image,
        mock_model_service_get,
        mock_model,
        sync,
    ):
        """Create and run an AutoML ICN training job, verify calls and return value"""
        aiplatform.init(
            project=_TEST_PROJECT,
            encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
        )
        job = training_jobs.AutoMLImageTrainingJob(
            display_name=_TEST_DISPLAY_NAME, base_model=mock_model, labels=_TEST_LABELS,
        )
        model_from_job = job.run(
            dataset=mock_dataset_image,
            model_display_name=_TEST_MODEL_DISPLAY_NAME,
            model_labels=_TEST_MODEL_LABELS,
            training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
            validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION,
            test_filter_split=_TEST_FILTER_SPLIT_TEST,
            budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
            disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
            sync=sync,
        )
        if not sync:
            model_from_job.wait()
        true_filter_split = gca_training_pipeline.FilterSplit(
            training_filter=_TEST_FILTER_SPLIT_TRAINING,
            validation_filter=_TEST_FILTER_SPLIT_VALIDATION,
            test_filter=_TEST_FILTER_SPLIT_TEST,
        )
        true_managed_model = gca_model.Model(
            display_name=_TEST_MODEL_DISPLAY_NAME,
            labels=mock_model._gca_resource.labels,
            description=mock_model._gca_resource.description,
            encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
        )
        true_input_data_config = gca_training_pipeline.InputDataConfig(
            filter_split=true_filter_split, dataset_id=mock_dataset_image.name,
        )
        true_training_pipeline = gca_training_pipeline.TrainingPipeline(
            display_name=_TEST_DISPLAY_NAME,
            labels=_TEST_LABELS,
            training_task_definition=schema.training_job.definition.automl_image_classification,
            training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_BASE_MODEL,
            model_to_upload=true_managed_model,
            input_data_config=true_input_data_config,
            encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
        )
        mock_pipeline_service_create.assert_called_once_with(
            parent=initializer.global_config.common_location_path(),
            training_pipeline=true_training_pipeline,
        )
        mock_model_service_get.assert_called_once_with(
            name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
        )
        assert job._gca_resource is mock_pipeline_service_get.return_value
        assert model_from_job._gca_resource is mock_model_service_get.return_value
        assert job.get_model()._gca_resource is mock_model_service_get.return_value
        assert not job.has_failed
        assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED

    @pytest.mark.usefixtures("mock_pipeline_service_get")
    @pytest.mark.parametrize("sync", [True, False])
    def test_run_call_pipeline_if_no_model_display_name_nor_model_labels(
        self,
        mock_pipeline_service_create,
        mock_dataset_image,
        mock_model_service_get,
        sync,
    ):
        """Model display name and labels default to the job's when not passed to run()."""
        aiplatform.init(project=_TEST_PROJECT)
        job = training_jobs.AutoMLImageTrainingJob(
            display_name=_TEST_DISPLAY_NAME,
            labels=_TEST_LABELS,
            training_encryption_spec_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME,
            model_encryption_spec_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME,
        )
        model_from_job = job.run(
            dataset=mock_dataset_image,
            budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
            disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
            # BUG FIX: ``sync`` is parametrized but was never forwarded to
            # run(), so the sync=False path was not actually exercised.
            sync=sync,
        )
        if not sync:
            model_from_job.wait()
        # Test that if defaults to the job display name
        true_managed_model = gca_model.Model(
            display_name=_TEST_DISPLAY_NAME,
            labels=_TEST_LABELS,
            encryption_spec=_TEST_MODEL_ENCRYPTION_SPEC,
        )
        true_input_data_config = gca_training_pipeline.InputDataConfig(
            dataset_id=mock_dataset_image.name
        )
        true_training_pipeline = gca_training_pipeline.TrainingPipeline(
            display_name=_TEST_DISPLAY_NAME,
            labels=_TEST_LABELS,
            training_task_definition=schema.training_job.definition.automl_image_classification,
            training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
            model_to_upload=true_managed_model,
            input_data_config=true_input_data_config,
            encryption_spec=_TEST_PIPELINE_ENCRYPTION_SPEC,
        )
        mock_pipeline_service_create.assert_called_once_with(
            parent=initializer.global_config.common_location_path(),
            training_pipeline=true_training_pipeline,
        )

    @pytest.mark.usefixtures(
        "mock_pipeline_service_create",
        "mock_pipeline_service_get",
        "mock_model_service_get",
    )
    @pytest.mark.parametrize("sync", [True, False])
    def test_run_called_twice_raises(self, mock_dataset_image, sync):
        """Calling run() on an already-run job must raise RuntimeError."""
        aiplatform.init(project=_TEST_PROJECT)
        job = training_jobs.AutoMLImageTrainingJob(display_name=_TEST_DISPLAY_NAME,)
        job.run(
            dataset=mock_dataset_image,
            model_display_name=_TEST_MODEL_DISPLAY_NAME,
            training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
            validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
            test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
            disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
            sync=sync,
        )
        with pytest.raises(RuntimeError):
            job.run(
                dataset=mock_dataset_image,
                model_display_name=_TEST_MODEL_DISPLAY_NAME,
                sync=sync,
            )

    @pytest.mark.usefixtures(
        "mock_pipeline_service_create",
        "mock_pipeline_service_get",
        "mock_model_service_get",
    )
    @pytest.mark.parametrize("sync", [True, False])
    def test_run_with_two_split_raises(
        self, mock_dataset_image, sync,
    ):
        """Passing both fraction and filter splits must raise ValueError."""
        aiplatform.init(project=_TEST_PROJECT)
        job = training_jobs.AutoMLImageTrainingJob(display_name=_TEST_DISPLAY_NAME,)
        with pytest.raises(ValueError):
            job.run(
                dataset=mock_dataset_image,
                model_display_name=_TEST_MODEL_DISPLAY_NAME,
                training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
                validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
                test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
                training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
                validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION,
                test_filter_split=_TEST_FILTER_SPLIT_TEST,
                sync=sync,
            )
        # BUG FIX: the former trailing ``model_from_job.wait()`` dereferenced a
        # name that is never bound (run() raises inside pytest.raises), so the
        # sync=False case died with NameError instead of passing.

    @pytest.mark.parametrize("sync", [True, False])
    def test_run_raises_if_pipeline_fails(
        self, mock_pipeline_service_create_and_get_with_fail, mock_dataset_image, sync
    ):
        """A pipeline that reaches FAILED state surfaces as RuntimeError."""
        aiplatform.init(project=_TEST_PROJECT)
        job = training_jobs.AutoMLImageTrainingJob(display_name=_TEST_DISPLAY_NAME,)
        with pytest.raises(RuntimeError):
            job.run(
                model_display_name=_TEST_MODEL_DISPLAY_NAME,
                dataset=mock_dataset_image,
                training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
                validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
                test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
                sync=sync,
            )
            if not sync:
                job.wait()
        with pytest.raises(RuntimeError):
            job.get_model()

    def test_raises_before_run_is_called(self, mock_pipeline_service_create):
        """Accessors that need a started pipeline must raise before run()."""
        aiplatform.init(project=_TEST_PROJECT)
        job = training_jobs.AutoMLImageTrainingJob(display_name=_TEST_DISPLAY_NAME,)
        with pytest.raises(RuntimeError):
            job.get_model()
        with pytest.raises(RuntimeError):
            job.has_failed
        with pytest.raises(RuntimeError):
            job.state

    @pytest.mark.parametrize("sync", [True, False])
    def test_splits_fraction(
        self,
        mock_pipeline_service_create,
        mock_pipeline_service_get,
        mock_dataset_image,
        mock_model_service_get,
        mock_model,
        sync,
    ):
        """
        Initiate aiplatform with encryption key name.
        Create and run an AutoML Image classification training job with a
        fraction split, verify calls and payload.
        """
        aiplatform.init(
            project=_TEST_PROJECT,
            encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
        )
        job = training_jobs.AutoMLImageTrainingJob(
            display_name=_TEST_DISPLAY_NAME, base_model=mock_model
        )
        model_from_job = job.run(
            dataset=mock_dataset_image,
            model_display_name=_TEST_MODEL_DISPLAY_NAME,
            training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
            validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
            test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
            budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
            disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
            sync=sync,
        )
        if not sync:
            model_from_job.wait()
        true_fraction_split = gca_training_pipeline.FractionSplit(
            training_fraction=_TEST_FRACTION_SPLIT_TRAINING,
            validation_fraction=_TEST_FRACTION_SPLIT_VALIDATION,
            test_fraction=_TEST_FRACTION_SPLIT_TEST,
        )
        true_managed_model = gca_model.Model(
            display_name=_TEST_MODEL_DISPLAY_NAME,
            description=mock_model._gca_resource.description,
            encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
        )
        true_input_data_config = gca_training_pipeline.InputDataConfig(
            fraction_split=true_fraction_split, dataset_id=mock_dataset_image.name,
        )
        true_training_pipeline = gca_training_pipeline.TrainingPipeline(
            display_name=_TEST_DISPLAY_NAME,
            training_task_definition=schema.training_job.definition.automl_image_classification,
            training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_BASE_MODEL,
            model_to_upload=true_managed_model,
            input_data_config=true_input_data_config,
            encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
        )
        mock_pipeline_service_create.assert_called_once_with(
            parent=initializer.global_config.common_location_path(),
            training_pipeline=true_training_pipeline,
        )

    @pytest.mark.parametrize("sync", [True, False])
    def test_splits_filter(
        self,
        mock_pipeline_service_create,
        mock_pipeline_service_get,
        mock_dataset_image,
        mock_model_service_get,
        mock_model,
        sync,
    ):
        """
        Initiate aiplatform with encryption key name.
        Create and run an AutoML Image classification training job with a
        filter split, verify calls and payload.
        """
        aiplatform.init(
            project=_TEST_PROJECT,
            encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
        )
        job = training_jobs.AutoMLImageTrainingJob(
            display_name=_TEST_DISPLAY_NAME, base_model=mock_model
        )
        model_from_job = job.run(
            dataset=mock_dataset_image,
            model_display_name=_TEST_MODEL_DISPLAY_NAME,
            training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
            validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION,
            test_filter_split=_TEST_FILTER_SPLIT_TEST,
            budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
            disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
            sync=sync,
        )
        if not sync:
            model_from_job.wait()
        true_filter_split = gca_training_pipeline.FilterSplit(
            training_filter=_TEST_FILTER_SPLIT_TRAINING,
            validation_filter=_TEST_FILTER_SPLIT_VALIDATION,
            test_filter=_TEST_FILTER_SPLIT_TEST,
        )
        true_managed_model = gca_model.Model(
            display_name=_TEST_MODEL_DISPLAY_NAME,
            description=mock_model._gca_resource.description,
            encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
        )
        true_input_data_config = gca_training_pipeline.InputDataConfig(
            filter_split=true_filter_split, dataset_id=mock_dataset_image.name,
        )
        true_training_pipeline = gca_training_pipeline.TrainingPipeline(
            display_name=_TEST_DISPLAY_NAME,
            training_task_definition=schema.training_job.definition.automl_image_classification,
            training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_BASE_MODEL,
            model_to_upload=true_managed_model,
            input_data_config=true_input_data_config,
            encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
        )
        mock_pipeline_service_create.assert_called_once_with(
            parent=initializer.global_config.common_location_path(),
            training_pipeline=true_training_pipeline,
        )

    @pytest.mark.parametrize("sync", [True, False])
    def test_splits_default(
        self,
        mock_pipeline_service_create,
        mock_pipeline_service_get,
        mock_dataset_image,
        mock_model_service_get,
        mock_model,
        sync,
    ):
        """
        Initiate aiplatform with encryption key name.
        Create and run an AutoML Image classification training job with no
        split arguments, verify the InputDataConfig carries no split.
        """
        aiplatform.init(
            project=_TEST_PROJECT,
            encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
        )
        job = training_jobs.AutoMLImageTrainingJob(
            display_name=_TEST_DISPLAY_NAME, base_model=mock_model
        )
        model_from_job = job.run(
            dataset=mock_dataset_image,
            model_display_name=_TEST_MODEL_DISPLAY_NAME,
            budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
            disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
            sync=sync,
        )
        if not sync:
            model_from_job.wait()
        true_managed_model = gca_model.Model(
            display_name=_TEST_MODEL_DISPLAY_NAME,
            description=mock_model._gca_resource.description,
            encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
        )
        true_input_data_config = gca_training_pipeline.InputDataConfig(
            dataset_id=mock_dataset_image.name,
        )
        true_training_pipeline = gca_training_pipeline.TrainingPipeline(
            display_name=_TEST_DISPLAY_NAME,
            training_task_definition=schema.training_job.definition.automl_image_classification,
            training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_BASE_MODEL,
            model_to_upload=true_managed_model,
            input_data_config=true_input_data_config,
            encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
        )
        mock_pipeline_service_create.assert_called_once_with(
            parent=initializer.global_config.common_location_path(),
            training_pipeline=true_training_pipeline,
        )

    def test_splits_filter_incomplete(
        self,
        mock_pipeline_service_create,
        mock_pipeline_service_get,
        mock_dataset_image,
        mock_model_service_get,
        mock_model,
    ):
        """
        Initiate aiplatform with encryption key name.
        An incomplete filter split (missing validation filter) must raise
        ValueError before any service call.
        """
        aiplatform.init(
            project=_TEST_PROJECT,
            encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
        )
        job = training_jobs.AutoMLImageTrainingJob(
            display_name=_TEST_DISPLAY_NAME, base_model=mock_model
        )
        with pytest.raises(ValueError):
            job.run(
                dataset=mock_dataset_image,
                model_display_name=_TEST_MODEL_DISPLAY_NAME,
                training_filter_split=_TEST_FILTER_SPLIT_TRAINING,
                validation_fraction_split=None,
                test_filter_split=_TEST_FILTER_SPLIT_TEST,
                disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
            )
|
|
# db_model.py -- SQLAlchemy ORM model definitions for the basic WMS.
from flask import Flask
# BUG FIX: the ``flask.ext.*`` import namespace was removed in Flask 1.0;
# the extension must be imported from its own package.
from flask_sqlalchemy import SQLAlchemy
from basic_wms import app
DB_FILENAME = 'database.db'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///model/' + DB_FILENAME
# app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
class CommonFieldsSQLA:
    """
    Mixin with fields and methods common to all SQL Alchemy ORM objects:
    a surrogate integer primary key (``_id``), a soft-delete flag
    (``_deleted``), and generic lookup helpers.
    """
    _id = db.Column(db.Integer, primary_key=True, nullable=False)
    _deleted = db.Column(db.Boolean, nullable=False)

    def __repr__(self):
        return self.__str__()

    def init_common_fields(self):
        # New entities always start out not-deleted.
        self._deleted = False

    @classmethod
    def get_one(cls, id_):
        """
        Returns individual entity with given *id*
        or None if there is no such an entity.
        """
        # One round-trip: first() returns the row or None. The previous
        # count()-then-one() pair issued two queries for the same answer
        # (_id is the primary key, so at most one row can match).
        return cls.query.filter_by(_id=id_).first()

    @classmethod
    def get_all(cls):
        """ Yields all entities."""
        for entity in cls.query.all():
            yield entity

    @property
    def id_(self):
        return self._id

    @property
    def deleted(self):
        return self._deleted

    @deleted.setter
    def deleted(self, value):
        self._deleted = value
class WarehouseSQLA(db.Model, CommonFieldsSQLA):
    """ORM mapping for a warehouse: a unique name plus a location."""
    __tablename__ = 'warehouse'
    _name = db.Column(db.String(80), unique=True, nullable=False)
    _location = db.Column(db.String(120), nullable=False)

    def __init__(self, name, location):
        self.init_common_fields()
        self._name = name
        self._location = location

    def __str__(self):
        return "<Warehouse #{}>".format(self._id)

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, new_name):
        self._name = new_name

    @property
    def location(self):
        return self._location

    @location.setter
    def location(self, new_location):
        self._location = new_location

    @property
    def serialize(self):
        """
        Returns dictionary with serialized object's fields:
        {'id': int, 'deleted': bool, 'name': str, 'location': str}.
        """
        return dict(
            id=self.id_,
            deleted=self.deleted,
            name=self.name,
            location=self.location,
        )
class ItemTypeSQLA(db.Model, CommonFieldsSQLA):
    """ORM mapping for a type of stocked item: name, model, manufacturer and
    the unit in which its quantity is measured."""
    __tablename__ = 'item_type'
    _name = db.Column(db.String(45), nullable=False)
    _item_model = db.Column(db.String(45), nullable=False)
    _manufacturer = db.Column(db.String(45), nullable=False)
    _unit_of_measure = db.Column(db.String(45), nullable=False)
    def __init__(self, name, item_model, manufacturer, unit_of_measure):
        self.init_common_fields()
        self._name = name
        self._item_model = item_model
        self._manufacturer = manufacturer
        self._unit_of_measure = unit_of_measure
    def __str__(self):
        return "<ItemType #{}, name: {}>".format(self._id, self._name)
    # Plain read/write accessors over the private column attributes.
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, value):
        self._name = value
    @property
    def item_model(self):
        return self._item_model
    @item_model.setter
    def item_model(self, value):
        self._item_model = value
    @property
    def manufacturer(self):
        return self._manufacturer
    @manufacturer.setter
    def manufacturer(self, value):
        self._manufacturer = value
    @property
    def unit_of_measure(self):
        return self._unit_of_measure
    @unit_of_measure.setter
    def unit_of_measure(self, value):
        self._unit_of_measure = value
    @property
    def serialize(self):
        """
        Returns dictionary with serialized object's fields:
        {'id': int, 'deleted': bool, 'name': str, 'item_model': str,
        'manufacturer': str, 'unit_of_measure': str}.
        """
        return {
            'id': self.id_,
            'deleted': self.deleted,
            'name': self.name,
            'item_model': self.item_model,
            'manufacturer': self.manufacturer,
            'unit_of_measure': self.unit_of_measure
        }
class SupplierSQLA(db.Model, CommonFieldsSQLA):
    """ORM mapping for a supplier, uniquely identified by its VATIN."""
    __tablename__ = 'supplier'
    # 'VATIN' = 'VAT Identification Number' (NIP in Poland)
    _VATIN = db.Column(db.String(45), nullable=False, unique=True)
    _name = db.Column(db.String(45), nullable=False)
    _location = db.Column(db.String(45), nullable=False)
    def __init__(self, VATIN, name, location):
        self.init_common_fields()
        self._VATIN = VATIN
        self._name = name
        self._location = location
    def __str__(self):
        return "<Supplier #{}, name: {}>".format(self._id, self._name)
    # Plain read/write accessors over the private column attributes.
    @property
    def VATIN(self):
        return self._VATIN
    @VATIN.setter
    def VATIN(self, value):
        self._VATIN = value
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, value):
        self._name = value
    @property
    def location(self):
        return self._location
    @location.setter
    def location(self, value):
        self._location = value
    @property
    def serialize(self):
        """
        Returns dictionary with serialized object's fields:
        {'id': int, 'deleted': bool, 'VATIN': str, 'name': str,
        'location': str}.
        """
        return {
            'id': self.id_,
            'deleted': self.deleted,
            'VATIN': self.VATIN,
            'name': self.name,
            'location': self.location
        }
class ItemBatchSQLA(db.Model, CommonFieldsSQLA):
    """ORM mapping for a batch of items of one type, stored in one warehouse
    and delivered by one supplier."""
    __tablename__ = 'item_batch'
    _quantity = db.Column(db.Integer, nullable=False)
    _warehouse_id = db.Column(db.Integer, db.ForeignKey("warehouse._id"),
                              nullable=False)
    _warehouse = db.relationship("WarehouseSQLA",
                                 backref=db.backref("item_batches",
                                                    lazy="dynamic"))
    _supplier_id = db.Column(db.Integer, db.ForeignKey("supplier._id"),
                             nullable=False)
    _supplier = db.relationship("SupplierSQLA",
                                backref=db.backref("item_batches",
                                                   lazy="dynamic"))
    _item_type_id = db.Column(db.Integer, db.ForeignKey("item_type._id"),
                              nullable=False)
    _item_type = db.relationship("ItemTypeSQLA",
                                 backref=db.backref("item_batches",
                                                    lazy="dynamic"))

    def __init__(self, quantity, warehouse, supplier, item_type):
        self.init_common_fields()
        self._quantity = quantity
        self._warehouse = warehouse
        self._supplier = supplier
        self._item_type = item_type

    def __str__(self):
        return "<ItemBatch #{}>".format(self._id)

    @property
    def quantity(self):
        return self._quantity

    @quantity.setter
    def quantity(self, value):
        self._quantity = value

    @property
    def warehouse(self):
        return self._warehouse

    @warehouse.setter
    def warehouse(self, value):
        self._warehouse = value

    @property
    def supplier(self):
        return self._supplier

    @supplier.setter
    def supplier(self, value):
        self._supplier = value

    @property
    def item_type(self):
        return self._item_type

    @item_type.setter
    def item_type(self, value):
        self._item_type = value

    @property
    def serialize(self):
        """
        Returns dictionary with serialized object's fields:
        {'id': int, 'deleted': bool, 'quantity': str,
        'warehouse_id': int, 'supplier_id': int, 'item_type_id': int}.
        """
        return {
            'id': self.id_,
            # BUG FIX: 'deleted' was promised by the docstring and is present
            # in every sibling model's serialize, but was missing here.
            'deleted': self.deleted,
            'quantity': self.quantity,
            'warehouse_id': self.warehouse.id_,
            'supplier_id': self.supplier.id_,
            'item_type_id': self.item_type.id_
        }
if __name__ == "__main__":
    # Create all tables defined above when the module is run directly.
    db.create_all()
|
|
from __future__ import print_function
from os.path import dirname, abspath
from subprocess import check_output
import json
import os
import requests
import subprocess
import pytest
from pprint import pprint
DATADIR = "testdata"  # directory holding the fixture files uploaded by tests
DEBUG = 0  # verbose request/response logging; turned on in the session fixture
SHOCK_URL = ""  # base URL of the Shock server under test
SHOCK_USER_AUTH = ""  # "basic ..."/"bearer ..." credential for a regular user
SHOCK_ADMIN_AUTH = ""  # credential for an admin user
AUTH = ""  # credential shown in debug output; set to the user credential
FILELIST = []  # default list of fixture file names
TESTHEADERS = {}  # request headers carrying the user credential
DONTDELETE = 0  # when truthy, delete_nodes() skips the actual DELETE calls
class TestClass:
@pytest.fixture(scope="session", autouse=True)
def execute_before_any_test(self):
""" setup any state specific to the execution of the given class (which
usually contains tests).
"""
print("execute_before_any_test started ----------------------------")
# DATADIR = dirname(abspath(__file__)) + "/testdata/"
global DEBUG
DEBUG = 1
#PORT = os.environ.get('SHOCK_PORT', "7445")
#URL = os.environ.get('SHOCK_HOST', "http://localhost")
#SHOCK_URL = URL + ":" + PORT
global SHOCK_URL
SHOCK_URL = os.environ.get('SHOCK_URL', "http://shock:7445")
#TOKEN = os.environ.get("MGRKEY")
# SHOCK_AUTH="bearer token"
global SHOCK_AUTH
# default AUTH is USER AUTH
global AUTH
global SHOCK_USER_AUTH
global FILELIST
FILELIST = ["AAA.txt", "BBB.txt", "CCC.txt"]
# SHOCK_USER_AUTH="bearer token"
SHOCK_USER_AUTH = os.environ.get("SHOCK_USER_AUTH", "basic dXNlcjE6c2VjcmV0")
SHOCK_ADMIN_AUTH = os.environ.get("SHOCK_ADMIN_AUTH", "basic YWRtaW46c2VjcmV0")
AUTH=SHOCK_USER_AUTH
global TESTHEADERS
TESTHEADERS = {"Authorization": SHOCK_USER_AUTH}
global TESTAHEADERS
TESTAHEADERS = {"Authorization": SHOCK_ADMIN_AUTH}
#if URL == "https://sequencing.bio.anl.gov":
# TESTHEADERS= {"AUTH" : TOKEN}
global DONTDELETE
DONTDELETE = 0
return
def create_nodes(self, FILELIST):
'''Takes a list of filenames, uploads to shock, returns list of shock ids.'''
NODES = []
TESTURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
# to get multipart-form correctly, data has to be specified in this strange way
# and passed as the files= parameter to requests.put
FORMDATA = {"attributes_str": (None, '{"project_id":"TESTPROJECT"}')}
for FILE in FILELIST:
if not FILE.startswith(DATADIR):
FILE=os.path.join(DATADIR, FILE)
FILES = {'upload': open(FILE, 'rb')}
if DEBUG:
print("POST", TESTURL, TESTHEADERS, FILES)
response = requests.post(TESTURL, headers=TESTHEADERS, files=FILES, data=FORMDATA)
data = json.loads(response.content.decode("utf-8"))
assert "attributes" in data["data"], data
assert data["data"]["attributes"] is not None , data
assert data["status"] == 200, data["error"]
assert data["data"]["attributes"]["project_id"] == "TESTPROJECT"
NODES += [data["data"]["id"]]
if DEBUG:
print("PUT", SHOCK_URL + "/node/" + NODES[-1], FORMDATA)
r = requests.put(TESTURL + "/" + NODES[-1],
files=FORMDATA,
headers=TESTHEADERS)
if DEBUG:
print("RESPONSE:", r.content.decode("utf-8"))
data = json.loads(r.content.decode("utf-8"))
assert data["data"]["attributes"]["project_id"] == "TESTPROJECT"
return(NODES)
def confirm_nodes_project(self, NODES, PROJECT):
'''Tests a list of nodes to makes sure that attributes->project_id is the same as PROJECT'''
for NODEID in NODES:
TESTURL = SHOCK_URL + "/node/{}".format(NODEID)
if DEBUG:
print("curl '{}' -H 'Authorization: {}'".format(TESTURL, AUTH))
response = requests.get(TESTURL, headers=TESTHEADERS)
assert response.status_code == 200, response.content.decode("utf-8")
data = json.loads(response.content.decode("utf-8"))
assert PROJECT in data["data"]["attributes"]["project_id"]
def delete_nodes(self, NODELIST):
'''Delete nodes, confirm http response only'''
for NODEID in NODELIST:
NODEURL = SHOCK_URL + "/node/{}".format(NODEID)
if DEBUG:
print("DELETE", NODEURL, TESTHEADERS)
if not DONTDELETE:
response = requests.delete(NODEURL, headers=TESTHEADERS)
assert json.loads(response.content.decode("utf-8"))["status"] == 200
return
def test_delete_nodes(self):
assert DONTDELETE is not 1, "This test fails unless deleting is enabled"
NODEID = self.create_nodes([os.path.join(DATADIR, "AAA.txt")])[0]
NODEURL = SHOCK_URL + "/node/{}".format(NODEID)
if DEBUG:
print("GET", NODEURL, TESTHEADERS)
predeleteresponse = requests.get(NODEURL, headers=TESTHEADERS)
assert predeleteresponse.status_code == 200
assert "Node not found" not in predeleteresponse.content.decode("utf-8")
self.delete_nodes([NODEID])
if DEBUG:
print("GET", NODEURL, TESTHEADERS)
postdeleteresponse = requests.get(NODEURL, headers=TESTHEADERS)
assert postdeleteresponse.status_code == 404
assert "Node not found" in postdeleteresponse.content.decode("utf-8")
def test_nodelist_noauth(self):
TESTURL = "{SHOCK_URL}/node/?query".format(SHOCK_URL=SHOCK_URL)
TESTDATA = {}
TESTHEADERS = {}
if DEBUG:
print("GET", TESTURL, TESTDATA, TESTHEADERS)
response = requests.get(TESTURL, headers=TESTHEADERS, params=TESTDATA)
assert response.status_code == 200, response.content.decode("utf-8")
data = json.loads(response.content.decode("utf-8"))
assert data["status"] == 200, data["error"]
assert data["total_count"] >= 0
def test_nodelist_auth(self):
TESTURL = "{SHOCK_URL}/node/?query".format(SHOCK_URL=SHOCK_URL)
TESTDATA = {}
if DEBUG:
print("GET", TESTURL, TESTDATA, TESTHEADERS)
response = requests.get(TESTURL, headers=TESTHEADERS, params=TESTDATA)
assert response.status_code == 200, response.content.decode("utf-8")
data = json.loads(response.content.decode("utf-8"))
assert data["status"] == 200, data["error"]
assert data["total_count"] >= 0
def test_nodelist_badauth(self):
TESTURL = "{SHOCK_URL}/node/?query".format(SHOCK_URL=SHOCK_URL)
TESTDATA = {}
TESTHEADERS = {"Authorization": "OAuth BADTOKENREJECTME"}
if DEBUG:
print("GET", TESTURL, TESTDATA, TESTHEADERS)
response = requests.get(TESTURL, headers=TESTHEADERS, params=TESTDATA)
assert response.status_code == 403 or response.status_code == 400, response.content.decode("utf-8")
data = json.loads(response.content.decode("utf-8"))
# 403 unauthorized 400 bad query
assert data["status"] == 403 or data["status"] == 400
def test_upload_emptyfile(self):
TESTURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
FILES = {'upload': open(os.path.join(DATADIR, 'emptyfile'), 'rb')}
if DEBUG:
print("POST", TESTURL, TESTHEADERS, FILES)
response = requests.post(TESTURL, headers=TESTHEADERS, files=FILES)
assert response.status_code == 200, response.content.decode("utf-8")
data = json.loads(response.content.decode("utf-8"))
if DEBUG:
print("RESPONSE", response.content.decode("utf-8"))
assert data["status"] == 200, data["error"]
assert data["data"]["file"]["checksum"]["md5"] == "d41d8cd98f00b204e9800998ecf8427e"
# cleanup
NODEID = data["data"]["id"]
self.delete_nodes([NODEID])
def test_upload_threefiles(self):
NODES = self.create_nodes(FILELIST)
TESTURL = "{SHOCK_URL}/node/?query".format(SHOCK_URL=SHOCK_URL)
TESTDATA = {}
if DEBUG:
print("GET", TESTURL, TESTHEADERS, TESTDATA)
response = requests.get(TESTURL, headers=TESTHEADERS, params=TESTDATA)
data = json.loads(response.content.decode("utf-8"))
assert data["total_count"] >= 3
assert NODES[0] in response.content.decode("utf-8")
assert b"AAA.txt" in response.content
assert b"BBB.txt" in response.content
assert b"CCC.txt" in response.content
# cleanup
self.delete_nodes(NODES)
def test_upload_and_delete_node(self):
TESTURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
FILES = {'upload': open(os.path.join(DATADIR, 'CCC.txt'), 'rb')}
if DEBUG:
print("POST", TESTURL, TESTHEADERS, FILES)
response = requests.post(TESTURL, headers=TESTHEADERS, files=FILES)
data = json.loads(response.content.decode("utf-8"))
NODEID = data["data"]["id"]
# test my node exists
if DEBUG:
print("GET", TESTURL, TESTHEADERS)
TESTURL = SHOCK_URL + "/node/{}".format(NODEID)
response = requests.get(TESTURL, headers=TESTHEADERS)
assert response.status_code == 200, response.content.decode("utf-8")
data = json.loads(response.content.decode("utf-8"))
assert data["status"] == 200, data["error"]
# delete my node
if DEBUG:
print("DELETE", TESTURL, TESTHEADERS)
TESTURL = SHOCK_URL+"/node/{}".format(NODEID)
response = requests.delete(TESTURL, headers=TESTHEADERS)
data = json.loads(response.content.decode("utf-8"))
# test my node is gone
if DEBUG:
print("GET", TESTURL, TESTHEADERS)
TESTURL = SHOCK_URL + "/node/{}".format(NODEID)
response = requests.get(TESTURL, headers=TESTHEADERS)
assert response.status_code == 404, response.content.decode("utf-8")
data = json.loads(response.content.decode("utf-8"))
assert data["status"] == 404
def test_upload_and_download_node_GET(self):
TESTURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
FILES = {'upload': open(os.path.join(DATADIR, 'CCC.txt'), 'rb')}
if DEBUG:
print("POST", TESTURL, TESTHEADERS, FILES)
response = requests.post(TESTURL, headers=TESTHEADERS, files=FILES)
data = json.loads(response.content.decode("utf-8"))
NODEID = data["data"]["id"]
# test my node exists
if DEBUG:
print("GET", TESTURL, TESTHEADERS)
TESTURL = SHOCK_URL + "/node/{}".format(NODEID)
FILES = {}
response = requests.get(TESTURL, headers=TESTHEADERS)
assert response.status_code == 200, response.content.decode("utf-8")
data = json.loads(response.content.decode("utf-8"))
assert data["status"] == 200, data["error"]
DLURL = SHOCK_URL + "/node/{}?download".format(NODEID)
response = requests.get(DLURL, headers=TESTHEADERS)
assert response.content[0:3] == b"CCC"
# cleanup
self.delete_nodes([NODEID])
def test_upload_and_download_node_GET_gzip(self):
# download file in compressed format, works with all the above options
# curl -X GET http://<host>[:<port>]/node/<node_id>?download&compression=<zip|gzip>
# upload node
TESTURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
FILES = {'upload': open(os.path.join(DATADIR, 'CCC.txt'), 'rb')}
if DEBUG:
print("POST", TESTURL, TESTHEADERS, FILES)
response = requests.post(TESTURL, headers=TESTHEADERS, files=FILES)
data = json.loads(response.content.decode("utf-8"))
NODEID = data["data"]["id"]
# test my node exists
TESTURL = SHOCK_URL + "/node/{}".format(NODEID)
FILES = {}
if DEBUG:
print("GET", TESTURL, TESTHEADERS)
response = requests.get(TESTURL, headers=TESTHEADERS)
assert response.status_code == 200, response.content.decode("utf-8")
data = json.loads(response.content.decode("utf-8"))
assert data["status"] == 200, data["error"]
# Download node
DLURL = SHOCK_URL + "/node/{}?download&compression=gzip".format(NODEID)
if DEBUG:
print("GET", DLURL, TESTHEADERS)
response = requests.get(DLURL, headers=TESTHEADERS)
assert response.content[0:3] != b"CCC"
# cleanup
self.delete_nodes([NODEID])
def test_upload_and_download_node_GET_zip(self):
TESTURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
FILES = {'upload': open(os.path.join(DATADIR, 'CCC.txt'), 'rb')}
if DEBUG:
print("POST", TESTURL, TESTHEADERS, FILES)
response = requests.post(TESTURL, headers=TESTHEADERS, files=FILES)
data = json.loads(response.content.decode("utf-8"))
NODEID = data["data"]["id"]
# test my node exists
if DEBUG:
print("GET", TESTURL, TESTHEADERS)
TESTURL = SHOCK_URL + "/node/{}".format(NODEID)
FILES = {}
response = requests.get(TESTURL, headers=TESTHEADERS)
assert response.status_code == 200, response.content.decode("utf-8")
data = json.loads(response.content.decode("utf-8"))
assert data["status"] == 200, data["error"]
DLURL = SHOCK_URL + "/node/{}?download&compression=zip".format(NODEID)
response = requests.get(DLURL, headers=TESTHEADERS)
assert response.content[0:3] != b"CCC"
# cleanup
self.delete_nodes([NODEID])
def test_upload_and_download_node_gzip(self):
TESTURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
FILES = {'upload': open(os.path.join(DATADIR, 'CCC.txt'), 'rb')}
if DEBUG:
print("POST", TESTURL, TESTHEADERS, FILES)
response = requests.post(TESTURL, headers=TESTHEADERS, files=FILES)
data = json.loads(response.content.decode("utf-8"))
NODEID = data["data"]["id"]
# test my node exists
TESTURL = SHOCK_URL + "/node/{}".format(NODEID)
FILES = {}
if DEBUG:
print("GET", TESTURL, TESTHEADERS)
response = requests.get(TESTURL, headers=TESTHEADERS)
assert response.status_code == 200, response.content.decode("utf-8")
data = json.loads(response.content.decode("utf-8"))
assert data["status"] == 200, data["error"]
DLURL = SHOCK_URL + "/node/{}?download&compression=gzip".format(NODEID)
response = requests.get(DLURL, headers=TESTHEADERS)
assert response.content[0:3] != b"CCC"
# cleanup
self.delete_nodes([NODEID])
def test_download_url_zip_GET(self):
NODES = self.create_nodes(FILELIST)
# confirm nodes exist
self.confirm_nodes_project(NODES, "TESTPROJECT")
# query for TESTDATA
assert SHOCK_URL != None
print("SHOCK_URL:"+SHOCK_URL)
TESTURL = "{SHOCK_URL}/node?query".format(SHOCK_URL=SHOCK_URL)
TESTDATA = {"project_id": "TESTPROJECT"}
if DEBUG:
print("GET", TESTURL, TESTDATA)
response = requests.get(TESTURL, headers=TESTHEADERS, params=TESTDATA)
if DEBUG:
print("RESPONSE", response.content)
data = json.loads(response.content.decode("utf-8"))
assert "total_count" in data
assert data["total_count"] >= 3, "Missing or incorrect total_count" + " ".join([str(response.status_code), str(response.content)])
assert NODES[0] in response.content.decode("utf-8"), NODES[0] + " not in " + response.content.decode("utf-8")
# issue query for TESTPROJECT FILES downloaded as ZIP
TESTURL = SHOCK_URL+"/node?query&download_url&archive=zip"
if DEBUG:
print("curl '{}' -H 'Authorization: {}' -G -d {}".format(TESTURL, AUTH, TESTDATA))
response = requests.get(TESTURL, headers=TESTHEADERS, params=TESTDATA)
print(" ".join([ "Debugging ZIP Download", str(response.status_code), str(response.content)]))
data = json.loads(response.content.decode("utf-8"))
# extract preauth uri from response
assert "data" in data
PREAUTH_URL = data["data"]["url"] # example: http://localhost/preauth/TbqTUadG42vVf72LkWRg
TESTURL=PREAUTH_URL
if DEBUG:
print("GET", TESTURL, TESTHEADERS);
with requests.get(TESTURL, headers=TESTHEADERS, stream=True) as response:
# write it to file and test ZIP
print("Debugging status code: " + str(response.status_code))
if response.encoding is None:
response.encoding = 'utf-8'
# subprocess.run(["ls", "-l"], shell=True)
with open("TEST.zip", "wb") as F:
subprocess.run("ls -l TEST.zip", shell=True)
for chunk in response.iter_content(chunk_size=512):
if chunk:
F.write(chunk)
subprocess.run("ls -l TEST.zip", shell=True)
out = check_output("unzip -l TEST.zip", shell=True)
assert b'TEST.zip' in out
assert b'CCC.txt' in out
assert b' 4 ' in out # This fails if there are no 4-byte-files
# cleanup
self.delete_nodes(NODES)
def test_download_url_tar_GET(self):
# Per test invokation on https://github.com/MG-RAST/Shock/wiki/API
# download multiple files in a single archive format (zip or tar), returns 1-time use download url for archive
# use download_url with a standard query
# curl -X GET http://<host>[:<port>]/node?query&download_url&archive=zip&<key>=<value>
NODES = self.create_nodes(FILELIST)
# confirm nodes exist
self.confirm_nodes_project(NODES, "TESTPROJECT")
# query for TESTDATA
TESTURL = "{SHOCK_URL}/node?query".format(SHOCK_URL=SHOCK_URL)
TESTDATA = {"project_id": "TESTPROJECT"}
if DEBUG:
print("GET", TESTURL, TESTDATA)
response = requests.get(TESTURL, headers=TESTHEADERS, params=TESTDATA)
if DEBUG:
print("RESPONSE 1 :", response.content)
# if DEBUG: print("RESPONSE", response.content)
data = json.loads(response.content.decode("utf-8"))
assert "total_count" in data
assert data["total_count"] >= 3
assert NODES[0] in response.content.decode("utf-8")
# issue query for TESTPROJECT FILES downloaded as ZIP
TESTURL = SHOCK_URL+"/node?query&download_url&archive=tar".format()
if DEBUG:
print("curl '{}' -H 'Authorization: {}' -G -d {}".format(TESTURL, AUTH, TESTDATA))
response = requests.get(TESTURL, headers=TESTHEADERS, params=TESTDATA)
if DEBUG:
print("RESPONSE 2 :", response.content)
data = json.loads(response.content.decode("utf-8"))
# extract preauth uri from response
assert "data" in data
assert "url" in data["data"]
PREAUTH = data["data"]["url"]
if DEBUG:
print("GET", PREAUTH, TESTHEADERS)
response = requests.get(PREAUTH, headers=TESTHEADERS)
if DEBUG:
print("RESPONSE 3 :", response.content)
# write it to file and test ZIP
with open("TEST.tar", "wb") as f:
f.write(response.content)
out = check_output("tar tvf TEST.tar", shell=True)
assert b'CCC.txt' in out
assert b' 4 ' in out # This fails if there are no 4-byte-files
# cleanup
self.delete_nodes(NODES)
def test_download_url_tar_POST(self):
NODES = self.create_nodes(FILELIST)
# confirm nodes exist
self.confirm_nodes_project(NODES, "TESTPROJECT")
# query for TESTDATA
TESTURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
# Remember, multipart-forms that are not files have format {key: (None, value)}
FORMDATA = {"ids": (None, ",".join(NODES)),
"download_url": (None, 1),
"archive_format": (None, "tar")}
# issue query for TESTPROJECT FILES downloaded as TAR
if DEBUG:
print("POST", TESTURL, FORMDATA)
response = requests.post(TESTURL, headers=TESTHEADERS, files=FORMDATA)
if DEBUG:
print("RESPONSE 1 :", response.content)
data = json.loads(response.content.decode("utf-8"))
# extract preauth uri from response
PREAUTH = data["data"]["url"]
if DEBUG:
print("GET", PREAUTH, TESTHEADERS)
response = requests.get(PREAUTH, headers=TESTHEADERS)
if DEBUG:
print("RESPONSE 2 :", response.content)
# write it to file and test ZIP
with open("TESTP.tar", "wb") as f:
f.write(response.content)
out = check_output("tar tvf TESTP.tar", shell=True)
assert b'CCC.txt' in out
assert b' 4 ' in out # This fails if there are no 4-byte-files
# cleanup
self.delete_nodes(NODES)
def test_download_url_zip_POST(self):
# Per test invokation on https://github.com/MG-RAST/Shock/wiki/API
# use download_url with a POST and list of node ids
# curl -X POST -F "download_url=1" -F "archive_format=zip" -F "ids=<node_id_1>,<node_id_2>,<...>" http://<host>[:<port>]/node
NODES = self.create_nodes(FILELIST)
# confirm nodes exist
self.confirm_nodes_project(NODES, "TESTPROJECT")
# query for TESTDATA
TESTURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
# Remember, multipart-forms that are not files have format {key: (None, value)}
FORMDATA = {"ids": (None, ",".join(NODES)),
"download_url": (None, 1),
"archive_format": (None, "zip")}
# issue query for TESTPROJECT FILES downloaded as TAR
if DEBUG:
print("POST", TESTURL, FORMDATA)
response = requests.post(TESTURL, headers=TESTHEADERS, files=FORMDATA)
data = json.loads(response.content.decode("utf-8"))
# extract preauth uri from response
if DEBUG:
print("RESPONSE", response)
PREAUTH = data["data"]["url"]
if DEBUG:
print("Debugging receiving : " + PREAUTH)
response = requests.get(PREAUTH, headers=TESTHEADERS)
if DEBUG:
print("Debugging status code: " + str(response.status_code))
# write it to file and test ZIP
with open("TESTP.zip", "wb") as f:
f.write(response.content)
out = check_output("unzip -l TESTP.zip", shell=True)
assert b'CCC.txt' in out
assert b' 4 ' in out # This fails if there are no 4-byte-files
# cleanup
self.delete_nodes(NODES)
def test_put_attributesstr(self):
'''Test PUT request containing attributes_str populates attributes'''
NODE = self.create_nodes([os.path.join(DATADIR, "AAA.txt")])[0]
FORMDATA = {"attributes_str": (None, '{"project_id":"TESTPROJECT2"}')}
if DEBUG:
print("PUT", SHOCK_URL + "/node/" + NODE, FORMDATA)
r = requests.put(SHOCK_URL + "/node/" +
NODE, files=FORMDATA, headers=TESTHEADERS)
if DEBUG:
print("RESPONSE", r.content.decode("utf-8"))
data = json.loads(r.content.decode("utf-8"))
if DEBUG:
print("DATA", data)
assert data["data"]["attributes"]["project_id"] == "TESTPROJECT2"
FORMDATA = {"attributes_str": (None, '{"project_id":"TESTPROJECT"}')}
if DEBUG:
print("PUT", SHOCK_URL + "/node/" + NODE, FORMDATA)
r = requests.put(SHOCK_URL + "/node/" +
NODE, files=FORMDATA, headers=TESTHEADERS)
if DEBUG:
print("RESPONSE", r.content.decode("utf-8"))
data = json.loads(r.content.decode("utf-8"))
assert data["data"]["attributes"]["project_id"] == "TESTPROJECT"
self.delete_nodes([NODE])
def test_post_attributes(self):
TESTURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
# to get multipart-form correctly, data has to be specified in this strange way
# and passed as the files= parameter to requests.put
TESTDATA = {}
FILES = {'attributes': open(os.path.join(DATADIR, "attr.json"), 'rb'),
'upload': open(os.path.join(DATADIR, "AAA.txt"), 'rb')}
if DEBUG:
print("POST", TESTURL, TESTDATA, TESTHEADERS, FILES)
response = requests.post(TESTURL, headers=TESTHEADERS, files=FILES, data=TESTDATA)
assert response.status_code == 200, response.content.decode("utf-8")
data = json.loads(response.content.decode("utf-8"))
assert data["status"] == 200, data["error"]
NODE = data["data"]["id"]
assert data["data"]["file"]["name"] == "AAA.txt"
assert data["data"]["attributes"]["format"] == "replace_format"
self.delete_nodes([NODE])
def test_post_gzip(self):
TESTURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
# to get multipart-form correctly, data has to be specified in this strange way
# and passed as the files= parameter to requests.put
TESTDATA = {"attributes_str": (None, '{"project_id":"TESTPROJECT"}')}
FILES = {'gzip': open(os.path.join(DATADIR, "10kb.fna.gz"), 'rb')}
if DEBUG:
print("POST", TESTURL, TESTDATA, TESTHEADERS)
response = requests.post(TESTURL, headers=TESTHEADERS, files=FILES, data=TESTDATA)
assert response.status_code == 200, response.content.decode("utf-8")
data = json.loads(response.content.decode("utf-8"))
assert data["status"] == 200, data["error"]
NODE = data["data"]["id"]
assert data["data"]["file"]["name"] == "10kb.fna"
assert data["data"]["file"]["checksum"]["md5"] == "730c276ea1510e2b7ef6b682094dd889"
self.delete_nodes([NODE])
def test_post_bzip(self):
TESTURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
# to get multipart-form correctly, data has to be specified in this strange way
# and passed as the files= parameter to requests.put
TESTDATA = {"attributes_str": (None, '{"project_id":"TESTPROJECT"}')}
FILES = {'bzip2': open(os.path.join(DATADIR, "10kb.fna.bz2"), 'rb')}
if DEBUG:
print("POST", TESTURL, TESTDATA, TESTHEADERS)
response = requests.post(TESTURL, headers=TESTHEADERS, files=FILES, data=TESTDATA)
assert response.status_code == 200, response.content.decode("utf-8")
data = json.loads(response.content.decode("utf-8"))
assert data["status"] == 200, data["error"]
NODE = data["data"]["id"]
assert data["data"]["file"]["name"] == "10kb.fna"
assert data["data"]["file"]["checksum"]["md5"] == "730c276ea1510e2b7ef6b682094dd889"
self.delete_nodes([NODE])
def test_copynode(self):
NODE = self.create_nodes([os.path.join(DATADIR, "AAA.txt")])[0]
NODEURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
# to get multipart-form correctly, data has to be specified in this strange way
# and passed as the files= parameter to requests.put
TESTDATA = {"copy_data": (None, NODE)}
if DEBUG:
print("POST", NODEURL, TESTDATA, TESTHEADERS)
response = requests.post(NODEURL, headers=TESTHEADERS, files=TESTDATA)
assert response.status_code == 200
data = json.loads(response.content.decode("utf-8"))
print(data)
NODE2 = data["data"]["id"]
NODE2URL = "{SHOCK_URL}/node/{NODE2}".format(SHOCK_URL=SHOCK_URL, NODE2=NODE2)
if DEBUG:
print("GET", NODE2URL, TESTHEADERS)
response = requests.get(NODE2URL, headers=TESTHEADERS)
assert response.status_code == 200, response.content.decode("utf-8")
data = json.loads(response.content.decode("utf-8"))
assert data["status"] == 200, data["error"]
assert data["data"]["file"]["checksum"]["md5"] == "8880cd8c1fb402585779766f681b868b" # AAA.txt
self.delete_nodes([NODE, NODE2])
def test_querynode_md5(self):
NODE = self.create_nodes([os.path.join(DATADIR, "AAA.txt")])[0]
NODEURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
PARAMS = {"querynode": "1", "file.checksum.md5": "8880cd8c1fb402585779766f681b868b"}
response = requests.get(NODEURL, headers=TESTHEADERS, params=PARAMS)
assert response.status_code == 200
data = json.loads(response.content.decode("utf-8"))
self.delete_nodes([NODE])
assert "total_count" in data.keys(), data
assert data["total_count"] > 0, data
assert data["data"][0]["file"]["checksum"]["md5"] == "8880cd8c1fb402585779766f681b868b"
def test_querynode_name(self):
NODE = self.create_nodes([os.path.join(DATADIR, "AAA.txt")])[0]
NODEURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
PARAMS = {"querynode": "1", "file.name": "AAA.txt"}
response = requests.get(NODEURL, headers=TESTHEADERS, params=PARAMS)
assert response.status_code == 200
data = json.loads(response.content.decode("utf-8"))
self.delete_nodes([NODE])
assert "total_count" in data.keys(), data
assert data["total_count"] > 0, data
assert data["data"][0]["file"]["name"] == "AAA.txt"
    def test_get_location_info(self):
        # GET /location/S3/info for a configured location should answer 200.
        # Uses TESTAHEADERS (presumably the admin-auth headers; the other
        # tests use TESTHEADERS) -- confirm against the test harness setup.
        # NOTE(review): a method with this exact name is defined again later
        # in this class; under class-body semantics the later definition
        # shadows this one, so this copy is never collected as a test.
        LOCATION = "S3" # this is defined in the Locations.yaml in {REPO}/test/config.d
        TESTURL = "/".join( [SHOCK_URL , "location" , LOCATION , "info" ] )
        response = requests.get( TESTURL, headers=TESTAHEADERS)
        if DEBUG:
            print ("URL", TESTURL)
            print("DATA", response.text)
        assert response.status_code == 200
def test_get_location_missing(self):
NODE = self.create_nodes([os.path.join(DATADIR, "AAA.txt")])[0]
NODEURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
LOCATION = "S3" # this is defined in the Locations.yaml in {REPO}/test/config.d
TESTURL = "/".join( [SHOCK_URL , "location" , LOCATION , "missing" ] )
response = requests.get( TESTURL, headers=TESTAHEADERS)
if DEBUG:
print ("URL", TESTURL)
print("DATA", response.text)
assert response.status_code == 200
def test_types_get_info(self):
LOCATION = "metadata"
TESTURL = "/".join( [SHOCK_URL , "types" , LOCATION , "info" ] )
response = requests.get( TESTURL, headers=TESTAHEADERS)
if DEBUG:
print ("URL", TESTURL)
print("DATA", response.text)
assert response.status_code == 200
    def test_get_location_info(self):
        # GET /location/S3/info for a configured location should answer 200.
        # NOTE(review): duplicate definition -- an identical method of the
        # same name appears earlier in this class; this later copy is the
        # one that is actually bound and run. Consider removing one of them.
        LOCATION = "S3" # this is defined in the Locations.yaml in {REPO}/test/config.d
        TESTURL = "/".join( [SHOCK_URL , "location" , LOCATION , "info" ] )
        response = requests.get( TESTURL, headers=TESTAHEADERS)
        if DEBUG:
            print ("URL", TESTURL)
            print("DATA", response.text)
        assert response.status_code == 200
def test_get_location_missing1(self):
NODE = self.create_nodes([os.path.join(DATADIR, "AAA.txt")])[0]
NODEURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
LOCATION = "S3" # this is defined in the Locations.yaml in {REPO}/test/config.d
TESTURL = "/".join( [SHOCK_URL , "location" , LOCATION , "missing" ] )
response = requests.get( TESTURL, headers=TESTAHEADERS)
if DEBUG:
print ("URL", TESTURL)
print("DATA", response.text)
assert response.status_code == 200
def test_NODE_set_location(self):
NODE = self.create_nodes([os.path.join(DATADIR, "AAA.txt")])[0]
PARAMS = {"id": "S3", "stored": "true"}
TESTURL = "/".join( [SHOCK_URL , "node", NODE, "locations" ] )
response = requests.post(TESTURL, headers=TESTAHEADERS, params=PARAMS)
if DEBUG:
print ("URL", TESTURL)
print("DATA", response.text)
assert response.status_code == 200
def test_get_location_missing2(self):
NODE = self.create_nodes([os.path.join(DATADIR, "AAA.txt")])[0]
NODEURL = "{SHOCK_URL}/node".format(SHOCK_URL=SHOCK_URL)
LOCATION = "S3" # this is defined in the Locations.yaml in {REPO}/test/config.d
TESTURL = "/".join( [SHOCK_URL , "location" , LOCATION , "missing" ] )
response = requests.get( TESTURL, headers=TESTAHEADERS)
if DEBUG:
print ("URL", TESTURL)
print("DATA", response.text)
assert response.status_code == 200
    def test_node_set_location(self) :
        # TODO: placeholder -- not implemented yet; reserves the test name.
        pass
    def test_node_get_location(self) :
        # TODO: placeholder -- not implemented yet; reserves the test name.
        pass
|
|
"""
Wrapper class around the ndarray object for the array API standard.
The array API standard defines some behaviors differently than ndarray, in
particular, type promotion rules are different (the standard has no
value-based casting). The standard also specifies a more limited subset of
array methods and functionalities than are implemented on ndarray. Since the
goal of the array_api namespace is to be a minimal implementation of the array
API standard, we need to define a separate wrapper class for the array_api
namespace.
The standard compliant class is only a wrapper class. It is *not* a subclass
of ndarray.
"""
from __future__ import annotations
import operator
from enum import IntEnum
from ._creation_functions import asarray
from ._dtypes import (
_all_dtypes,
_boolean_dtypes,
_integer_dtypes,
_integer_or_boolean_dtypes,
_floating_dtypes,
_numeric_dtypes,
_result_type,
_dtype_categories,
)
from typing import TYPE_CHECKING, Optional, Tuple, Union, Any
if TYPE_CHECKING:
from ._typing import Any, PyCapsule, Device, Dtype
import numpy.typing as npt
import numpy as np
from numpy import array_api
class Array:
"""
n-d array object for the array API namespace.
See the docstring of :py:obj:`np.ndarray <numpy.ndarray>` for more
information.
This is a wrapper around numpy.ndarray that restricts the usage to only
those things that are required by the array API namespace. Note,
attributes on this object that start with a single underscore are not part
of the API specification and should only be used internally. This object
should not be constructed directly. Rather, use one of the creation
functions, such as asarray().
"""
# Use a custom constructor instead of __init__, as manually initializing
# this class is not supported API.
@classmethod
def _new(cls, x, /):
"""
This is a private method for initializing the array API Array
object.
Functions outside of the array_api submodule should not use this
method. Use one of the creation functions instead, such as
``asarray``.
"""
obj = super().__new__(cls)
# Note: The spec does not have array scalars, only 0-D arrays.
if isinstance(x, np.generic):
# Convert the array scalar to a 0-D array
x = np.asarray(x)
if x.dtype not in _all_dtypes:
raise TypeError(
f"The array_api namespace does not support the dtype '{x.dtype}'"
)
obj._array = x
return obj
# Prevent Array() from working
def __new__(cls, *args, **kwargs):
raise TypeError(
"The array_api Array object should not be instantiated directly. Use an array creation function, such as asarray(), instead."
)
# These functions are not required by the spec, but are implemented for
# the sake of usability.
def __str__(self: Array, /) -> str:
"""
Performs the operation __str__.
"""
return self._array.__str__().replace("array", "Array")
def __repr__(self: Array, /) -> str:
"""
Performs the operation __repr__.
"""
suffix = f", dtype={self.dtype.name})"
if 0 in self.shape:
prefix = "empty("
mid = str(self.shape)
else:
prefix = "Array("
mid = np.array2string(self._array, separator=', ', prefix=prefix, suffix=suffix)
return prefix + mid + suffix
# This function is not required by the spec, but we implement it here for
# convenience so that np.asarray(np.array_api.Array) will work.
def __array__(self, dtype: None | np.dtype[Any] = None) -> npt.NDArray[Any]:
"""
Warning: this method is NOT part of the array API spec. Implementers
of other libraries need not include it, and users should not assume it
will be present in other implementations.
"""
return np.asarray(self._array, dtype=dtype)
# These are various helper functions to make the array behavior match the
# spec in places where it either deviates from or is more strict than
# NumPy behavior
    def _check_allowed_dtypes(self, other, dtype_category, op):
        """
        Helper function for operators to only allow specific input dtypes

        Use like

            other = self._check_allowed_dtypes(other, 'numeric', '__add__')
            if other is NotImplemented:
                return other

        ``other`` may be a Python scalar (promoted to an Array of self's
        dtype) or an Array.  Returns the possibly-promoted ``other``, or
        NotImplemented for unsupported operand types.  Raises TypeError when
        either operand's dtype falls outside ``dtype_category``, or when an
        in-place op would require changing self's dtype.
        """
        if self.dtype not in _dtype_categories[dtype_category]:
            raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
        if isinstance(other, (int, float, bool)):
            other = self._promote_scalar(other)
        elif isinstance(other, Array):
            if other.dtype not in _dtype_categories[dtype_category]:
                raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
        else:
            # Unknown operand type: let Python try the reflected operator.
            return NotImplemented
        # This will raise TypeError for type combinations that are not allowed
        # to promote in the spec (even if the NumPy array operator would
        # promote them).
        res_dtype = _result_type(self.dtype, other.dtype)
        if op.startswith("__i"):
            # Note: NumPy will allow in-place operators in some cases where
            # the type promoted operator does not match the left-hand side
            # operand. For example,
            # >>> a = np.array(1, dtype=np.int8)
            # >>> a += np.array(1, dtype=np.int16)
            # The spec explicitly disallows this.
            if res_dtype != self.dtype:
                raise TypeError(
                    f"Cannot perform {op} with dtypes {self.dtype} and {other.dtype}"
                )
        return other
    # Helper function to match the type promotion rules in the spec
    def _promote_scalar(self, scalar):
        """
        Returns a promoted version of a Python scalar appropriate for use with
        operations on self.

        This may raise an OverflowError in cases where the scalar is an
        integer that is too large to fit in a NumPy integer dtype, or
        TypeError when the scalar type is incompatible with the dtype of self.
        """
        # bool must be checked before int because bool is a subclass of int.
        if isinstance(scalar, bool):
            if self.dtype not in _boolean_dtypes:
                raise TypeError(
                    "Python bool scalars can only be promoted with bool arrays"
                )
        elif isinstance(scalar, int):
            if self.dtype in _boolean_dtypes:
                raise TypeError(
                    "Python int scalars cannot be promoted with bool arrays"
                )
        elif isinstance(scalar, float):
            if self.dtype not in _floating_dtypes:
                raise TypeError(
                    "Python float scalars can only be promoted with floating-point arrays."
                )
        else:
            raise TypeError("'scalar' must be a Python scalar")
        # Note: the spec only specifies integer-dtype/int promotion
        # behavior for integers within the bounds of the integer dtype.
        # Outside of those bounds we use the default NumPy behavior (either
        # cast or raise OverflowError).
        return Array._new(np.array(scalar, self.dtype))
@staticmethod
def _normalize_two_args(x1, x2):
"""
Normalize inputs to two arg functions to fix type promotion rules
NumPy deviates from the spec type promotion rules in cases where one
argument is 0-dimensional and the other is not. For example:
>>> import numpy as np
>>> a = np.array([1.0], dtype=np.float32)
>>> b = np.array(1.0, dtype=np.float64)
>>> np.add(a, b) # The spec says this should be float64
array([2.], dtype=float32)
To fix this, we add a dimension to the 0-dimension array before passing it
through. This works because a dimension would be added anyway from
broadcasting, so the resulting shape is the same, but this prevents NumPy
from not promoting the dtype.
"""
# Another option would be to use signature=(x1.dtype, x2.dtype, None),
# but that only works for ufuncs, so we would have to call the ufuncs
# directly in the operator methods. One should also note that this
# sort of trick wouldn't work for functions like searchsorted, which
# don't do normal broadcasting, but there aren't any functions like
# that in the array API namespace.
if x1.ndim == 0 and x2.ndim != 0:
# The _array[None] workaround was chosen because it is relatively
# performant. broadcast_to(x1._array, x2.shape) is much slower. We
# could also manually type promote x2, but that is more complicated
# and about the same performance as this.
x1 = Array._new(x1._array[None])
elif x2.ndim == 0 and x1.ndim != 0:
x2 = Array._new(x2._array[None])
return (x1, x2)
    # Note: A large fraction of allowed indices are disallowed here (see the
    # docstring below)
    @staticmethod
    def _validate_index(key, shape):
        """
        Validate an index according to the array API.

        The array API specification only requires a subset of indices that are
        supported by NumPy. This function will reject any index that is
        allowed by NumPy but not required by the array API specification. We
        always raise ``IndexError`` on such indices (the spec does not require
        any specific behavior on them, but this makes the NumPy array API
        namespace a minimal implementation of the spec). See
        https://data-apis.org/array-api/latest/API_specification/indexing.html
        for the full list of required indexing behavior

        This function either raises IndexError if the index ``key`` is
        invalid, or a new key to be used in place of ``key`` in indexing. It
        only raises ``IndexError`` on indices that are not already rejected by
        NumPy, as NumPy will already raise the appropriate error on such
        indices. ``shape`` may be None, in which case, only cases that are
        independent of the array shape are checked.

        The following cases are allowed by NumPy, but not specified by the array
        API specification:

        - Indices to not include an implicit ellipsis at the end. That is,
          every axis of an array must be explicitly indexed or an ellipsis
          included.

        - The start and stop of a slice may not be out of bounds. In
          particular, for a slice ``i:j:k`` on an axis of size ``n``, only the
          following are allowed:

          - ``i`` or ``j`` omitted (``None``).
          - ``-n <= i <= max(0, n - 1)``.
          - For ``k > 0`` or ``k`` omitted (``None``), ``-n <= j <= n``.
          - For ``k < 0``, ``-n - 1 <= j <= max(0, n - 1)``.

        - Boolean array indices are not allowed as part of a larger tuple
          index.

        - Integer array indices are not allowed (with the exception of 0-D
          arrays, which are treated the same as scalars).

        Additionally, it should be noted that indices that would return a
        scalar in NumPy will return a 0-D array. Array scalars are not allowed
        in the specification, only 0-D arrays. This is done in the
        ``Array._new`` constructor, not this function.
        """
        if isinstance(key, slice):
            # A bare slice indexes a single axis; shape checks need that axis.
            if shape is None:
                return key
            if shape == ():
                return key
            if len(shape) > 1:
                raise IndexError(
                    "Multidimensional arrays must include an index for every axis or use an ellipsis"
                )
            size = shape[0]
            # Ensure invalid slice entries are passed through.
            if key.start is not None:
                try:
                    operator.index(key.start)
                except TypeError:
                    # Non-integer start: let NumPy raise its own error.
                    return key
                if not (-size <= key.start <= size):
                    raise IndexError(
                        "Slices with out-of-bounds start are not allowed in the array API namespace"
                    )
            if key.stop is not None:
                try:
                    operator.index(key.stop)
                except TypeError:
                    # Non-integer stop: let NumPy raise its own error.
                    return key
                step = 1 if key.step is None else key.step
                # Allowed stop range depends on the sign of the step (see
                # the bounds table in the docstring above).
                if (step > 0 and not (-size <= key.stop <= size)
                    or step < 0 and not (-size - 1 <= key.stop <= max(0, size - 1))):
                    raise IndexError("Slices with out-of-bounds stop are not allowed in the array API namespace")
            return key
        elif isinstance(key, tuple):
            # Validate each element shape-independently first.
            key = tuple(Array._validate_index(idx, None) for idx in key)
            for idx in key:
                if (
                    isinstance(idx, np.ndarray)
                    and idx.dtype in _boolean_dtypes
                    or isinstance(idx, (bool, np.bool_))
                ):
                    # A boolean index is only allowed as the sole element.
                    if len(key) == 1:
                        return key
                    raise IndexError(
                        "Boolean array indices combined with other indices are not allowed in the array API namespace"
                    )
                if isinstance(idx, tuple):
                    raise IndexError(
                        "Nested tuple indices are not allowed in the array API namespace"
                    )
            if shape is None:
                return key
            n_ellipsis = key.count(...)
            if n_ellipsis > 1:
                # Multiple ellipses: NumPy itself rejects this; pass through.
                return key
            ellipsis_i = key.index(...) if n_ellipsis else len(key)
            # Re-validate per-axis: indices before the ellipsis pair with the
            # leading axes, indices after it pair with the trailing axes.
            for idx, size in list(zip(key[:ellipsis_i], shape)) + list(
                zip(key[:ellipsis_i:-1], shape[:ellipsis_i:-1])
            ):
                Array._validate_index(idx, (size,))
            if n_ellipsis == 0 and len(key) < len(shape):
                raise IndexError(
                    "Multidimensional arrays must include an index for every axis or use an ellipsis"
                )
            return key
        elif isinstance(key, bool):
            return key
        elif isinstance(key, Array):
            if key.dtype in _integer_dtypes:
                # Only 0-D integer arrays (scalar-like) are permitted.
                if key.ndim != 0:
                    raise IndexError(
                        "Non-zero dimensional integer array indices are not allowed in the array API namespace"
                    )
            # Unwrap to the underlying ndarray for actual indexing.
            return key._array
        elif key is Ellipsis:
            return key
        elif key is None:
            raise IndexError(
                "newaxis indices are not allowed in the array API namespace"
            )
        try:
            key = operator.index(key)
            if shape is not None and len(shape) > 1:
                raise IndexError(
                    "Multidimensional arrays must include an index for every axis or use an ellipsis"
                )
            return key
        except TypeError:
            # Note: This also omits boolean arrays that are not already in
            # Array() form, like a list of booleans.
            raise IndexError(
                "Only integers, slices (`:`), ellipsis (`...`), and boolean arrays are valid indices in the array API namespace"
            )
# Everything below this line is required by the spec.
def __abs__(self: Array, /) -> Array:
"""
Performs the operation __abs__.
"""
if self.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in __abs__")
res = self._array.__abs__()
return self.__class__._new(res)
def __add__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __add__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__add__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__add__(other._array)
return self.__class__._new(res)
def __and__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __and__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__and__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__and__(other._array)
return self.__class__._new(res)
def __array_namespace__(
self: Array, /, *, api_version: Optional[str] = None
) -> Any:
if api_version is not None and not api_version.startswith("2021."):
raise ValueError(f"Unrecognized array API version: {api_version!r}")
return array_api
def __bool__(self: Array, /) -> bool:
"""
Performs the operation __bool__.
"""
# Note: This is an error here.
if self._array.ndim != 0:
raise TypeError("bool is only allowed on arrays with 0 dimensions")
if self.dtype not in _boolean_dtypes:
raise ValueError("bool is only allowed on boolean arrays")
res = self._array.__bool__()
return res
def __dlpack__(self: Array, /, *, stream: None = None) -> PyCapsule:
"""
Performs the operation __dlpack__.
"""
return self._array.__dlpack__(stream=stream)
def __dlpack_device__(self: Array, /) -> Tuple[IntEnum, int]:
"""
Performs the operation __dlpack_device__.
"""
# Note: device support is required for this
return self._array.__dlpack_device__()
def __eq__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
"""
Performs the operation __eq__.
"""
# Even though "all" dtypes are allowed, we still require them to be
# promotable with each other.
other = self._check_allowed_dtypes(other, "all", "__eq__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__eq__(other._array)
return self.__class__._new(res)
def __float__(self: Array, /) -> float:
"""
Performs the operation __float__.
"""
# Note: This is an error here.
if self._array.ndim != 0:
raise TypeError("float is only allowed on arrays with 0 dimensions")
if self.dtype not in _floating_dtypes:
raise ValueError("float is only allowed on floating-point arrays")
res = self._array.__float__()
return res
def __floordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __floordiv__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__floordiv__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__floordiv__(other._array)
return self.__class__._new(res)
def __ge__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __ge__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__ge__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__ge__(other._array)
return self.__class__._new(res)
def __getitem__(
self: Array,
key: Union[
int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array
],
/,
) -> Array:
"""
Performs the operation __getitem__.
"""
# Note: Only indices required by the spec are allowed. See the
# docstring of _validate_index
key = self._validate_index(key, self.shape)
res = self._array.__getitem__(key)
return self._new(res)
def __gt__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __gt__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__gt__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__gt__(other._array)
return self.__class__._new(res)
def __int__(self: Array, /) -> int:
"""
Performs the operation __int__.
"""
# Note: This is an error here.
if self._array.ndim != 0:
raise TypeError("int is only allowed on arrays with 0 dimensions")
if self.dtype not in _integer_dtypes:
raise ValueError("int is only allowed on integer arrays")
res = self._array.__int__()
return res
def __index__(self: Array, /) -> int:
"""
Performs the operation __index__.
"""
res = self._array.__index__()
return res
def __invert__(self: Array, /) -> Array:
"""
Performs the operation __invert__.
"""
if self.dtype not in _integer_or_boolean_dtypes:
raise TypeError("Only integer or boolean dtypes are allowed in __invert__")
res = self._array.__invert__()
return self.__class__._new(res)
def __le__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __le__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__le__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__le__(other._array)
return self.__class__._new(res)
def __lshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __lshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__lshift__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__lshift__(other._array)
return self.__class__._new(res)
def __lt__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __lt__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__lt__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__lt__(other._array)
return self.__class__._new(res)
def __matmul__(self: Array, other: Array, /) -> Array:
"""
Performs the operation __matmul__.
"""
# matmul is not defined for scalars, but without this, we may get
# the wrong error message from asarray.
other = self._check_allowed_dtypes(other, "numeric", "__matmul__")
if other is NotImplemented:
return other
res = self._array.__matmul__(other._array)
return self.__class__._new(res)
def __mod__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __mod__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__mod__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__mod__(other._array)
return self.__class__._new(res)
def __mul__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __mul__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__mul__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__mul__(other._array)
return self.__class__._new(res)
def __ne__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
"""
Performs the operation __ne__.
"""
other = self._check_allowed_dtypes(other, "all", "__ne__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__ne__(other._array)
return self.__class__._new(res)
def __neg__(self: Array, /) -> Array:
"""
Performs the operation __neg__.
"""
if self.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in __neg__")
res = self._array.__neg__()
return self.__class__._new(res)
def __or__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __or__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__or__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__or__(other._array)
return self.__class__._new(res)
def __pos__(self: Array, /) -> Array:
"""
Performs the operation __pos__.
"""
if self.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in __pos__")
res = self._array.__pos__()
return self.__class__._new(res)
# PEP 484 requires int to be a subtype of float, but __pow__ should not
# accept int.
def __pow__(self: Array, other: Union[float, Array], /) -> Array:
"""
Performs the operation __pow__.
"""
from ._elementwise_functions import pow
other = self._check_allowed_dtypes(other, "floating-point", "__pow__")
if other is NotImplemented:
return other
# Note: NumPy's __pow__ does not follow type promotion rules for 0-d
# arrays, so we use pow() here instead.
return pow(self, other)
def __rshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __rshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__rshift__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rshift__(other._array)
return self.__class__._new(res)
def __setitem__(
self,
key: Union[
int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array
],
value: Union[int, float, bool, Array],
/,
) -> None:
"""
Performs the operation __setitem__.
"""
# Note: Only indices required by the spec are allowed. See the
# docstring of _validate_index
key = self._validate_index(key, self.shape)
self._array.__setitem__(key, asarray(value)._array)
def __sub__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __sub__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__sub__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__sub__(other._array)
return self.__class__._new(res)
# PEP 484 requires int to be a subtype of float, but __truediv__ should
# not accept int.
def __truediv__(self: Array, other: Union[float, Array], /) -> Array:
"""
Performs the operation __truediv__.
"""
other = self._check_allowed_dtypes(other, "floating-point", "__truediv__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__truediv__(other._array)
return self.__class__._new(res)
def __xor__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __xor__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__xor__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__xor__(other._array)
return self.__class__._new(res)
def __iadd__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __iadd__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__iadd__")
if other is NotImplemented:
return other
self._array.__iadd__(other._array)
return self
def __radd__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __radd__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__radd__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__radd__(other._array)
return self.__class__._new(res)
def __iand__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __iand__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__iand__")
if other is NotImplemented:
return other
self._array.__iand__(other._array)
return self
def __rand__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __rand__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__rand__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rand__(other._array)
return self.__class__._new(res)
def __ifloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __ifloordiv__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__ifloordiv__")
if other is NotImplemented:
return other
self._array.__ifloordiv__(other._array)
return self
def __rfloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __rfloordiv__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__rfloordiv__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rfloordiv__(other._array)
return self.__class__._new(res)
def __ilshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __ilshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__ilshift__")
if other is NotImplemented:
return other
self._array.__ilshift__(other._array)
return self
def __rlshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __rlshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__rlshift__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rlshift__(other._array)
return self.__class__._new(res)
def __imatmul__(self: Array, other: Array, /) -> Array:
"""
Performs the operation __imatmul__.
"""
# Note: NumPy does not implement __imatmul__.
# matmul is not defined for scalars, but without this, we may get
# the wrong error message from asarray.
other = self._check_allowed_dtypes(other, "numeric", "__imatmul__")
if other is NotImplemented:
return other
# __imatmul__ can only be allowed when it would not change the shape
# of self.
other_shape = other.shape
if self.shape == () or other_shape == ():
raise ValueError("@= requires at least one dimension")
if len(other_shape) == 1 or other_shape[-1] != other_shape[-2]:
raise ValueError("@= cannot change the shape of the input array")
self._array[:] = self._array.__matmul__(other._array)
return self
def __rmatmul__(self: Array, other: Array, /) -> Array:
"""
Performs the operation __rmatmul__.
"""
# matmul is not defined for scalars, but without this, we may get
# the wrong error message from asarray.
other = self._check_allowed_dtypes(other, "numeric", "__rmatmul__")
if other is NotImplemented:
return other
res = self._array.__rmatmul__(other._array)
return self.__class__._new(res)
def __imod__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __imod__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__imod__")
if other is NotImplemented:
return other
self._array.__imod__(other._array)
return self
def __rmod__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __rmod__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__rmod__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rmod__(other._array)
return self.__class__._new(res)
def __imul__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __imul__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__imul__")
if other is NotImplemented:
return other
self._array.__imul__(other._array)
return self
def __rmul__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __rmul__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__rmul__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rmul__(other._array)
return self.__class__._new(res)
def __ior__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __ior__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__ior__")
if other is NotImplemented:
return other
self._array.__ior__(other._array)
return self
def __ror__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __ror__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__ror__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__ror__(other._array)
return self.__class__._new(res)
def __ipow__(self: Array, other: Union[float, Array], /) -> Array:
"""
Performs the operation __ipow__.
"""
other = self._check_allowed_dtypes(other, "floating-point", "__ipow__")
if other is NotImplemented:
return other
self._array.__ipow__(other._array)
return self
def __rpow__(self: Array, other: Union[float, Array], /) -> Array:
"""
Performs the operation __rpow__.
"""
from ._elementwise_functions import pow
other = self._check_allowed_dtypes(other, "floating-point", "__rpow__")
if other is NotImplemented:
return other
# Note: NumPy's __pow__ does not follow the spec type promotion rules
# for 0-d arrays, so we use pow() here instead.
return pow(other, self)
def __irshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __irshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__irshift__")
if other is NotImplemented:
return other
self._array.__irshift__(other._array)
return self
def __rrshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __rrshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__rrshift__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rrshift__(other._array)
return self.__class__._new(res)
def __isub__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __isub__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__isub__")
if other is NotImplemented:
return other
self._array.__isub__(other._array)
return self
def __rsub__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __rsub__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__rsub__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rsub__(other._array)
return self.__class__._new(res)
def __itruediv__(self: Array, other: Union[float, Array], /) -> Array:
"""
Performs the operation __itruediv__.
"""
other = self._check_allowed_dtypes(other, "floating-point", "__itruediv__")
if other is NotImplemented:
return other
self._array.__itruediv__(other._array)
return self
def __rtruediv__(self: Array, other: Union[float, Array], /) -> Array:
"""
Performs the operation __rtruediv__.
"""
other = self._check_allowed_dtypes(other, "floating-point", "__rtruediv__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rtruediv__(other._array)
return self.__class__._new(res)
def __ixor__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __ixor__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__ixor__")
if other is NotImplemented:
return other
self._array.__ixor__(other._array)
return self
def __rxor__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __rxor__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__rxor__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rxor__(other._array)
return self.__class__._new(res)
def to_device(self: Array, device: Device, /, stream: None = None) -> Array:
if stream is not None:
raise ValueError("The stream argument to to_device() is not supported")
if device == 'cpu':
return self
raise ValueError(f"Unsupported device {device!r}")
@property
def dtype(self) -> Dtype:
"""
Array API compatible wrapper for :py:meth:`np.ndarray.dtype <numpy.ndarray.dtype>`.
See its docstring for more information.
"""
return self._array.dtype
@property
def device(self) -> Device:
return "cpu"
# Note: mT is new in array API spec (see matrix_transpose)
@property
def mT(self) -> Array:
from .linalg import matrix_transpose
return matrix_transpose(self)
@property
def ndim(self) -> int:
"""
Array API compatible wrapper for :py:meth:`np.ndarray.ndim <numpy.ndarray.ndim>`.
See its docstring for more information.
"""
return self._array.ndim
@property
def shape(self) -> Tuple[int, ...]:
"""
Array API compatible wrapper for :py:meth:`np.ndarray.shape <numpy.ndarray.shape>`.
See its docstring for more information.
"""
return self._array.shape
@property
def size(self) -> int:
"""
Array API compatible wrapper for :py:meth:`np.ndarray.size <numpy.ndarray.size>`.
See its docstring for more information.
"""
return self._array.size
@property
def T(self) -> Array:
"""
Array API compatible wrapper for :py:meth:`np.ndarray.T <numpy.ndarray.T>`.
See its docstring for more information.
"""
# Note: T only works on 2-dimensional arrays. See the corresponding
# note in the specification:
# https://data-apis.org/array-api/latest/API_specification/array_object.html#t
if self.ndim != 2:
raise ValueError("x.T requires x to have 2 dimensions. Use x.mT to transpose stacks of matrices and permute_dims() to permute dimensions.")
return self.__class__._new(self._array.T)
|
|
import unittest
from nose.tools import eq_, ok_
from mock import patch, Mock
from docar.backends import BackendManager
from docar.backends.django import DjangoBackendManager
from docar import Document, Collection, fields
class when_a_django_backend_manager_gets_instantiated(unittest.TestCase):
# def it_can_fetch_save_and_delete_to_the_specific_backend_manager(self):
# with patch('docar.backends.django.DjangoBackendManager') as mock:
# mock_manager = Mock()
# mock_manager = mock.return_value
# manager = BackendManager('django')
# # first assert that the manager is really mocked
# ok_(isinstance(manager, Mock))
# manager.fetch()
# manager.save()
# manager.delete()
# eq_(True, mock_manager.fetch.called)
# eq_(True, mock_manager.save.called)
# eq_(True, mock_manager.delete.called)
    def it_returns_a_dict_representation_of_the_instance(self):
        """_to_dict should map plain fields, foreign documents, collections
        and map_*_field overrides from the model instance into a dict,
        skipping model attributes with no matching document field."""
        Doc2Model = Mock(name="Doc2Model")
        # spec_set limits the mock to exactly these attributes so that
        # _to_dict's attribute probing behaves deterministically.
        mock_doc2_model = Mock(name="mock_model_instance",
                spec_set=['name', 'choice', 'related', 'coll', 'nonexistent',
                    'overwrite', 'overwritten'])
        mock_related = Mock()
        mock_related.name = 'related name'
        mock_m2m = Mock(name="mock_m2m")
        # return an empty list to have an iterator
        mock_m2m.all.return_value = []
        mock_doc2_model.name = "value"
        mock_doc2_model.choice = "True"
        mock_doc2_model.related = mock_related
        mock_doc2_model.coll = mock_m2m
        # the following instance field will be ignored by the to_dict method.
        mock_doc2_model.nonexistent = "whatever"
        # The following field will get overwritten
        mock_doc2_model.overwritten = 'jaja'
        Doc2Model.objects.get.return_value = mock_doc2_model
        class Doc(Document):
            name = fields.StringField()
            class Meta:
                backend_type = 'django'
                identifier = 'name'
                model = Mock()
        class Coll(Collection):
            document = Doc
        class Doc2(Document):
            name = fields.StringField()
            choice = fields.BooleanField()
            related = fields.ForeignDocument(Doc)
            coll = fields.CollectionField(Coll)
            skipped = fields.NumberField()
            overwrite = fields.StringField()
            class Meta:
                model = Doc2Model
                backend_type = 'django'
                identifier = 'name'
            # map_*_field hooks redirect a document field to a differently
            # named model attribute ('overwrite' reads from 'overwritten').
            def map_overwrite_field(self):
                return 'overwritten'
        expected = {
            'name': 'value',
            'choice': 'True',
            'related': {'name': 'related name'},
            'coll': [],
            'skipped': None,
            'overwrite': 'jaja'
        }
        doc = Doc2({'name': 'value'})
        doc._backend_manager.instance = mock_doc2_model
        eq_(expected, doc._backend_manager._to_dict(doc))
    def it_can_fetch_data_from_the_underlying_model(self):
        """fetch() should look the model instance up by the document's
        identifier fields and return the data as a plain dict."""
        DjangoModel = Mock(name="DjangoModel")
        mock_model = Mock()
        mock_model.id = 1
        DjangoModel.objects.get.return_value = mock_model
        manager = BackendManager('django')
        # The manager needs to know which model it connects to
        # This is normally done when the Document is created.
        manager._model = DjangoModel
        # Build a mock document exposing just the attributes fetch() reads.
        doc = Mock(name="mock_document", spec=Document)
        field = fields.NumberField()
        field.name = "id"
        doc.id = 1
        doc._context = {}
        doc._get_context.return_value = {}
        doc._meta.identifier = ["id"]
        doc._identifier_state.return_value = {"id": 1}
        doc._save.return_value = {"id": 1}
        doc._meta.local_fields = [field]
        # make sure we are working with correct expectations
        eq_(DjangoBackendManager, type(manager))
        eq_({'id': 1}, manager.fetch(doc))
        eq_([('objects.get', {'id': 1})], DjangoModel.method_calls)
    def it_can_save_data_to_the_underlying_model(self):
        """save() should resolve the backing instance via the identifier
        fields and persist it; it returns nothing on success."""
        DjangoModel = Mock(name="DjangoModel")
        mock_model = Mock()
        DjangoModel.objects.get_or_create.return_value = (mock_model, False)
        manager = BackendManager('django')
        # The manager needs to know which model it connects to
        # This is normally done when the Document is created.
        manager._model = DjangoModel
        # make sure we are working with correct expectations
        eq_(DjangoBackendManager, type(manager))
        # Build a mock document exposing just the attributes save() reads.
        doc = Mock(name="mock_document", spec=Document)
        field = fields.NumberField()
        field.name = "id"
        doc.id = 1
        doc._context = {}
        doc._get_context.return_value = {}
        doc._meta.identifier = ["id"]
        doc._identifier_state.return_value = {"id": 1}
        doc._save.return_value = {"id": 1}
        doc._meta.local_fields = [field]
        # the manager.save() method doesn't return on success
        manager.save(doc)
        eq_([('objects.get', {'id': 1})], DjangoModel.method_calls)
    def it_can_delete_the_underlying_model_instance(self):
        """delete() removes the backing model instance; deleting a document
        whose instance no longer exists is a silent no-op."""
        DjangoModel = Mock(name="DjangoModel")
        mock_model = Mock()
        mock_model.id = 1
        DjangoModel.objects.get.return_value = mock_model
        manager = BackendManager('django')
        # The manager needs to know which model it connects to
        # This is normally done when the Document is created.
        manager._model = DjangoModel
        # make sure we are working with correct expectations
        eq_(DjangoBackendManager, type(manager))
        class Doc(Document):
            id = fields.NumberField()
            class Meta:
                backend_type = 'django'
                model = DjangoModel
        doc = Doc({'id': 1})
        manager.delete(doc)
        eq_([('objects.get', {'id': 1})], DjangoModel.method_calls)
        eq_([('delete',)], mock_model.method_calls)
        # If the model does not exist, nothing happens
        DjangoModel.reset_mock()
        mock_model.reset_mock()
        DjangoModel.DoesNotExist = Exception
        DjangoModel.objects.get.side_effect = DjangoModel.DoesNotExist
        manager.delete(doc)
        eq_([('objects.get', {'id': 1})], DjangoModel.method_calls)
        # no method of the model should have been called
        eq_([], mock_model.method_calls)
    def it_can_return_a_django_m2m_relationship_as_collection(self):
        """fetch() should render a many-to-many relation as a list of
        document dicts under the collection field's name."""
        DjangoModel = Mock(name="DjangoModel")
        mock_model = Mock()
        mock_model.id = 1
        mock_model.get_absolute_url.return_value = "A"
        OtherModel = Mock(name="OtherMock")
        mock1 = Mock()
        mock1.id = 1
        mock1.get_absolute_url.return_value = "1"
        mock2 = Mock()
        mock2.id = 2
        mock2.get_absolute_url.return_value = "2"
        # This mocks a many2many relationship, its not a queryset, just a list
        mock_model.others.all.return_value = [mock1, mock2]
        # pop() serves mock1 first, then mock2, matching the fetch order.
        x = [mock2, mock1]
        def mock_side_effect(*args, **kwargs):
            return x.pop()
        OtherModel.objects.get.side_effect = mock_side_effect
        DjangoModel.objects.get.return_value = mock_model
        # Now create a simple document setup
        class OtherDoc(Document):
            id = fields.NumberField()
            class Meta:
                backend_type = 'django'
                identifier = 'id'
                model = OtherModel
        class OtherCollection(Collection):
            document = OtherDoc
        class Doc(Document):
            id = fields.NumberField()
            others = fields.CollectionField(OtherCollection)
            class Meta:
                backend_type = 'django'
                identifier = 'id'
                model = DjangoModel
        manager = BackendManager('django')
        # The manager needs to know which model it connects to
        # This is normally done when the Document is created.
        manager._model = DjangoModel
        # make sure we are working with correct expectations
        eq_(DjangoBackendManager, type(manager))
        doc = Doc({'id': 1})
        #doc.fetch()
        expected = {
            'id': 1,
            'others': [{'id': 1}, {'id': 2}]}
        eq_(expected, manager.fetch(doc))
    def it_saves_collections_as_m2m_relations(self):
        """Saving a document with a CollectionField should sync the members
        of the many-to-many relation on the backend model."""
        # prepare the app structure
        Doc1Model = Mock(name="doc1_model")
        class Doc1(Document):
            id = fields.NumberField()
            class Meta:
                backend_type = 'django'
                model = Doc1Model
        class Doc1Col(Collection):
            document = Doc1
        Doc2Model = Mock(name="doc2_model")
        class Doc2(Document):
            id = fields.NumberField()
            # The bool field tests an edge case where boolean fields with a
            # default value of False are ignored unfortunately
            bool = fields.BooleanField(default=False)
            col = fields.CollectionField(Doc1Col)
            class Meta:
                backend_type = 'django'
                model = Doc2Model
        request = {
            "id": 1,
            "col": [
                {"id": 1},
                {"id": 2}
            ]
        }
        # now mock the underlying model store
        mock_doc2 = Mock(name="mock_doc2_model")
        mock_doc1_1 = Mock(name="mock_doc1_1")
        mock_doc1_2 = Mock(name="mock_doc1_2")
        Doc2Model.objects.get.return_value = mock_doc2
        # pop() serves mock_doc1_1 first, then mock_doc1_2.
        collection_model = [mock_doc1_2, mock_doc1_1]
        def se(*args, **kwargs):
            return collection_model.pop()
        # I have to mock a queryset for each collection, cause I run an exclude
        # on it everytime I iterate through an item of the collection
        qs_col = Mock(name="queryset")
        qs_col.__len__ = Mock(return_value=2)
        qs_col.__getitem__ = Mock()
        qs_col.__iter__ = Mock(
            return_value=iter([mock_doc1_1, mock_doc1_2]))
        qs_cola = Mock(name="queryset-exclude_cola")
        qs_cola.__len__ = Mock(return_value=1)
        qs_cola.__getitem__ = Mock()
        qs_cola.__iter__ = Mock(return_value=iter([mock_doc1_2]))
        qs_col.exclude.return_value = qs_cola
        qs_colb = Mock(name="queryset-exclude_colb")
        qs_colb.__len__ = Mock(return_value=1)
        qs_colb.__getitem__ = Mock()
        qs_colb.__iter__ = Mock(return_value=iter([]))
        qs_cola.exclude.return_value = qs_colb
        mock_doc2.col = Mock()
        # The relation descriptor exposes its model class via __dict__.
        mock_doc2.col.__dict__['model'] = Doc1Model
        mock_doc2.col.get.side_effect = se
        mock_doc2.col.get.return_value = True
        mock_doc2.col.all.return_value = qs_col
        doc = Doc2(request)
        eq_(2, len(doc.col.collection_set))
        doc.save()
        eq_(True, mock_doc2.col.get.called)
        eq_(True, Doc2Model.objects.get.called)
        Doc2Model.objects.get.assert_called_once_with(id=1)
    def it_supplies_the_foreign_model_instance_when_saving_a_foreign_key(self):
        """Saving a ForeignDocument should hand the related model instance to
        the backend, whether it already exists or has to be created."""
        # prepare the app structure
        Doc1Model = Mock(name="doc1_model")
        class Doc1(Document):
            id = fields.NumberField()
            class Meta:
                backend_type = 'django'
                model = Doc1Model
        Doc2Model = Mock(name="doc2_model")
        class Doc2(Document):
            id = fields.NumberField()
            doc1 = fields.ForeignDocument(Doc1)
            class Meta:
                backend_type = 'django'
                model = Doc2Model
        # First return an existing model instance
        mock_doc1 = Mock()
        mock_doc2 = Mock()
        Doc1Model.objects.get.return_value = mock_doc1
        Doc2Model.objects.get_or_create.return_value = (mock_doc2, True)
        request = {
            "id": 1,
            "doc1": {
                "id": 1
            }
        }
        doc = Doc2(request)
        doc.save()
        ok_(isinstance(doc.doc1, Document))
        eq_(True, Doc1Model.objects.get.called)
        # Now save a non existing model
        mock_doc1.reset_mock()
        mock_doc2.reset_mock()
        # objects.get raising DoesNotExist forces the create path.
        Doc1Model.DoesNotExist = Exception
        Doc1Model.objects.get.side_effect = Doc1Model.DoesNotExist
        Doc1Model.objects.get_or_create.return_value = (mock_doc1, True)
        Doc2Model.objects.get_or_create.return_value = (mock_doc2, True)
        request = {
            "id": 1,
            "doc1": {
                "id": 1
            }
        }
        doc = Doc2(request)
        doc.save()
        ok_(isinstance(doc.doc1, Document))
        eq_(True, Doc1Model.objects.get.called)
    def it_applies_the_context_to_itself_and_its_foreign_documents(self):
        """A context dict supplied to a document should be merged into the
        backend lookups of the document itself and its foreign documents."""
        # prepare the app structure
        Doc1Model = Mock(name="doc1_model")
        Doc1Model.DoesNotExist = Exception
        class Doc1(Document):
            id = fields.NumberField()
            class Meta:
                backend_type = 'django'
                model = Doc1Model
                context = ['name']
        Doc2Model = Mock(name="doc2_model")
        class Doc2(Document):
            id = fields.NumberField()
            doc1 = fields.ForeignDocument(Doc1)
            class Meta:
                backend_type = 'django'
                model = Doc2Model
                context = ['name']
        # First return an existing model instance
        mock_doc1 = Mock()
        mock_doc2 = Mock()
        # The fetch for the foreign document will raise a BackendDoesNotExist
        # and therefore creates a new model instance
        Doc1Model.objects.get.side_effect = Doc1Model.DoesNotExist
        Doc1Model.objects.get.return_value = mock_doc1
        Doc2Model.objects.get.return_value = mock_doc2
        request = {
            "id": 2,
            "doc1": {
                "id": 1
            }
        }
        context = {'name': 'hello'}
        doc = Doc2(request, context=context)
        doc.save()
        # Both lookups of the foreign model must carry the context kwargs.
        eq_([('objects.get', {'id': 1, 'name': 'hello'}),
            ('objects.get', {'id': 1, 'name': 'hello'})
            ], Doc1Model.method_calls)
        #eq_([('objects.get', {'id': 1, 'name': 'hello'}),
        #    ], Doc1Model.method_calls)
        Doc2Model.objects.get.assert_called_once_with(id=2,
                name='hello')
def it_can_save_nested_collections_on_the_django_backend(self):
    """Saving a document nesting a collection inside another collection
    (Doc3 -> [Doc2] -> [Doc1]) must persist every level on the django
    backend.

    The queryset mocks below use one-shot ``iter(...)`` return values, so
    the exact number and order of iterations matters — do not reorder the
    mock wiring.
    """
    # Prepare an environment where you have a collection nesting another
    # collection
    Doc1Model = Mock(name="DjangoModel1")
    class Doc1(Document):
        id = fields.NumberField()
        class Meta:
            backend_type = 'django'
            model = Doc1Model
    class Doc1Collection(Collection):
        document = Doc1
    Doc2Model = Mock(name="DjangoModel2")
    class Doc2(Document):
        id = fields.NumberField()
        name = fields.StringField()
        doc1 = fields.CollectionField(Doc1Collection)
        class Meta:
            backend_type = 'django'
            model = Doc2Model
        def map_doc1_field(self):
            # backend relation for 'doc1' lives on attribute 'doc1_map'
            return "doc1_map"
    class Doc2Collection(Collection):
        document = Doc2
    Doc3Model = Mock(name="DjangoModel3")
    class Doc3(Document):
        id = fields.NumberField()
        doc2 = fields.CollectionField(Doc2Collection)
        class Meta:
            backend_type = 'django'
            model = Doc3Model
    post_data = {
        'id': 1,
        'doc2': [{
            'id': 2,
            'name': 'miss kitty',
            'doc1': [
                {'id': 3},
            ]
        }]
    }
    # create the document structure
    doc3 = Doc3(post_data)
    # and verify some expectations about the in-memory structure
    ok_(isinstance(doc3.doc2, Doc2Collection))
    eq_(1, len(doc3.doc2.collection_set))
    ok_(isinstance(doc3.doc2.collection_set[0], Doc2))
    temp_doc = doc3.doc2.collection_set[0]
    eq_('miss kitty', temp_doc.name)
    ok_(isinstance(temp_doc.doc1, Doc1Collection))
    # Now mock the django backend properly: Doc3 does not exist yet, so
    # saving creates it via Doc3Model(...).
    Doc1Model.DoesNotExist = Exception
    Doc2Model.DoesNotExist = Exception
    Doc3Model.DoesNotExist = Exception
    Doc3Model.objects.get.side_effect = Doc3Model.DoesNotExist
    mock_doc3 = Mock(name="Doc3")
    mock_doc3.id = 1
    mock_doc3.doc2 = Mock()
    mock_doc3.doc2.__dict__['model'] = Doc2Model
    Doc3Model.return_value = mock_doc3
    mock_doc2 = Mock()
    mock_doc2.__dict__['model'] = Doc2Model
    mock_doc2.id = 2
    mock_doc2.doc1_map = Mock(name='doc1_map')
    mock_doc2.doc1_map.__dict__['model'] = Doc1Model
    # I have to mock a queryset for each collection, because an exclude
    # runs on it every time an item of the collection is iterated.
    qs_doc2 = Mock(name="queryset2")
    qs_doc2.__len__ = Mock(return_value=1)
    qs_doc2.__getitem__ = Mock()
    qs_doc2.__iter__ = Mock(
        return_value=iter([mock_doc2]))
    qs_doc2a = Mock(name="queryset-exclude_doc2")
    qs_doc2a.__iter__ = Mock(return_value=iter([]))
    qs_doc2.exclude.return_value = qs_doc2a
    mock_doc3.doc2.all.return_value = qs_doc2
    mock_doc3.doc2.create.return_value = mock_doc2
    mock_doc3.doc2.get.return_value = mock_doc2
    mock_doc1 = Mock()
    mock_doc1.__dict__['model'] = Doc1Model
    mock_doc1.id = 3
    qs_doc1 = Mock(name="queryset1")
    qs_doc1.__len__ = Mock(return_value=1)
    qs_doc1.__getitem__ = Mock()
    qs_doc1.__iter__ = Mock(
        return_value=iter([mock_doc1]))
    qs_doc1a = Mock(name="queryset-exclude_doc1")
    qs_doc1a.__len__ = Mock(return_value=0)
    qs_doc1a.__getitem__ = Mock()
    qs_doc1a.__iter__ = Mock(return_value=iter([]))
    qs_doc1.exclude.return_value = qs_doc1a
    mock_doc2.doc1_map.all.return_value = qs_doc1
    mock_doc2.doc1_map.get.return_value = mock_doc1
    mock_doc2.doc1_map.create.return_value = mock_doc1
    # saving the model should create all nested relations too
    doc3.save()
    # make sure the right methods have been called.
    ok_(mock_doc2.doc1_map.get.called)
    ok_(mock_doc3.doc2.get.called)
    ok_(Doc3Model.called)
def it_calls_the_fetch_field_method_when_saving(self):
    """Saving must honour a document's ``map_<field>_field`` hook: the
    mapped attribute name is what gets set on the backend model."""
    backend_model = Mock()

    class Doc(Document):
        id = fields.NumberField()
        name = fields.StringField()

        class Meta:
            backend_type = 'django'
            model = backend_model

        def map_name_field(self):
            return "mapped_name"

    # Wire the fake django backend: no existing row, so saving creates a
    # brand-new model instance.
    backend_model.DoesNotExist = Exception
    backend_model.objects.get.side_effect = backend_model.DoesNotExist
    saved_instance = Mock()
    backend_model.return_value = saved_instance

    # Create and persist the document through the backend manager.
    manager = BackendManager('django')
    manager._model = backend_model
    manager.save(Doc({'id': 1, 'name': 'docname'}))

    # The mapped attribute exists on the model instance ...
    eq_(True, hasattr(saved_instance, 'mapped_name'))
    # ... and carries the document's value.
    eq_('docname', saved_instance.mapped_name)
def it_can_limit_the_choice_of_context_variables_given_to_it(self):
    """A document only forwards the context keys named in its Meta
    ``context`` list to backend lookups."""
    backend1 = Mock()
    backend2 = Mock()

    class Doc1(Document):
        id = fields.NumberField()
        name1 = fields.StringField()

        class Meta:
            backend_type = 'django'
            model = backend1
            context = ['name1']

    class Doc2(Document):
        id = fields.NumberField()
        name1 = fields.StringField()
        name2 = fields.StringField()
        doc1 = fields.ForeignDocument(Doc1)

        class Meta:
            backend_type = 'django'
            model = backend2
            context = ['name1', 'name2']

    # Fake existing rows on both backends.
    row1 = Mock()
    row1.id = 1
    row1.name1 = "name1"
    row2 = Mock()
    row2.id = 2
    row2.name1 = "name1"
    row2.name2 = "name2"
    row2.doc1 = row1
    backend1.objects.get.return_value = row1
    backend2.objects.get.return_value = row2

    document = Doc2({'id': 1}, context={'name1': 'name1', 'name2': 'name2'})
    document.fetch()
    document.save()

    #Model1.objects.get.assert_called_with(id=1, name1="name1")
    # Doc2 passes along exactly its two declared context variables.
    backend2.objects.get.assert_called_with(id=2, name1="name1",
            name2="name2")
def it_can_delete_items_from_a_m2m_relation(self):
    """Saving a document whose collection no longer contains some backend
    rows must delete those rows from the m2m relation.

    The querysets are mocked with one-shot ``iter(...)`` values, so the
    setup order and the number of iterations are significant.
    """
    Model1 = Mock()
    Model2 = Mock()
    class Doc1(Document):
        id = fields.NumberField()
        class Meta:
            backend_type = 'django'
            model = Model1
    class Col1(Collection):
        document = Doc1
    class Doc2(Document):
        id = fields.NumberField()
        col1 = fields.CollectionField(Col1)
        class Meta:
            backend_type = 'django'
            model = Model2
    # Two fake backend rows currently present in the relation.
    mock_doc1a = Mock()
    mock_doc1a.id = 1
    mock_doc1a.__dict__['model'] = Model1
    mock_doc1b = Mock()
    mock_doc1b.id = 2
    mock_doc1b.__dict__['model'] = Model1
    m2m_relation = Mock(name="m2m_relation")
    mock_doc2 = Mock()
    mock_doc2.col1 = m2m_relation
    mock_doc2.id = 3
    mock_doc2.col1.__dict__['model'] = Model1
    # The outer document already exists on the backend.
    Model2.objects.get.return_value = mock_doc2
    Queryset = Mock(name="queryset")
    Queryset.__len__ = Mock(return_value=2)
    Queryset.__getitem__ = Mock()
    Queryset.__iter__ = Mock(
        return_value=iter([mock_doc1a, mock_doc1b]))
    m2m_relation.all.return_value = Queryset
    # Case 1: the incoming collection is empty ...
    doc = Doc2({'id': 1, 'col1':[]})
    doc.save()
    # ... so we make sure that both backend instances are deleted too.
    eq_(True, mock_doc1a.delete.called)
    eq_(True, mock_doc1b.delete.called)
    mock_doc1a.reset_mock()
    mock_doc1b.reset_mock()
    m2m_relation.all.reset_mock()
    # Case 2: the collection keeps id=1, so only the excluded row
    # (mock_doc1b) must be deleted.
    Queryset1 = Mock(name="queryset1")
    Queryset1.__len__ = Mock(return_value=2)
    Queryset1.__getitem__ = Mock()
    Queryset1.__iter__ = Mock(
        return_value=iter([mock_doc1a, mock_doc1b]))
    Queryset2 = Mock(name="queryset2")
    Queryset2.__len__ = Mock(return_value=1)
    Queryset2.__getitem__ = Mock()
    Queryset2.__iter__ = Mock(
        return_value=iter([mock_doc1b]))
    m2m_relation.all.return_value = Queryset1
    Queryset1.exclude.return_value = Queryset2
    doc = Doc2({'id': 1, 'col1':[{'id':1}]})
    doc.save()
    m2m_relation.get.assert_called_once_with(id=1)
    eq_(True, mock_doc1b.delete.called)
def it_can_supply_context_to_foreign_documents_within_nested_collections(self):
    """Placeholder: context propagation through nested collections is not
    covered yet."""
    # FIXME: Add a test for this use case
    pass
|
|
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : Automated test suites with unittest
run "python -m unittest -v test" in the module directory to run the tests
The clock decorator in utils will measure the run time of the test
"""
#########################################################
# Import Packages and helpers
#########################################################
import unittest
# internal helpers
# from autoc.utils.helpers import clock, create_test_df, removena_numpy, cserie
from autoc.utils.helpers import random_pmf, clock, create_test_df, cserie, simu, removena_numpy
from autoc.utils.getdata import get_dataset
from autoc.explorer import DataExploration
from autoc.naimputer import NaImputer
from autoc.outliersdetection import OutliersDetection
import pandas as pd
import numpy as np
def flatten_list(x):
    """Recursively flatten arbitrarily nested lists into one flat list.

    A non-list argument is wrapped in a single-element list, matching the
    behaviour of the lambda this replaces (PEP 8 discourages assigning a
    lambda to a name).
    """
    if isinstance(x, list):
        return [y for l in x for y in flatten_list(l)]
    return [x]
# flatten_list = lambda x: [y for l in x for y in flatten_list(l)] if isinstance(x,list) else [x]
#########################################################
# Writing the tests
#########################################################
class TestDataExploration(unittest.TestCase):
    """Exercise the DataExploration API against the shared synthetic frame.

    Fix: two methods were both named ``test_count_unique``; the second
    definition silently shadowed the first, so the first never ran.  The
    second is renamed ``test_count_unique_key_and_factor`` so both execute.
    """

    @classmethod
    def setUpClass(cls):
        """ creating test data set for the test module """
        cls._test_df = create_test_df()
        cls._test_dc = DataExploration(data=cls._test_df)

    @clock
    def test_to_lowercase(self):
        # to_lowercase must return a new frame, not mutate in place.
        df_lower = self._test_dc.to_lowercase()
        self.assertNotEqual(id(df_lower), id(self._test_dc.data))
        self.assertTrue((pd.Series(['a'] * 500 + ['b'] * 200 + ['c'] * 300)==
                         df_lower.loc[:, 'character_variable_up1']).all())
        self.assertTrue((pd.Series(['a'] * 500 + ['b'] * 200 + ['d'] * 300)==
                         df_lower.loc[:, 'character_variable_up2']).all())

    @clock
    def test_copy(self):
        # copy=True must detach the stored data from the caller's frame.
        exploration_copy = DataExploration(data=create_test_df(), copy=True)
        self.assertEqual(id(self._test_df), id(self._test_dc.data))
        self.assertNotEqual(id(self._test_df), id(exploration_copy.data))

    @clock
    def test_cserie(self):
        char_var = cserie(self._test_dc.data.dtypes == "object")
        self.assertIsInstance(char_var, list)
        self.assertIn('character_variable', char_var)

    @clock
    def test_removena_numpy(self):
        test_array = np.array([np.nan, 1, 2, np.nan])
        self.assertTrue((removena_numpy(test_array) == np.array([1, 2])).all())

    @clock
    def test_sample_df(self):
        self.assertEqual(len(self._test_dc.sample_df(pct=0.061)),
                         0.061 * float(self._test_dc.data.shape[0]))

    @clock
    def test_nrow(self):
        self.assertEqual(self._test_dc._nrow, self._test_dc.data.shape[0])

    @clock
    def test_col(self):
        self.assertEqual(self._test_dc._ncol, self._test_dc.data.shape[1])

    @clock
    def test_is_numeric(self):
        self.assertTrue(self._test_dc.is_numeric("num_variable"))
        self.assertTrue(self._test_dc.is_numeric("many_missing_70"))
        self.assertFalse(self._test_dc.is_numeric("character_variable"))

    @clock
    def test_is_int_factor(self):
        self.assertFalse(self._test_dc.is_int_factor("num_variable"))
        self.assertTrue(self._test_dc.is_int_factor("int_factor_10", 0.01))
        self.assertTrue(self._test_dc.is_int_factor("int_factor_10", 0.1))
        self.assertFalse(self._test_dc.is_int_factor("int_factor_10", 0.005))
        self.assertFalse(self._test_dc.is_int_factor("character_variable"))

    @clock
    def test_where_numeric(self):
        self.assertEqual(cserie(self._test_dc.where_numeric().all()), self._test_dc._dfnum)

    @clock
    def test_total_missing(self):
        self.assertEqual(self._test_dc.total_missing,
                         self._test_dc.data.isnull().sum().sum())

    @clock
    def test_None_count(self):
        # ``None`` entries must be counted as missing, like np.nan.
        nacolcount = self._test_dc.nacolcount()
        self.assertEqual(nacolcount.loc['None_100', 'Napercentage'], 0.1)
        self.assertEqual(nacolcount.loc['None_100', 'Nanumber'], 100)
        self.assertEqual(nacolcount.loc['None_na_200', 'Napercentage'], 0.2)
        self.assertEqual(nacolcount.loc['None_na_200', 'Nanumber'], 200)

    @clock
    def test_nacolcount_capture_na(self):
        nacolcount = self._test_dc.nacolcount()
        self.assertEqual(nacolcount.loc['na_col', 'Napercentage'], 1.0)
        self.assertEqual(nacolcount.loc['many_missing_70', 'Napercentage'], 0.7)

    @clock
    def test_nacolcount_is_type_dataframe(self):
        self.assertIsInstance(self._test_dc.nacolcount(),
                              pd.core.frame.DataFrame)

    @clock
    def test_narowcount_capture_na(self):
        narowcount = self._test_dc.narowcount()
        self.assertEqual(sum(narowcount['Nanumber'] > 0), self._test_dc._nrow)

    # @clock
    # def test_detect_other_na(self):
    #     other_na = self._test_dc.detect_other_na()
    #     self.assertIsInstance(narowcount, pd.core.frame.DataFrame)

    @clock
    def test_narowcount_is_type_dataframe(self):
        narowcount = self._test_dc.narowcount()
        self.assertIsInstance(narowcount, pd.core.frame.DataFrame)

    @clock
    def test_manymissing_capture(self):
        manymissing = self._test_dc.manymissing(0.7)
        self.assertIsInstance(manymissing, list)
        self.assertIn('many_missing_70', manymissing)
        self.assertIn('na_col', manymissing)

    @clock
    def test_nacols_full(self):
        nacols_full = self._test_dc.nacols_full
        self.assertIsInstance(nacols_full, list)
        self.assertIn('na_col', nacols_full)

    @clock
    def test_narows_full(self):
        test_df = pd.DataFrame(np.random.randint(0, 100, size=(100, 4)), columns=list('ABCD'))
        test_df.loc[99, :] = np.nan
        self.assertIn(99, DataExploration(test_df).narows_full)
        self.assertNotIn(1, test_df)

    @clock
    def test_constant_col_capture(self):
        constantcol = self._test_dc.constantcol()
        self.assertIsInstance(constantcol, list)
        self.assertIn('constant_col', constantcol)
        self.assertIn('constant_col_num', constantcol)
        self.assertIn('na_col', constantcol)

    @clock
    def test_count_unique(self):
        count_unique = self._test_dc.count_unique()
        self.assertIsInstance(count_unique, pd.Series)
        self.assertEqual(count_unique.id, 1000)
        self.assertEqual(count_unique.constant_col, 1)
        self.assertEqual(count_unique.character_factor, 7)

    @clock
    def test_dfchar_check_col(self):
        dfchar = self._test_dc._dfchar
        self.assertIsInstance(dfchar, list)
        self.assertNotIn('num_variable', dfchar)
        self.assertIn('character_factor', dfchar)
        self.assertIn('character_variable', dfchar)
        self.assertNotIn('many_missing_70', dfchar)

    @clock
    def test_dfnum_check_col(self):
        dfnum = self._test_dc._dfnum
        self.assertIsInstance(dfnum, list)
        self.assertIn('num_variable', dfnum)
        self.assertNotIn('character_factor', dfnum)
        self.assertNotIn('character_variable', dfnum)
        self.assertIn('many_missing_70', dfnum)

    @clock
    def test_factors_check_col(self):
        factors = self._test_dc.factors()
        self.assertIsInstance(factors, list)
        self.assertNotIn('num_factor', factors)
        self.assertNotIn('character_variable', factors)
        self.assertIn('character_factor', factors)

    @clock
    def test_detectkey_check_col(self):
        detectkey = self._test_dc.detectkey()
        self.assertIsInstance(detectkey, list)
        self.assertIn('id', detectkey)
        self.assertIn('member_id', detectkey)

    @clock
    def test_detectkey_check_col_dropna(self):
        detectkeyna = self._test_dc.detectkey(dropna=True)
        self.assertIn('id_na', detectkeyna)
        self.assertIn('id', detectkeyna)
        self.assertIn('member_id', detectkeyna)

    @clock
    def test_findupcol_check(self):
        findupcol = self._test_dc.findupcol()
        self.assertIn(['id', 'duplicated_column'], findupcol)
        self.assertNotIn('member_id', flatten_list(findupcol))

    @clock
    def test_count_unique_key_and_factor(self):
        # Renamed from a duplicate ``test_count_unique`` definition that
        # shadowed the method above; both variants now run.
        count_unique = self._test_dc.count_unique()
        self.assertIsInstance(count_unique, pd.Series)
        self.assertEqual(count_unique.id, len(self._test_dc.data.id))
        self.assertEqual(count_unique.constant_col, 1)
        self.assertEqual(count_unique.num_factor, len(
            pd.unique(self._test_dc.data.num_factor)))

    @clock
    def test_structure(self):
        structure = self._test_dc.structure()
        self.assertIsInstance(structure, pd.DataFrame)
        self.assertEqual(len(self._test_dc.data),
                         structure.loc['na_col', 'nb_missing'])
        self.assertEqual(len(self._test_dc.data), structure.loc[
            'id', 'nb_unique_values'])
        self.assertTrue(structure.loc['id', 'is_key'])

    @clock
    def test_nearzerovar(self):
        nearzerovar = self._test_dc.nearzerovar(save_metrics=True)
        self.assertIsInstance(nearzerovar, pd.DataFrame)
        self.assertIn('nearzerovar_variable', cserie(nearzerovar.nzv))
        self.assertIn('constant_col', cserie(nearzerovar.nzv))
        self.assertIn('na_col', cserie(nearzerovar.nzv))
class TestNaImputer(unittest.TestCase):
    """Tests for the NaImputer missing-value filler.

    Fix: both fillna tests were named ``test_fillna_serie``; the second
    definition silently shadowed the first, so it never ran.  The second
    is renamed ``test_fillna_serie_by_column`` so both execute.
    """

    @classmethod
    def setUpClass(cls):
        """ creating test data set for the test module """
        cls._test_na = NaImputer(data=create_test_df())

    @clock
    def test_fillna_serie(self):
        # Filling a plain numeric Series: the NaN at index 2 becomes 3.0.
        test_serie = pd.Series([1, 3, np.nan, 5])
        self.assertIsInstance(
            self._test_na.fillna_serie(test_serie), pd.Series)
        self.assertEqual(self._test_na.fillna_serie(test_serie)[2], 3.0)

    @clock
    def test_fillna_serie_by_column(self):
        # Filling by column name (renamed from duplicate test_fillna_serie).
        test_char_variable = self._test_na.fillna_serie('character_variable_fillna')
        test_num_variable = self._test_na.fillna_serie('numeric_variable_fillna')
        self.assertTrue(test_char_variable.notnull().any())
        self.assertTrue(test_num_variable.notnull().any())
        self.assertTrue((pd.Series(
            ['A'] * 300 + ['B'] * 200 + ['C'] * 200 + ['A'] * 300) == test_char_variable).all())
        self.assertTrue(
            (pd.Series([1] * 400 + [3] * 400 + [2] * 200) == test_num_variable).all())

    @clock
    def test_fill_low_na(self):
        # basic_naimputation fills low-missing columns, both when named
        # explicitly and when selected via a missing-rate threshold.
        df_fill_low_na = self._test_na.basic_naimputation(columns_to_process=['character_variable_fillna',
                                                                             'numeric_variable_fillna'])
        df_fill_low_na_threshold = self._test_na.basic_naimputation(threshold=0.4)
        self.assertIsInstance(df_fill_low_na, pd.DataFrame)
        self.assertIsInstance(df_fill_low_na_threshold, pd.DataFrame)
        self.assertTrue((pd.Series(['A'] * 300 + ['B'] * 200 + ['C'] * 200 + [
            'A'] * 300) == df_fill_low_na.character_variable_fillna).all())
        self.assertTrue((pd.Series([1] * 400 + [3] * 400 + [2] * 200)
                         == df_fill_low_na.numeric_variable_fillna).all())
        self.assertTrue((pd.Series(['A'] * 300 + ['B'] * 200 + ['C'] * 200 + [
            'A'] * 300) == df_fill_low_na_threshold.character_variable_fillna).all())
        self.assertTrue((pd.Series([1] * 400 + [3] * 400 + [2] * 200)
                         == df_fill_low_na_threshold.numeric_variable_fillna).all())
        # Columns above the threshold (70% missing) must stay untouched.
        self.assertTrue(
            sum(pd.isnull(df_fill_low_na_threshold.many_missing_70)) == 700)
class TestOutliersDetection(unittest.TestCase):
    """1d outlier flagging checks on the shared synthetic frame."""

    @classmethod
    def setUpClass(cls):
        """Build the frame and the detector shared by every test."""
        cls.data = create_test_df()
        cls.outlier_d = OutliersDetection(cls.data)

    def _check_strong_outliers(self, column):
        # Shared expectations: rows 1 and 100 are flagged as outliers,
        # rows 2 and 10 are not.
        strong_cutoff = self.outlier_d.strong_cutoff
        flagged = self.outlier_d.outlier_detection_serie_1d(column, strong_cutoff)
        outlier_rows = cserie(flagged.loc[:, 'is_outlier'] == 1)
        self.assertIn(1, outlier_rows)
        self.assertNotIn(10, outlier_rows)
        self.assertIn(100, outlier_rows)
        self.assertNotIn(2, outlier_rows)

    @clock
    def test_outlier_detection_serie_1d(self):
        self._check_strong_outliers('outlier')

    @clock
    def test_outlier_detection_serie_1d_with_na(self):
        # Same expectations hold when the column contains missing values.
        self._check_strong_outliers('outlier_na')
class TestHelper(unittest.TestCase):
    """Unit tests for the small helpers (random_pmf, simu)."""

    @classmethod
    def setUpClass(cls):
        """ creating test data set for the test module """
        cls.data = create_test_df()

    @clock
    def test_random_pmf(self):
        # A pmf of size 10 has 10 entries and sums to 1.
        self.assertEqual(len(random_pmf(10)), 10)
        self.assertAlmostEqual(random_pmf(10).sum(), 1)

    @clock
    def test_simu(self):
        # With probabilities (0, 1) every draw must be 'B'.
        # (Removed an unused ``pmf = random_pmf(4)`` local.)
        samples_unique = simu((np.array(['A', 'B']), np.array([0, 1])), 10)
        self.assertTrue((samples_unique == 'B').all())
# class TestGetData(unittest.TestCase):
#
# @clock
# def test_getdata_titanic(self):
# """ Test if downloading titanic data is working """
# titanic = get_dataset('titanic')
# self.assertIsInstance(titanic, pd.DataFrame)
# self.assertEqual(titanic.shape[0], 891)
# self.assertEqual(titanic.shape[1], 15)
# Adding new test sets
# def suite():
# suite = unittest.TestSuite()
# suite.addTest(TestPandasPatch('test_default_size'))
# return suite
# Other solution than calling main
#suite = unittest.TestLoader().loadTestsFromTestCase(TestPandasPatch)
#unittest.TextTestRunner(verbosity = 1 ).run(suite)
if __name__ == "__main__":
    # exit=False stops unittest.main from calling sys.exit, so an embedding
    # process (or interactive session) regains control after the run.
    unittest.main(exit=False)
|
|
#!/usr/bin/python
# posts to youtube
from process import process
import youtube_v3_uploader
import ia_uploader
# import rax_uploader
import os
import pprint
import re
import pw
from django.template.defaultfilters import slugify
# from add_to_richard import get_video_id
from main.models import Show, Location, Episode, Raw_File, Cut_List
class FileNotFound(Exception):
    """Raised when an expected upload file is missing from disk."""

    def __init__(self, value):
        # Keep the offending pathname around for callers to inspect.
        self.value = value

    def __str__(self):
        # repr() so the pathname shows up quoted in tracebacks.
        return repr(self.value)
class post(process):
    """Processing step that uploads finished episode files to the
    configured hosting services (YouTube, archive.org; rackspace and
    vimeo paths are currently disabled)."""

    # State-machine value this step consumes; presumably defined by the
    # process base class -- TODO confirm against process.py.
    ready_state = 4

    def get_tags(self, ep):
        """Build the tag list for an episode from client/show slugs plus
        any comma-separated client tags and author names."""
        tags = [ ep.show.client.slug, ep.show.slug, ]
        # for more_tags in [ ep.show.client.tags, ep.tags, ep.authors ]:
        for more_tags in [ ep.show.client.tags, ep.authors ]:
            if more_tags is not None:
                tags += more_tags.split(',')
        # remove spaces
        tags = [ tag.replace(' ','') for tag in tags ]
        # remove any empty tags
        tags = [_f for _f in tags if _f]
        return tags

    def get_files(self, ep):
        """Return the list of files to upload, as dicts of
        {'ext': ..., 'pathname': ...}.  Raises FileNotFound if any
        configured upload format is missing on disk."""
        # get a list of video files to upload
        # blip and archive support multiple formats, youtube does not.
        # youtube and such will only upload the first file.
        files = []
        for ext in self.options.upload_formats:
            src_pathname = os.path.join( self.show_dir, ext, "%s.%s"%(ep.slug,ext))
            if os.path.exists(src_pathname):
                files.append({'ext':ext,'pathname':src_pathname})
            else:
                # crappy place to abort, but meh, works for now.
                # maybe this is the place to use raise?
                print("not found:", src_pathname)
                raise FileNotFound(src_pathname)
        if self.options.debug_log:
            # put the mlt and .sh stuff into the log
            # blip and firefox want it to be xml, so jump through some hoops
            log = "<log>\n"
            mlt_pathname = os.path.join( self.show_dir, 'mlt', "%s.mlt"%(ep.slug,))
            log += open(mlt_pathname).read()
            sh_pathname = os.path.join( self.show_dir, 'tmp', "%s.sh"%(ep.slug,))
            shs = open(sh_pathname).read().split('\n')
            shs = [ "<line>\n%s\n</line>\n" % l for l in shs if l]
            log += "<shell_script>\n%s</shell_script>\n" % ''.join(shs)
            log += "</log>"
            # blip says: try something like a tt or srt file
            log_pathname = os.path.join( self.show_dir, 'tmp', "%s.tt"%(ep.slug,))
            # NOTE(review): the file handle is never closed explicitly and
            # log_file is the (None) return of write(); consider a `with`.
            log_file=open(log_pathname,'w').write(log)
            # add the log to the list of files to be posted
            files.append({'ext':'tt', 'pathname':log_pathname})
        return files

    def collect_metadata(self, ep):
        """Assemble the metadata dict (title, tags, license, category,
        location, privacy) shared by the uploaders."""
        meta = {}
        meta['title'] = '"{title}" - {authors} ({show})'.format(
            title=ep.name, authors=ep.authors, show=ep.show.name)
        if len(meta['title']) > 100:
            # YouTube rejects titles over 100 chars; fall back to the bare name.
            meta['title'] = ep.name
        meta['authors'] = ep.authors.split(',')
        meta['description'] = ep.composed_description()
        meta['tags'] = self.get_tags(ep)
        meta['start'] = ep.start
        # NOTE(review): the next assignment immediately overwrites
        # ep.language with the hardcoded "eng" -- confirm which is intended.
        meta['language'] = ep.language
        meta['language'] = "eng"
        # YouTube treats 'creativeCommon' == 'CC BY 3.0'
        # Only use it when the license **exactly** matches that, even if for
        # a different CC license.
        # Reference: https://support.google.com/youtube/answer/2797468?hl=en
        # Context: https://2019.pycon-au.org/news/video-licencing-changes/
        if ep.license and (ep.license.upper().replace('-', ' ') in (
                'CC BY', 'CC BY 3.0')):
            meta['license'] = 'creativeCommon'
        else:
            # We're not sure -- play it safe.
            meta['license'] = 'youtube'
        # meta['rating'] = self.options.rating
        # http://gdata.youtube.com/schemas/2007/categories.cat
        meta['category'] = 27 # "Education"
        if ep.location.lat and ep.location.lon:
            meta['latlon'] = (ep.location.lat, ep.location.lon)
        meta['privacyStatus'] = 'unlisted'
        return meta

    def mk_key(self, ep, f):
        """Build the rackspace CDN object key for one file."""
        # make a key for rackspace cdn object key value store
        # <category-slug>/<video-id>_<title-of-video>.mp4
        # if we have that data handy.
        # otherwise client/show/slug
        key = ''
        if ep.show.client.category_key:
            # warning: this does not take into account pvo collisions
            # https://github.com/willkg/richard/blob/master/richard/videos/utils.py#L20 def generate_unique_slug(obj, slug_from, slug_field='slug'):
            key += slugify( ep.show.client.category_key ) + '/'
        else:
            # NOTE(review): uses client.slug twice; the comment above says
            # client/show -- possibly ep.show.slug was intended.
            key += ep.show.client.slug + '/'+ ep.show.client.slug + '/'
        if ep.public_url:
            # NOTE(review): get_video_id's import is commented out at the
            # top of this file, so this line would raise NameError.
            key += get_video_id( ep.public_url) + "_"
        key += ep.slug[:50] + "." + f['ext']
        return key

    def do_yt(self, ep, files, private, meta):
        """Upload the first file to YouTube via youtube_v3_uploader.
        Returns True on success (or when the upload is skipped)."""
        youtube_success = False
        # https://developers.google.com/youtube/v3/docs/videos#resource
        assert len(meta['title'])<=100, "len(name) > maximum length of 100"
        uploader = youtube_v3_uploader.Uploader()
        uploader.oauth_file = \
            pw.yt[ep.show.client.youtube_id]['filename']
        uploader.pathname = files[0]['pathname']
        uploader.meta = meta
        uploader.private = private
        if self.options.test:
            # dry run: show what would be uploaded and skip the API call
            print('test mode:')
            print("user key:", uploader.user)
            print('files = %s' % files)
            print('meta = %s' % pprint.pformat(meta))
            print('skipping youtube_upoad.py uploader.upload()')
            print(len(meta['description']))
        elif ep.host_url and not self.options.replace:
            print("skipping youtube, already there.")
            youtube_success = True
        else:
            if ep.host_url:
                # --replace: remove the old video before re-uploading
                uploader.delete_video(ep.host_url)
            # down to next layer of code that will do the uploading
            # uploader.debug_mode=True
            youtube_success = uploader.upload()
            if youtube_success:
                # if self.options.verbose: print uploader.new_url
                print((uploader.new_url))
                # save new youtube url
                ep.host_url = uploader.new_url
                # the thumb url
                ep.thumbnail = uploader.thumbnail
                # for test framework
                self.last_url = uploader.new_url
            else:
                print("youtube error! zomg")
                ep.comment += "\n%s\n" % (uploader.ret_text.decode('utf-8').encode('ascii', 'xmlcharrefreplace'))
            self.save_me(ep)
        return youtube_success

    def do_ia(self, ep, files, meta):
        """Upload every file to archive.org via ia_uploader.
        Returns True on success (or when the upload is skipped)."""
        # upload to archive.org too.
        # this should be in post_ia.py, but
        # but I don't want 2 processes uploading at the same time.
        # because bandwidth?
        uploader = ia_uploader.Uploader()
        uploader.user = ep.show.client.archive_id
        # transform veyepar meta to ia meta
        if ep.license.upper().startswith('CC'):
            x=ep.license[3:8].lower()
            ver='4.0'
            meta['licenseurl'] = 'http://creativecommons.org/licenses/{x}/{ver}/'.format(x=x,ver=ver)
        for f in files:
            uploader.pathname = f['pathname']
            uploader.verbose = self.options.verbose
            slug = "{show}-{slug}".format(
                show=ep.show.slug,
                slug=ep.slug)[:100]
            # IA requires this: ^[a-zA-Z0-9][a-zA-Z0-9_.-]{4,100}$
            slug = re.sub(r'[^a-zA-Z0-9_.-]', '', slug)
            uploader.slug = slug
            uploader.meta = meta
            if self.options.test:
                print('test mode...')
                print('skipping archive_uploader .upload()')
                ia_success = False
            elif ep.archive_url and not self.options.replace:
                print("skipping archive, file already there.")
                ia_success = True
            else:
                # actually upload
                # uploader.debug_mode=True
                ia_success = uploader.upload()
                if ia_success:
                    if self.options.verbose: print(uploader.new_url)
                    # store the archive url (page)
                    ep.archive_url = uploader.new_url
                    archive_file_url = "{page}/{slug}.{ext}".format(
                        page=uploader.new_url,
                        slug=ep.slug,
                        ext=f['ext'])
                    # this is pretty gross.
                    if f['ext'] == "mp4":
                        ep.archive_mp4_url = archive_file_url
                    elif f['ext'] == "ogv":
                        ep.archive_ogv_url = archive_file_url
                    elif f['ext'] == "webm": # omg super gross.
                        ep.archive_ogv_url = archive_file_url
                    # hook for tests so that it can be browsed
                    self.archive_url = uploader.new_url
                    # for test framework
                    self.last_url = uploader.new_url
                else:
                    print("Internet archive.org error!")
        self.save_me(ep)
        return ia_success

    def do_rax(self, ep, files, meta):
        """Upload every file to the rackspace CDN via rax_uploader.
        NOTE(review): the rax_uploader import is commented out at the top
        of the file, so this method would raise NameError if called."""
        # upload to rackspace cdn too.. yuck.
        # this should be in post_rax.py, but
        # but I don't want 2 processes uploading at the same time.
        # because bandwidth? or something.
        # Not sure what the problem is really.
        if self.options.verbose: print("do_rax...")
        success = False
        uploader = rax_uploader.Uploader()
        uploader.user = ep.show.client.rax_id
        uploader.bucket_id = ep.show.client.bucket_id
        for f in files:
            uploader.pathname = f['pathname']
            uploader.key_id = self.mk_key(ep, f)
            if self.options.test:
                print('test mode...')
                print('skipping rax_uploader .upload()')
                print('key_id:', uploader.key_id)
            elif ep.rax_mp4_url and not self.options.replace:
                # above assumes rax_mp4_url is what gets filled in below
                # this is so gross.
                print("skipping rax, already there.")
                success = True
            else:
                # actually upload
                # uploader.debug_mode=True
                success = uploader.upload()
                # possible errors:
                # invalid container - halt, it will likely be invalid for all
                # transmission - retry
                # bad name, mark as error and continue to next
                if success:
                    if self.options.verbose: print(uploader.new_url)
                    # this is pretty gross.
                    # store the url
                    if f['ext'] == "mp4":
                        ep.rax_mp4_url = uploader.new_url
                    elif f['ext'] == "webm":
                        ep.rax_mp4_url = uploader.new_url
                    elif f['ext'] == "ogv":
                        # there is no ep.rax_ogv_url
                        ep.rax_ogv_url = uploader.new_url
                    # hook for tests so that it can be browsed
                    # self.rax_url = uploader.new_url
                    # for test framework
                    self.last_url = uploader.new_url
                else:
                    print("rax error!")
        self.save_me(ep)
        return success

    def do_vimeo(self, ep, files, private, meta):
        """Upload the first file to vimeo.
        NOTE(review): vimeo_uploader is never imported, ``vimeo_success``
        is set but never used, and in test mode ``youtube_success`` is
        read before assignment (NameError) -- this path looks dead/broken."""
        vimeo_success = False
        uploader = vimeo_uploader.Uploader()
        uploader.user = ep.show.client.vimeo_id
        uploader.pathname = files[0]['pathname']
        uploader.meta = meta
        if self.options.test:
            print('test mode:')
            print("user key:", uploader.user)
            print('files = %s' % files)
            print('meta = %s' % pprint.pformat(meta))
            print('skipping vimeo_upoad.py uploader.upload()')
            print(len(meta['description']))
        elif ep.host_url and not self.options.replace:
            print("skipping vimeo, already there.")
            youtube_success = True
        else:
            # down to next layer of code that will do the uploading
            # uploader.debug_mode=True
            youtube_success = uploader.upload()
            if youtube_success:
                if self.options.verbose: print(uploader.new_url)
                # save new youtube url
                ep.host_url = uploader.new_url
                # for test framework
                self.last_url = uploader.new_url
            else:
                print("youtube error! zomg")
                ep.comment += "\n%s\n" % (uploader.ret_text.decode('utf-8').encode('ascii', 'xmlcharrefreplace'))
            self.save_me(ep)
        return youtube_success

    def process_ep(self, ep):
        """Run the upload step for one episode: gather files and metadata,
        then push to each enabled service.  Returns False when the episode
        is explicitly unreleased, True otherwise."""
        if not ep.released and ep.released is not None: # and not self.options.release_all:
            # --release will force the upload, overrides ep.released
            if self.options.verbose: print("not released:", ep.released)
            return False
        # collect data needed for uploading
        files = self.get_files(ep)
        if self.options.verbose:
            print("[files]:", end=' ')
            pprint.pprint(files)
        meta = self.collect_metadata(ep)
        if self.options.verbose: pprint.pprint(meta)
        # upload youtube
        if not ep.show.client.youtube_id: youtube_success = True
        else: youtube_success = self.do_yt(ep,files,True,meta)
        # upload archive.org
        if not ep.show.client.archive_id: archive_success = True
        else: archive_success = self.do_ia(ep,files,meta)
        # upload rackspace cdn
        # if not ep.show.client.rax_id: rax_success = True
        # else: rax_success = self.do_rax(ep,files,meta)
        # upload vimeo (needs upgrading to new api)
        # if not ep.show.client.vimeo_id: vimeo_success = True
        # else: vimeo_success = self.do_vimeo(ep,files,meta)
        # NOTE(review): per-service success flags are computed but not
        # combined into the return value (see commented expression below).
        return True
        # youtube_success
        #    and archive_success \
        #    and rax_success

    def add_more_options(self, parser):
        """Register this step's extra command line options."""
        parser.add_option('--replace', action="store_true",
                help="Upload again, step on existing URL.")
        parser.add_option('--release-all', action="store_true",
                help="ignore the released setting (assuming this is enabled.)")
if __name__ == '__main__':
    # Instantiate the upload step and hand control to main(), which is
    # inherited from the process base class.
    p=post()
    p.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Consumes tweets from the Twitter Streaming API and puts them on a
redis queue. In addition, it listens to the redis queue, for:
* new hashtags and follower ids to add to the track predicates
* a "reload" instruction
XXX implement the ``manager.reload_predicates()`` method to actually follow
the right users and keywords. For now, we've hardcoded to just track "#syria".
Under the hood, the queue processor and the streaming consumer are both run
in their own threads. The local queue processor is resilient but the Twitter
Streaming Api enjoys falling over. When it does, the active client will set
``self.running`` to ``False``. In the main control thread, we poll this flag
every second and fire up a new client whenever it falls over.
We'll miss a tweet every now and then but that's not the end of the world.
"""
import logging
import logging.config
logger = logging.getLogger(__name__)
import argparse
import ConfigParser
import os
import threading
import time
import tweepy
from sqlalchemy import create_engine
from pyramid_basemodel import Session, bind_engine
from pyramid_twitterauth.model import TwitterAccount
from .hooks import get_redis_client
from .model import Session, Hashtag
from .queue import QueueProcessor
INPUT_CHANNEL = 'beliveat.stream:instructions'
OUTPUT_CHANNEL = 'beliveat.queue:input'
def oauth_handler_factory(config, section_key='app:beliveat', cls=tweepy.OAuthHandler):
    """Return an authenticated Twitter Streaming API consumer.

    Reads the app's consumer key/secret and the account's access token
    from *config* under *section_key*; *cls* is injectable for tests.
    """
    section = section_key
    # Instantiate the handler with the app's consumer credentials.
    handler = cls(
            config.get(section, 'twitterauth.oauth_consumer_key'),
            config.get(section, 'twitterauth.oauth_consumer_secret'),
            secure=True)
    # Attach the account-level access token.
    handler.set_access_token(
            config.get(section, 'twitter_consumer.access_token_key'),
            config.get(section, 'twitter_consumer.access_token_secret'))
    return handler
def get_all_twitter_ids(session_cls=None):
    """Return up to 5000 twitter ids known to the db.

    ``session_cls`` is injectable for testing; defaults to the bound Session.
    """
    if session_cls is None:
        session_cls = Session  # production session factory
    rows = session_cls.query(TwitterAccount.twitter_id).limit(5000).all()
    return [row[0] for row in rows]
def get_track_keywords(hashtag_cls=None):
    """Return ``#hashtag`` track predicates for all active stories.

    ``hashtag_cls`` is injectable for testing; defaults to the Hashtag model.
    """
    if hashtag_cls is None:
        hashtag_cls = Hashtag  # production model class
    # Only hashtags attached to a story, most recently modified first.
    query = hashtag_cls.query.filter(hashtag_cls.story != None)
    recent = query.order_by(hashtag_cls.modified.desc()).limit(400).all()
    return [u'#{0}'.format(tag.value) for tag in recent]
class StreamListener(tweepy.StreamListener):
    """Forward raw data from the Streaming API to a caller-supplied handler."""
    def __init__(self, handle_function, api=None):
        logger.info('StreamListener()')
        super(StreamListener, self).__init__(api=api)
        # Callable invoked with each raw payload string.
        self.handle_function = handle_function
    def on_data(self, data_str):
        """Called when raw data is received from connection."""
        self.handle_function(data_str)
    def on_error(self, status_code):
        """Log the HTTP error and keep the stream alive."""
        logger.warn('Error: status code %s' % status_code)
        return True  # keep stream alive
    def on_timeout(self):
        """Log a connection timeout (tweepy will reconnect)."""
        logger.info('timeout')
class Manager(object):
"""Load the follow ids from the database and start a streaming api client
and a redis queue procesor. When a new follow id comes form redis,
add it to the follow ids and restart the client.
"""
clients = []
follow_ids = []
track_keywords = []
def handle_twitter_data(self, data_str):
"""Put incoming tweets onto the redis queue."""
# If the text doesn't looks valid, ignore it.
is_status = 'in_reply_to_status_id' in data_str
is_deletion = 'delete' in data_str
if not bool(is_status or is_deletion):
return
# Otherwise put it on the output queue.
self.redis.rpush(self.output_channel, data_str)
def handle_queue_data(self, data_str):
"""We accept two different instructions from the redis queue:
1. if passed a string in the form ``follow:int_follow_id`` add
``int_follow_id`` to the follow ids and reconnect
2. if passed a string in the form ``track:keyword`` add ``keyword``
to the track predicates and reconnect
3. if passed "consumer:reload", reconnect
"""
logger.info('Manager.handle_queue_data()')
logger.info(data_str)
# We don't want to fire up a new client if we're already closing.
if not self.running:
return
# Decode to unicode.
text = unicode(data_str, 'utf-8')
# If explicitly told to, reload the follower ids and reconnect.
if text == 'consumer:reload':
self.reload_predicates()
return self.reconnect()
# Otherwise, if it's a follower id.
if text.startswith('follow:'):
try: # Parse the text into an int follower id.
follower_id = int(text[7:])
except ValueError as err:
logger.warn(err)
else: # Follow the new user and reload.
if not bool(follower_id in self.follower_ids):
self.follower_ids.append(follower_id)
return self.reconnect()
# Otherwise, if it's a track predicate.
if text.startswith('track:'):
try: # Parse the text into a keyword.
keyword = text[6:].strip()
except ValueError as err:
logger.warn(err)
else: # Follow the new user and reload.
if not bool(keyword in self.track_keywords):
self.track_keywords.append(keyword)
return self.reconnect()
def reload_predicates(self, get_twitter_ids=None, get_keywords=None):
"""Load the filter predicates."""
logger.warn('Reloading predicates...')
logger.warn('- current:')
logger.warn(self.follow_ids)
logger.warn(self.track_keywords)
if get_twitter_ids is None:
get_twitter_ids = get_all_twitter_ids
if get_keywords is None:
get_keywords = get_track_keywords
self.follow_ids = get_twitter_ids()
self.track_keywords = get_keywords()
# Close the db connection
Session.remove()
logger.warn('- new:')
logger.warn(self.follow_ids)
logger.warn(self.track_keywords)
def reconnect(self):
"""Disconnect existing clients and fire up a new one."""
self.disconnect_existing_clients()
self.fire_up_new_client()
def disconnect_existing_clients(self):
"""Tell all the existing clients to disconnect."""
logger.info('Manager.disconnect_existing_clients()')
for client in self.clients:
client.disconnect()
self.clients.remove(client)
def fire_up_new_client(self, cls=tweepy.streaming.Stream):
"""Create a new streaming client and start it going."""
logger.info('Manager.fire_up_new_client()')
client = cls(self.oauth_handler, self.stream_listener, timeout=35)
client.filter(follow=self.follow_ids, track=self.track_keywords, async=True)
self.clients.append(client)
def stop(self):
"""Stop the processor and disconnect the clients."""
logger.info('Manager.stop()')
self.running = False
self.processor.stop()
self.disconnect_existing_clients()
def start(self):
"""Loop forever. If there isn't an active client, fire one up. If the
queue processor isn't running, fire that up.
"""
logger.info('Manager.start()')
self.running = True
self.processor.start(async=True)
while self.running:
if not self.clients or not self.clients[-1].running:
self.fire_up_new_client()
time.sleep(1)
def __init__(self, redis_client, oauth_handler, input_channel, output_channel):
"""Setup the stream listener ready to handle data, the streaming api
auth handler and queue processor and load the follow ids.
"""
logger.info('Manager.__init__()')
# Setup the stream listener, telling it to pass data from the streaming
# api to ``self.handle_twitter_data``.
self.stream_listener = StreamListener(self.handle_twitter_data)
# Setup the queue processor, telling it to pass data from the redis
# client to ``self.handle_queue_data``.
self.redis = redis_client
self.output_channel = output_channel
self.processor = QueueProcessor(redis_client, [input_channel],
self.handle_queue_data)
# Save a handle on the Twitter oauth handler.
self.oauth_handler = oauth_handler
# Load the filter predicates.
self.reload_predicates()
def parse_args(parser_cls=argparse.ArgumentParser):
    """Parse and return the command line arguments.

    ``parser_cls`` is injectable for testing.
    """
    parser = parser_cls()
    # Positional: path to the ini config file (``nargs=1`` keeps it a list).
    parser.add_argument('config_file', metavar='CONFIG_FILE', nargs=1)
    # Optional redis channel overrides.
    parser.add_argument('--input', dest='input_channel', default=INPUT_CHANNEL)
    parser.add_argument('--output', dest='output_channel', default=OUTPUT_CHANNEL)
    return parser.parse_args()
def main(args=None):
    """Consume the Twitter Streaming API.

    Writes a pid file, reads config, monkey-patches for gevent, binds the
    db engine and runs a ``Manager`` until interrupted.
    """
    # Write a pid file so the process can be signalled / monitored.
    # BUGFIX: use a ``with`` block so the handle is closed even on error.
    with open('stream.pid', 'w') as f:
        f.write(str(os.getpid()))
    # Parse the command line args.
    if args is None:
        args = parse_args()
    # Read the config file (``config_file`` is a one-element list; ``read``
    # accepts a list of filenames).
    config = ConfigParser.SafeConfigParser()
    config.read(args.config_file)
    # Setup logging.
    logging.config.fileConfig(args.config_file)
    # Patch sockets and threading *before* any connections are opened.
    from gevent import monkey
    monkey.patch_all()
    import gevent_psycopg2
    gevent_psycopg2.monkey_patch()
    # Bind the model classes.
    engine = create_engine(config.get('app:beliveat', 'sqlalchemy.url'))
    bind_engine(engine)
    # Instantiate a ``Manager`` with a redis client and oauth handler and
    # start the manager running.
    client = get_redis_client()
    handler = oauth_handler_factory(config)
    manager = Manager(client, handler, args.input_channel, args.output_channel)
    # Close the db connection (the manager re-opens sessions as needed).
    Session.remove()
    try:
        manager.start()
    except KeyboardInterrupt:
        manager.stop()

if __name__ == '__main__':
    main()
|
|
from mss import mss
from PIL import Image, ImageTk
from collections import deque
from tkinter import ttk
import tkinter as tk
import threading, queue, time, sys, sdl2, pickle, json, os
from JoystickInput import JoystickInput_SDL
from DataViewer import DataViewerFrame
from JoystickServer import JoystickServerFrame
# Joystick handles, initialised in the ``__main__`` block below.
globalJoystick, globalJoystickInput = None, None
#window handling code, it has to be platform specific unfortunately
if sys.platform == 'linux':
    #get window size/position using wnck (linux only)
    import gi
    gi.require_version('Wnck', '3.0')
    from gi.repository import Wnck
    def getWindowGeometry(name):
        """Return [x, y, w, h] of the first window whose title contains
        *name*, or a default rectangle when no window matches."""
        Wnck.Screen.get_default().force_update()
        window_list = Wnck.Screen.get_default().get_windows()
        for win in window_list:
            if name in win.get_name():
                geometry = list(win.get_geometry())
                # 25px adjustment presumably skips the title bar -- TODO confirm.
                geometry[1] = geometry[1] + 25
                geometry[3] = geometry[3] - 25
                return geometry
        # Fallback geometry when the window isn't found.
        return (200, 200, 600, 400)
elif sys.platform == 'win32' or sys.platform == 'cygwin':
    import win32gui
    def findWindowCallback(hwnd, name):
        """EnumWindows callback: stash the rect of any window whose title
        contains *name* in the module-global ``win32_rect``."""
        theName = win32gui.GetWindowText(hwnd)
        if name in theName :
            global win32_rect
            win32_rect = win32gui.GetWindowRect(hwnd)
    def getWindowGeometry(name):
        """Return (x, y, w, h) of the first window whose title contains
        *name*, or a default rectangle when no window matches."""
        global win32_rect
        # Default used when the callback never fires.
        win32_rect = (200, 200, 600, 400)
        win32gui.EnumWindows(findWindowCallback, name)
        # Convert the (left, top, right, bottom) rect to x/y/width/height.
        x = win32_rect[0]
        y = win32_rect[1]
        w = win32_rect[2] - x
        h = win32_rect[3] - y
        return (x, y, w, h)
def setGlobalWindowGeometry(windowName):
    """Cache the geometry of the named window in the module-global
    ``windowGeometry`` used by ``getScreenShot``."""
    global windowGeometry
    windowGeometry = getWindowGeometry(windowName)
# Capture the emulator window's position once at import time.
setGlobalWindowGeometry('Mupen64Plus')
# Target (width, height) that captured screenshots are downscaled to.
screenShotReSize = 320, 240
def getScreenShot():
    """Grab the cached emulator window region and return it as a
    thumbnailed PIL image (at most ``screenShotReSize`` pixels)."""
    with mss() as sct:
        x = windowGeometry[0]
        y = windowGeometry[1]
        width = windowGeometry[2]
        height = windowGeometry[3]
        mon = {'top': y, 'left': x, 'width': width, 'height': height}
        # NOTE(review): ``sct.get_pixels`` is the legacy mss (<3.0) API --
        # confirm the pinned mss version before upgrading.
        img = Image.frombytes('RGB', (width, height), sct.get_pixels(mon))
        # Downscale in place. NOTE(review): Image.ANTIALIAS was removed in
        # Pillow 10 (use Image.LANCZOS there).
        img.thumbnail(screenShotReSize, Image.ANTIALIAS)
        return img
def getJoystickState(joystickInput):
    """Thin delegate: return the current state reported by *joystickInput*."""
    return joystickInput.getJoystickState()
class FPSCounter():
    """Track frame timestamps and report a rolling frames-per-second figure.

    Keeps the most recent 60 frame times; FPS is the number of stored
    frames divided by the time elapsed since the oldest one.
    """
    def __init__(self):
        # Bounded history of time.time() stamps, one per rendered frame.
        self.deque = deque(maxlen=60)

    def increment(self):
        """Record that a frame was just produced."""
        self.deque.append(time.time())

    def getFPS(self):
        """Return the rolling FPS, or 0 until at least two samples exist."""
        if len(self.deque) <= 1:
            return 0
        # Cleanup: keep elapsed time in a local instead of leaking a
        # throwaway ``self.elapsedTime`` attribute onto the instance.
        elapsed = time.time() - self.deque[0]
        if elapsed <= 0:
            return 0
        return len(self.deque) / elapsed
def ss_thread(q, stop_event):
    """q is a Queue object, stop_event is an Event.
    stop_event from http://stackoverflow.com/questions/6524459/stopping-a-thread-python

    Producer loop: pumps SDL events and pushes (screenshot, joystick-state)
    tuples into ``q`` until ``stop_event`` is set.
    """
    while(not stop_event.is_set()):
        # Only produce when the (maxsize=1) queue has been drained, so the
        # GUI thread always sees the freshest sample.
        if q.empty():
            sdl2.SDL_PumpEvents()
            q.put((getScreenShot(), getJoystickState(globalJoystickInput)))
class App(object):
    """Top-level window: a notebook with a recorder tab and a viewer tab,
    plus the background screenshot/joystick producer thread."""
    def __init__(self):
        self.root = tk.Tk()
        self.root.wm_title("SpartanMarioKartAi")
        self.notebook = ttk.Notebook(self.root)
        self.dataRecorderFrame = DataRecorderFrame(self.notebook)
        self.dataViewerFrame = DataViewerFrame(self.notebook)
        self.notebook.add(self.dataRecorderFrame.root, text="Record Data", compound=tk.TOP)
        self.notebook.add(self.dataViewerFrame.root, text="View Data", compound=tk.TOP)
        self.notebook.pack()
        # Producer thread feeding the recorder's queue; stoppable via event.
        self.poll_thread_stop_event = threading.Event()
        self.poll_thread = threading.Thread(target=ss_thread, name='Thread', args=(self.dataRecorderFrame.queue,self.poll_thread_stop_event))
        self.poll_thread.start()
        # Kick off the recorder's UI polling loop (interval in milliseconds).
        self.dataRecorderFrame.poll_interval = 10
        self.dataRecorderFrame.poll()
        # Ensure the producer thread is stopped when the window is closed.
        self.root.wm_protocol("WM_DELETE_WINDOW", self.cleanup_on_exit)
    def cleanup_on_exit(self):
        """Needed to shutdown the polling thread."""
        self.poll_thread_stop_event.set()
        self.root.quit() #Allow the rest of the quit process to continue
class DataRecorderFrame(object):
    """Notebook tab that previews the capture and records (image,
    joystick-state) samples to pickled files in batches of 100.

    Collaborators fill ``self.queue`` (see ``ss_thread``) and call
    ``poll()`` after setting ``poll_interval`` (milliseconds).
    """
    def __init__(self, master):
        self.root = tk.Frame(master)
        # FPS readout.
        self.fpsCounter = FPSCounter()
        self.fpsLabel = tk.Label(self.root, text='')
        self.fpsLabel.pack()
        # Live screenshot preview.
        # BUGFIX: keep a reference to the PhotoImage -- passing it inline
        # lets it be garbage collected, and tkinter then shows a blank label.
        self.ss = getScreenShot()
        self.ssImage = ImageTk.PhotoImage(self.ss)
        self.ssLabel = tk.Label(self.root, image=self.ssImage)
        self.ssLabel.pack()
        # Recording controls: output dir entry, start/stop button, timer.
        self.isRecording = False
        self.recordFrame = tk.Frame(self.root)
        self.recordLabel = tk.Label(self.recordFrame, text='Output Location: ')
        self.recordLabel.pack(side=tk.LEFT)
        self.recordEntryText = tk.StringVar()
        self.recordEntryText.set("samples")
        self.recordEntry = tk.Entry(self.recordFrame, textvariable=self.recordEntryText)
        self.recordEntry.pack(side=tk.LEFT)
        self.recordButtonText = tk.StringVar()
        self.recordButtonText.set("Start Recording")
        self.recordButton = tk.Button(self.recordFrame, textvariable=self.recordButtonText, command=self.recordButtonClick)
        self.recordButton.pack(side=tk.LEFT)
        self.recordTimeRunningText = tk.StringVar()
        self.recordTimeRunningText.set('0')
        self.recordTimeRunningLabel = tk.Label(self.recordFrame, textvariable=self.recordTimeRunningText)
        self.recordTimeRunningLabel.pack(side=tk.LEFT)
        self.recordFrame.pack()
        self.recordNTimesRecorded = 0
        # Raw joystick-state readout.
        self.jsTextArea = tk.Text(self.root, height=3)
        self.jsTextArea.pack()
        # maxsize=1 so the producer thread always hands over a fresh sample.
        self.queue = queue.Queue(maxsize=1)
        self.root.pack()

    def poll(self):
        """Consume one sample if available, refresh the UI and, when
        recording, buffer/flush samples; then reschedule itself."""
        if self.queue.qsize():
            self.queue_head = self.queue.get()
            self.fpsCounter.increment()
            self.fpsText = "FPS: %.2f" % (self.fpsCounter.getFPS())
            self.fpsLabel.configure(text=self.fpsText)
            # Rebinding self.ss keeps the PhotoImage reference alive.
            self.ss = ImageTk.PhotoImage(self.queue_head[0])
            self.ssLabel.configure(image=self.ss)
            self.ssLabel.update_idletasks()
            self.jsString = str(self.queue_head[1])
            self.jsTextArea.delete(1.0, tk.END)
            self.jsTextArea.insert(1.0, self.jsString)
            if self.isRecording:
                self.recordData.append(self.queue_head)
                # Flush to disk in batches of 100 samples.
                if len(self.recordData) >= 100:
                    # Cleanup: a local handle, not ``self.handle``.
                    with open(self.recordDirectory + '/' + str(self.recordSampleNumber), 'wb') as handle:
                        pickle.dump(self.recordData, handle)
                    self.recordSampleNumber += 1
                    self.recordData = []
                self.recordTimeRunningText.set("%.2f" % (time.time()-self.recordTimeStarted))
        self._poll_job_id = self.root.after(self.poll_interval, self.poll)

    def recordButtonClick(self):
        """Toggle recording; create the output directory on start and flush
        any partial batch on stop."""
        self.isRecording = not self.isRecording
        if self.isRecording:
            self.recordTimeStarted = time.time()
            self.recordNTimesRecorded += 1
            self.recordSampleNumber = 1
            self.recordData = []
            if not os.path.exists(self.recordEntryText.get()):
                os.makedirs(self.recordEntryText.get())
            # One numbered sub-directory per recording session.
            self.recordDirectory = self.recordEntryText.get() + '/' + str(self.recordNTimesRecorded)
            if not os.path.exists(self.recordDirectory):
                os.makedirs(self.recordDirectory)
            self.recordButtonText.set("Stop Recording")
        else:
            # Flush any partial batch before stopping.
            if len(self.recordData) > 0:
                with open(self.recordDirectory + '/' + str(self.recordSampleNumber), 'wb') as handle:
                    pickle.dump(self.recordData, handle)
            self.recordButtonText.set("Start Recording")
if(__name__ == '__main__'):
    # Initialise SDL's joystick subsystem and open the first joystick
    # before the GUI (and the producer thread) start reading it.
    sdl2.SDL_Init(sdl2.SDL_INIT_JOYSTICK)
    globalJoystick = sdl2.SDL_JoystickOpen(0)
    globalJoystickInput = JoystickInput_SDL(globalJoystick)
    app = App()
    app.root.mainloop()
    sdl2.SDL_Quit()
|
|
from cStringIO import StringIO
import boto.connection
import boto.exception
import boto.s3.connection
import boto.s3.acl
import boto.utils
import bunch
import nose
import operator
import random
import string
import socket
import ssl
import os
import re
from email.utils import formatdate
from urlparse import urlparse
from boto.s3.connection import S3Connection
from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from .utils import assert_raises
import AnonymousAuth
from email.header import decode_header
from . import (
_make_raw_request,
nuke_prefixed_buckets,
get_new_bucket,
s3,
config,
get_prefix,
TargetConnection,
targets,
)
# Original connection objects, keyed as in ``s3``, restored by teardown().
_orig_conn = {}
# Original HTTPRequest.authorize, restored by teardown() (boto >= 2.0 path).
_orig_authorize = None
# Header overrides applied (and names removed) around every request.
_custom_headers = {}
_remove_headers = []
# Which monkey-patching strategy setup() chose: 'S3Connection' or 'HTTPRequest'.
boto_type = None
# HeaderS3Connection and _our_authorize are necessary to be able to arbitrarily
# overwrite headers. Depending on the version of boto, one or the other is
# necessary. We later determine in setup what needs to be used.
def _update_headers(headers):
    """Apply the module-level customizations to *headers* in place: merge
    in ``_custom_headers``, then drop every name in ``_remove_headers``."""
    global _custom_headers, _remove_headers
    headers.update(_custom_headers)
    for name in _remove_headers:
        # pop with a default is the try/del/except-KeyError idiom in one call.
        headers.pop(name, None)
# Note: We need to update the headers twice. The first time so the
# authentication signing is done correctly. The second time to overwrite any
# headers modified or created in the authentication step.
class HeaderS3Connection(S3Connection):
    """ establish an authenticated connection w/customized headers
    """
    def fill_in_auth(self, http_request, **kwargs):
        # Apply overrides before signing (so the signature covers them) and
        # again after, to clobber anything the auth step (re)wrote.
        _update_headers(http_request.headers)
        S3Connection.fill_in_auth(self, http_request, **kwargs)
        _update_headers(http_request.headers)
        return http_request
def _our_authorize(self, connection, **kwargs):
    """ perform an authentication w/customized headers
    """
    # Apply overrides before signing (so the signature covers them) and
    # again after, to clobber anything the auth step (re)wrote.
    _update_headers(self.headers)
    _orig_authorize(self, connection, **kwargs)
    _update_headers(self.headers)
def setup():
    """Monkey-patch boto so every request passes through ``_update_headers``.

    Remembers the replaced objects in module globals so ``teardown`` can
    restore them.
    """
    global boto_type
    # we determine what we need to replace by the existence of particular
    # attributes. boto 2.0rc1 as fill_in_auth for S3Connection, while boto 2.0
    # has authorize for HTTPRequest.
    if hasattr(S3Connection, 'fill_in_auth'):
        global _orig_conn
        boto_type = 'S3Connection'
        # Swap each configured connection for a header-injecting clone,
        # remembering the original for teardown().
        for conn in s3:
            _orig_conn[conn] = s3[conn]
            header_conn = HeaderS3Connection(
                aws_access_key_id=s3[conn].aws_access_key_id,
                aws_secret_access_key=s3[conn].aws_secret_access_key,
                is_secure=s3[conn].is_secure,
                port=s3[conn].port,
                host=s3[conn].host,
                calling_format=s3[conn].calling_format
                )
            s3[conn] = header_conn
    elif hasattr(boto.connection.HTTPRequest, 'authorize'):
        global _orig_authorize
        boto_type = 'HTTPRequest'
        # Patch at the request level instead.
        _orig_authorize = boto.connection.HTTPRequest.authorize
        boto.connection.HTTPRequest.authorize = _our_authorize
    else:
        raise RuntimeError
def teardown():
    """Undo the monkey-patching performed by ``setup``."""
    global boto_type
    # replace original functionality depending on the boto version
    # BUGFIX: compare strings with ``==`` -- ``is`` tests object identity and
    # only matched here by the accident of CPython string interning (and is a
    # SyntaxWarning on Python 3.8+).
    if boto_type == 'S3Connection':
        global _orig_conn
        for conn in s3:
            s3[conn] = _orig_conn[conn]
        _orig_conn = {}
    elif boto_type == 'HTTPRequest':
        global _orig_authorize
        boto.connection.HTTPRequest.authorize = _orig_authorize
        _orig_authorize = None
    else:
        raise RuntimeError
def _clear_custom_headers():
    """ Eliminate any header customizations

    Used as a nose teardown so each test starts with pristine headers.
    """
    global _custom_headers, _remove_headers
    _custom_headers = {}
    _remove_headers = []
def _add_custom_headers(headers=None, remove=None):
    """Register header customizations for subsequent requests: *headers* is
    a mapping of values to add/replace, *remove* an iterable of header
    names to strip."""
    global _custom_headers, _remove_headers
    # Defensive reset in case the mapping was cleared to a falsy value.
    _custom_headers = _custom_headers or {}
    if headers is not None:
        _custom_headers.update(headers)
    if remove is not None:
        _remove_headers.extend(remove)
def _setup_bad_object(headers=None, remove=None):
    """ Create a new bucket, add an object w/header customizations

    Returns the (not yet uploaded) key; the overrides are applied by the
    patched auth hooks when the caller uploads to it.
    """
    bucket = get_new_bucket()
    _add_custom_headers(headers=headers, remove=remove)
    return bucket.new_key('foo')
def tag(*tags):
    """Decorator factory: mark the wrapped function with each name in
    *tags* (sets ``func.<tag> = True``) so runners can select by attribute."""
    def wrap(func):
        for name in tags:
            setattr(func, name, True)
        return func
    return wrap
#
# common tests
#
#
# Object-PUT requests with a bad or missing Content-MD5 header.
#
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/invalid MD5')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_md5_invalid_short():
    # Valid base64, but not a 16-byte MD5 digest.
    key = _setup_bad_object({'Content-MD5':'YWJyYWNhZGFicmE='})
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
    eq(e.status, 400)
    eq(e.reason.lower(), 'bad request') # some proxies vary the case
    eq(e.error_code, 'InvalidDigest')

@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/mismatched MD5')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_md5_bad():
    # Well-formed digest that doesn't match the uploaded body.
    key = _setup_bad_object({'Content-MD5':'rL0Y20zC+Fzt72VPzMSk2A=='})
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
    eq(e.status, 400)
    eq(e.reason.lower(), 'bad request') # some proxies vary the case
    eq(e.error_code, 'BadDigest')

@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty MD5')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_md5_empty():
    key = _setup_bad_object({'Content-MD5': ''})
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
    eq(e.status, 400)
    eq(e.reason.lower(), 'bad request') # some proxies vary the case
    eq(e.error_code, 'InvalidDigest')

@attr('fails_on_s3proxy')
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphics in MD5')
@attr(assertion='fails 403')
@attr('fails_strict_rfc2616')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_md5_unreadable():
    # A control character in the header breaks the request signature.
    key = _setup_bad_object({'Content-MD5': '\x07'})
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
    eq(e.status, 403)
    eq(e.reason, 'Forbidden')
    assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch')

@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no MD5 header')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_md5_none():
    # Content-MD5 is optional, so the upload succeeds without it.
    key = _setup_bad_object(remove=('Content-MD5',))
    key.set_contents_from_string('bar')
# Object-PUT requests exercising the Expect request header.
# strangely, amazon doesn't report an error with a non-expect 100 also, our
# error comes back as html, and not xml as I normally expect
@attr('fails_on_s3proxy')
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/Expect 200')
@attr(assertion='garbage, but S3 succeeds!')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
def test_object_create_bad_expect_mismatch():
    key = _setup_bad_object({'Expect': 200})
    key.set_contents_from_string('bar')

# this is a really long test, and I don't know if it's valid...
# again, accepts this with no troubles
@attr('fails_on_s3proxy')
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty expect')
@attr(assertion='succeeds ... should it?')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_expect_empty():
    key = _setup_bad_object({'Expect': ''})
    key.set_contents_from_string('bar')

@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no expect')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_expect_none():
    key = _setup_bad_object(remove=('Expect',))
    key.set_contents_from_string('bar')

# this is a really long test..
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic expect')
@attr(assertion='garbage, but S3 succeeds!')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
@attr('fails_on_s3proxy')
@attr('fails_strict_rfc2616')
def test_object_create_bad_expect_unreadable():
    key = _setup_bad_object({'Expect': '\x07'})
    key.set_contents_from_string('bar')
# Object-PUT requests exercising the Content-Length header.
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty content length')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
@attr('fails_on_s3proxy')
def test_object_create_bad_contentlength_empty():
    key = _setup_bad_object({'Content-Length': ''})
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
    eq(e.status, 400)
    eq(e.reason.lower(), 'bad request') # some proxies vary the case
    eq(e.error_code, None)

@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/negative content length')
@attr(assertion='fails 400')
@attr('fails_on_mod_proxy_fcgi')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_contentlength_negative():
    key = _setup_bad_object({'Content-Length': -1})
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
    eq(e.status, 400)
    eq(e.reason.lower(), 'bad request') # some proxies vary the case

@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no content length')
@attr(assertion='fails 411')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_contentlength_none():
    key = _setup_bad_object(remove=('Content-Length',))
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
    eq(e.status, 411)
    eq(e.reason, 'Length Required')
    eq(e.error_code,'MissingContentLength')

@attr('fails_on_s3proxy')
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic content length')
@attr(assertion='fails 400')
@attr('fails_on_mod_proxy_fcgi')
@attr('fails_strict_rfc2616')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_contentlength_unreadable():
    key = _setup_bad_object({'Content-Length': '\x07'})
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
    eq(e.status, 400)
    eq(e.reason.lower(), 'bad request') # some proxies vary the case
    eq(e.error_code, None)

@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/content length too long')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
def test_object_create_bad_contentlength_mismatch_above():
    # Declare one byte more than the actual body.
    content = 'bar'
    length = len(content) + 1
    key = _setup_bad_object({'Content-Length': length})
    # Disable retries since key.should_retry will discard the response with
    # PleaseRetryException.
    def no_retry(response, chunked_transfer): return False
    key.should_retry = no_retry
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, content)
    eq(e.status, 400)
    eq(e.reason.lower(), 'bad request') # some proxies vary the case
    eq(e.error_code, 'RequestTimeout')
# Object-PUT requests exercising the Content-Type header.
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/content type text/plain')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_contenttype_invalid():
    key = _setup_bad_object({'Content-Type': 'text/plain'})
    key.set_contents_from_string('bar')

@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty content type')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_contenttype_empty():
    key = _setup_bad_object({'Content-Type': ''})
    key.set_contents_from_string('bar')

@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no content type')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_contenttype_none():
    key = _setup_bad_object(remove=('Content-Type',))
    key.set_contents_from_string('bar')

@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic content type')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
@attr('fails_on_s3proxy')
@attr('fails_strict_rfc2616')
def test_object_create_bad_contenttype_unreadable():
    # A control character in the header breaks the request signature.
    key = _setup_bad_object({'Content-Type': '\x08'})
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
    eq(e.status, 403)
    eq(e.reason, 'Forbidden')
    assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
# Object-PUT requests exercising the Authorization and Date headers.
# the teardown is really messed up here. check it out
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic authorization')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
@attr('fails_on_s3proxy')
@attr('fails_strict_rfc2616')
def test_object_create_bad_authorization_unreadable():
    key = _setup_bad_object({'Authorization': '\x07'})
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
    eq(e.status, 403)
    eq(e.reason, 'Forbidden')
    eq(e.error_code, 'AccessDenied')

@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty authorization')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_authorization_empty():
    key = _setup_bad_object({'Authorization': ''})
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
    eq(e.status, 403)
    eq(e.reason, 'Forbidden')
    eq(e.error_code, 'AccessDenied')

@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date and x-amz-date')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_s3proxy')
def test_object_create_date_and_amz_date():
    date = formatdate(usegmt=True)
    key = _setup_bad_object({'Date': date, 'X-Amz-Date': date})
    key.set_contents_from_string('bar')

@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/x-amz-date and no date')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_amz_date_and_no_date():
    # X-Amz-Date alone must be accepted in place of Date.
    date = formatdate(usegmt=True)
    key = _setup_bad_object({'X-Amz-Date': date}, ('Date',))
    key.set_contents_from_string('bar')
# the teardown is really messed up here. check it out
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no authorization')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_authorization_none():
    key = _setup_bad_object(remove=('Authorization',))
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
    eq(e.status, 403)
    eq(e.reason, 'Forbidden')
    eq(e.error_code, 'AccessDenied')

# Bucket-level and ACL requests with missing/odd headers.
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/no content length')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_contentlength_none():
    _add_custom_headers(remove=('Content-Length',))
    get_new_bucket()

@attr('fails_on_s3proxy')
@tag('auth_common')
@attr(resource='bucket')
@attr(method='acls')
@attr(operation='set w/no content length')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_acl_create_contentlength_none():
    bucket = get_new_bucket()
    key = bucket.new_key('foo')
    key.set_contents_from_string('blah')
    _add_custom_headers(remove=('Content-Length',))
    key.set_acl('public-read')

@tag('auth_common')
@attr(resource='bucket')
@attr(method='acls')
@attr(operation='set w/invalid permission')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_put_bad_canned_acl():
    bucket = get_new_bucket()
    # 'public-ready' is a typo'd canned ACL the server must reject.
    _add_custom_headers({'x-amz-acl': 'public-ready'})
    e = assert_raises(boto.exception.S3ResponseError, bucket.set_acl, 'public-read')
    eq(e.status, 400)
# Bucket-PUT requests exercising the Expect header.
# strangely, amazon doesn't report an error with a non-expect 100 also, our
# error comes back as html, and not xml as I normally expect
@attr('fails_on_s3proxy')
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/expect 200')
@attr(assertion='garbage, but S3 succeeds!')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
def test_bucket_create_bad_expect_mismatch():
    _add_custom_headers({'Expect':200})
    bucket = get_new_bucket()

# this is a really long test, and I don't know if it's valid...
# again, accepts this with no troubles
@attr('fails_on_s3proxy')
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/expect empty')
@attr(assertion='garbage, but S3 succeeds!')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_expect_empty():
    _add_custom_headers({'Expect': ''})
    bucket = get_new_bucket()

@attr('fails_on_s3proxy')
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/expect nongraphic')
@attr(assertion='garbage, but S3 succeeds!')
# this is a really long test..
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
@attr('fails_strict_rfc2616')
def test_bucket_create_bad_expect_unreadable():
    _add_custom_headers({'Expect': '\x07'})
    bucket = get_new_bucket()
def _create_new_connection():
    """Return a TargetConnection wrapping a fresh header-injecting clone of
    the ``s3.main`` connection settings."""
    # We're going to need to manually build a connection using bad authorization info.
    # But to save the day, lets just hijack the settings from s3.main. :)
    main = s3.main
    conn = HeaderS3Connection(
        aws_access_key_id=main.aws_access_key_id,
        aws_secret_access_key=main.aws_secret_access_key,
        is_secure=main.is_secure,
        port=main.port,
        host=main.host,
        calling_format=main.calling_format,
        )
    return TargetConnection(targets.main.default.conf, conn)
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/empty content length')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
@attr('fails_on_s3proxy')
def test_bucket_create_bad_contentlength_empty():
conn = _create_new_connection()
_add_custom_headers({'Content-Length': ''})
e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, conn)
eq(e.status, 400)
eq(e.reason.lower(), 'bad request') # some proxies vary the case
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/negative content length')
@attr(assertion='fails 400')
@attr('fails_on_mod_proxy_fcgi')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_contentlength_negative():
    """A negative Content-Length header must be rejected with 400 Bad Request."""
    _add_custom_headers({'Content-Length': -1})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/no content length')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_contentlength_none():
    """Bucket creation with no Content-Length header succeeds."""
    _add_custom_headers(remove=('Content-Length',))
    get_new_bucket()
@attr('fails_on_s3proxy')
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/non-graphic content length')
@attr(assertion='fails 400')
@attr('fails_on_mod_proxy_fcgi')
@attr('fails_strict_rfc2616')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_contentlength_unreadable():
    """A non-graphic Content-Length value yields 400 with no error code."""
    _add_custom_headers({'Content-Length': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    eq(err.error_code, None)
# NOTE: the teardown is really messed up here. check it out
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/non-graphic authorization')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
@attr('fails_on_s3proxy')
@attr('fails_strict_rfc2616')
def test_bucket_create_bad_authorization_unreadable():
    """A non-graphic Authorization header is rejected as AccessDenied."""
    _add_custom_headers({'Authorization': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/empty authorization')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_authorization_empty():
    """An empty Authorization header is rejected as AccessDenied."""
    _add_custom_headers({'Authorization': ''})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
# NOTE: the teardown is really messed up here. check it out
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/no authorization')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_authorization_none():
    """A request with the Authorization header removed is rejected as AccessDenied."""
    _add_custom_headers(remove=('Authorization',))
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
#
# AWS2 specific tests
#
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/invalid MD5')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_md5_invalid_garbage_aws2():
    """A garbage Content-MD5 value is rejected as InvalidDigest (AWS2)."""
    check_aws2_support()
    obj = _setup_bad_object({'Content-MD5':'AWS HAHAHA'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    eq(err.error_code, 'InvalidDigest')
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/content length too short')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_contentlength_mismatch_below_aws2():
    """A Content-Length shorter than the body is rejected as BadDigest (AWS2)."""
    check_aws2_support()
    payload = 'bar'
    short_length = len(payload) - 1
    obj = _setup_bad_object({'Content-Length': short_length})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, payload)
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    eq(err.error_code, 'BadDigest')
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/incorrect authorization')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_authorization_incorrect_aws2():
    """A well-formed but wrong AWS2 Authorization header is rejected with 403."""
    check_aws2_support()
    obj = _setup_bad_object({'Authorization': 'AWS AKIAIGR7ZNNBHC5BKSUB:FWeDfwojDSdS2Ztmpfeubhd9isU='})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch', 'InvalidAccessKeyId')
@tag('auth_aws2')
@nose.with_setup(teardown=_clear_custom_headers)
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/invalid authorization')
@attr(assertion='fails 400')
def test_object_create_bad_authorization_invalid_aws2():
    """A malformed AWS2 Authorization header is rejected as InvalidArgument."""
    check_aws2_support()
    obj = _setup_bad_object({'Authorization': 'AWS HAHAHA'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    eq(err.error_code, 'InvalidArgument')
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty user agent')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_ua_empty_aws2():
    """Object PUT with an empty User-Agent succeeds under AWS2 signing."""
    check_aws2_support()
    obj = _setup_bad_object({'User-Agent': ''})
    obj.set_contents_from_string('bar')
@attr('fails_on_s3proxy')
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic user agent')
@attr(assertion='succeeds')
@attr('fails_strict_rfc2616')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_ua_unreadable_aws2():
    """Object PUT with a non-graphic User-Agent succeeds under AWS2 signing."""
    check_aws2_support()
    obj = _setup_bad_object({'User-Agent': '\x07'})
    obj.set_contents_from_string('bar')
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no user agent')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_ua_none_aws2():
    """Object PUT with the User-Agent header removed succeeds under AWS2 signing."""
    check_aws2_support()
    obj = _setup_bad_object(remove=('User-Agent',))
    obj.set_contents_from_string('bar')
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/invalid date')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_invalid_aws2():
    """An unparseable Date header is rejected as AccessDenied (AWS2)."""
    check_aws2_support()
    obj = _setup_bad_object({'Date': 'Bad Date'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@attr('fails_on_s3proxy')
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty date')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_empty_aws2():
    """An empty Date header is rejected as AccessDenied (AWS2)."""
    check_aws2_support()
    obj = _setup_bad_object({'Date': ''})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic date')
@attr(assertion='fails 403')
@attr('fails_on_s3proxy')
@attr('fails_strict_rfc2616')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_unreadable_aws2():
    """A non-graphic Date header is rejected as AccessDenied (AWS2)."""
    check_aws2_support()
    obj = _setup_bad_object({'Date': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no date')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_none_aws2():
    """A request with the Date header removed is rejected as AccessDenied (AWS2)."""
    check_aws2_support()
    obj = _setup_bad_object(remove=('Date',))
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date in past')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_before_today_aws2():
    """A Date far in the past is rejected as RequestTimeTooSkewed (AWS2)."""
    check_aws2_support()
    obj = _setup_bad_object({'Date': 'Tue, 07 Jul 2010 21:53:04 GMT'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'RequestTimeTooSkewed')
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date in future')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_after_today_aws2():
    """A Date far in the future is rejected as RequestTimeTooSkewed (AWS2)."""
    check_aws2_support()
    obj = _setup_bad_object({'Date': 'Tue, 07 Jul 2030 21:53:04 GMT'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'RequestTimeTooSkewed')
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date before epoch')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_before_epoch_aws2():
    """A pre-epoch Date is rejected as AccessDenied (AWS2)."""
    check_aws2_support()
    obj = _setup_bad_object({'Date': 'Tue, 07 Jul 1950 21:53:04 GMT'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date after 9999')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_after_end_aws2():
    """A year-9999 Date is rejected as RequestTimeTooSkewed (AWS2)."""
    check_aws2_support()
    obj = _setup_bad_object({'Date': 'Tue, 07 Jul 9999 21:53:04 GMT'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'RequestTimeTooSkewed')
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/invalid authorization')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_authorization_invalid_aws2():
    """A malformed AWS2 Authorization header on bucket create is InvalidArgument."""
    check_aws2_support()
    _add_custom_headers({'Authorization': 'AWS HAHAHA'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    eq(err.error_code, 'InvalidArgument')
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/empty user agent')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_ua_empty_aws2():
    """Bucket creation with an empty User-Agent succeeds under AWS2 signing."""
    check_aws2_support()
    _add_custom_headers({'User-Agent': ''})
    get_new_bucket()
@attr('fails_on_s3proxy')
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/non-graphic user agent')
@attr(assertion='succeeds')
@attr('fails_strict_rfc2616')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_ua_unreadable_aws2():
    """Bucket creation with a non-graphic User-Agent succeeds under AWS2 signing."""
    check_aws2_support()
    _add_custom_headers({'User-Agent': '\x07'})
    get_new_bucket()
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/no user agent')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_ua_none_aws2():
    """Bucket creation with the User-Agent header removed succeeds under AWS2."""
    check_aws2_support()
    _add_custom_headers(remove=('User-Agent',))
    get_new_bucket()
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/invalid date')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_invalid_aws2():
    """An unparseable Date on bucket create is rejected as AccessDenied (AWS2)."""
    check_aws2_support()
    _add_custom_headers({'Date': 'Bad Date'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@attr('fails_on_s3proxy')
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/empty date')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_empty_aws2():
    """An empty Date on bucket create is rejected as AccessDenied (AWS2)."""
    check_aws2_support()
    _add_custom_headers({'Date': ''})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/non-graphic date')
@attr(assertion='fails 403')
@attr('fails_on_s3proxy')
@attr('fails_strict_rfc2616')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_unreadable_aws2():
    """A non-graphic Date on bucket create is rejected as AccessDenied (AWS2)."""
    check_aws2_support()
    _add_custom_headers({'Date': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/no date')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_none_aws2():
    """Bucket create with the Date header removed is AccessDenied (AWS2)."""
    check_aws2_support()
    _add_custom_headers(remove=('Date',))
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/date in past')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_before_today_aws2():
    """A past Date on bucket create is RequestTimeTooSkewed (AWS2)."""
    check_aws2_support()
    _add_custom_headers({'Date': 'Tue, 07 Jul 2010 21:53:04 GMT'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'RequestTimeTooSkewed')
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/date in future')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_after_today_aws2():
    """A future Date on bucket create is RequestTimeTooSkewed (AWS2)."""
    check_aws2_support()
    _add_custom_headers({'Date': 'Tue, 07 Jul 2030 21:53:04 GMT'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'RequestTimeTooSkewed')
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/date before epoch')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_before_epoch_aws2():
    """A pre-epoch Date on bucket create is AccessDenied (AWS2)."""
    check_aws2_support()
    _add_custom_headers({'Date': 'Tue, 07 Jul 1950 21:53:04 GMT'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
#
# AWS4 specific tests
#
def check_aws4_support():
    """Skip the calling test unless SigV4 signing is enabled.

    Raises nose's SkipTest when the S3_USE_SIGV4 environment variable is
    absent, so AWS4-only tests are reported as skipped rather than failed.
    """
    if 'S3_USE_SIGV4' not in os.environ:
        # include a reason so the skip shows up meaningfully in test output
        raise SkipTest('AWS4 (SigV4) tests require S3_USE_SIGV4 in the environment')
def check_aws2_support():
    """Skip the calling test when SigV4 signing is enabled.

    Raises nose's SkipTest when the S3_USE_SIGV4 environment variable is
    set, so AWS2-only tests are reported as skipped rather than failed.
    """
    if 'S3_USE_SIGV4' in os.environ:
        # include a reason so the skip shows up meaningfully in test output
        raise SkipTest('AWS2 (SigV2) tests are skipped while S3_USE_SIGV4 is set')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/invalid MD5')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_md5_invalid_garbage_aws4():
    """A garbage Content-MD5 value is rejected as InvalidDigest (AWS4)."""
    check_aws4_support()
    obj = _setup_bad_object({'Content-MD5':'AWS4 HAHAHA'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    eq(err.error_code, 'InvalidDigest')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/content length too short')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_contentlength_mismatch_below_aws4():
    """A short Content-Length is rejected as XAmzContentSHA256Mismatch (AWS4)."""
    check_aws4_support()
    payload = 'bar'
    short_length = len(payload) - 1
    obj = _setup_bad_object({'Content-Length': short_length})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, payload)
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    eq(err.error_code, 'XAmzContentSHA256Mismatch')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/incorrect authorization')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_authorization_incorrect_aws4():
    """A well-formed but wrong AWS4 Authorization header is rejected with 403."""
    check_aws4_support()
    obj = _setup_bad_object({'Authorization': 'AWS4-HMAC-SHA256 Credential=AKIAIGR7ZNNBHC5BKSUB/20150930/us-east-1/s3/aws4_request,SignedHeaders=host;user-agent,Signature=FWeDfwojDSdS2Ztmpfeubhd9isU='})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch', 'InvalidAccessKeyId')
@tag('auth_aws4')
@nose.with_setup(teardown=_clear_custom_headers)
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/invalid authorization')
@attr(assertion='fails 400')
def test_object_create_bad_authorization_invalid_aws4():
    """A malformed AWS4 Authorization header is rejected with 400."""
    check_aws4_support()
    obj = _setup_bad_object({'Authorization': 'AWS4-HMAC-SHA256 Credential=HAHAHA'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    assert err.error_code in ('AuthorizationHeaderMalformed', 'InvalidArgument')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty user agent')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_ua_empty_aws4():
    """Tampering with the (signed) User-Agent breaks the AWS4 signature."""
    check_aws4_support()
    obj = _setup_bad_object({'User-Agent': ''})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic user agent')
@attr(assertion='fails 403')
@attr('fails_strict_rfc2616')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_ua_unreadable_aws4():
    """A non-graphic User-Agent breaks the AWS4 signature."""
    check_aws4_support()
    obj = _setup_bad_object({'User-Agent': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no user agent')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_ua_none_aws4():
    """Removing the signed User-Agent header breaks the AWS4 signature."""
    check_aws4_support()
    obj = _setup_bad_object(remove=('User-Agent',))
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/invalid date')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_invalid_aws4():
    """AWS4 ignores the Date header (X-Amz-Date is authoritative), so this succeeds."""
    check_aws4_support()
    obj = _setup_bad_object({'Date': 'Bad Date'})
    obj.set_contents_from_string('bar')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/invalid x-amz-date')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_amz_date_invalid_aws4():
    """An unparseable X-Amz-Date is rejected with 403 (AWS4)."""
    check_aws4_support()
    obj = _setup_bad_object({'X-Amz-Date': 'Bad Date'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty date')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_empty_aws4():
    """AWS4 ignores the Date header, so an empty value succeeds."""
    check_aws4_support()
    obj = _setup_bad_object({'Date': ''})
    obj.set_contents_from_string('bar')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty x-amz-date')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_amz_date_empty_aws4():
    """An empty X-Amz-Date is rejected with 403 (AWS4)."""
    check_aws4_support()
    obj = _setup_bad_object({'X-Amz-Date': ''})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic date')
@attr(assertion='fails 403')
@attr('fails_strict_rfc2616')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_unreadable_aws4():
    """A non-graphic Date breaks the AWS4 signature."""
    check_aws4_support()
    obj = _setup_bad_object({'Date': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic x-amz-date')
@attr(assertion='fails 403')
@attr('fails_strict_rfc2616')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_amz_date_unreadable_aws4():
    """A non-graphic X-Amz-Date is rejected with 403 (AWS4)."""
    check_aws4_support()
    obj = _setup_bad_object({'X-Amz-Date': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no date')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_none_aws4():
    """AWS4 ignores the Date header, so removing it succeeds."""
    check_aws4_support()
    obj = _setup_bad_object(remove=('Date',))
    obj.set_contents_from_string('bar')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no x-amz-date')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_amz_date_none_aws4():
    """Removing X-Amz-Date is rejected with 403 (AWS4)."""
    check_aws4_support()
    obj = _setup_bad_object(remove=('X-Amz-Date',))
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date in past')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_before_today_aws4():
    """AWS4 ignores the Date header, so a past Date succeeds."""
    check_aws4_support()
    obj = _setup_bad_object({'Date': 'Tue, 07 Jul 2010 21:53:04 GMT'})
    obj.set_contents_from_string('bar')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/x-amz-date in past')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_amz_date_before_today_aws4():
    """An X-Amz-Date far in the past is rejected with 403 (AWS4)."""
    check_aws4_support()
    obj = _setup_bad_object({'X-Amz-Date': '20100707T215304Z'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date in future')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_after_today_aws4():
    """AWS4 ignores the Date header, so a future Date succeeds."""
    check_aws4_support()
    obj = _setup_bad_object({'Date': 'Tue, 07 Jul 2030 21:53:04 GMT'})
    obj.set_contents_from_string('bar')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/x-amz-date in future')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_amz_date_after_today_aws4():
    """An X-Amz-Date far in the future is rejected with 403 (AWS4)."""
    check_aws4_support()
    obj = _setup_bad_object({'X-Amz-Date': '20300707T215304Z'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date before epoch')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_before_epoch_aws4():
    """AWS4 ignores the Date header, so a pre-epoch Date succeeds."""
    check_aws4_support()
    obj = _setup_bad_object({'Date': 'Tue, 07 Jul 1950 21:53:04 GMT'})
    obj.set_contents_from_string('bar')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/x-amz-date before epoch')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_amz_date_before_epoch_aws4():
    """A pre-epoch X-Amz-Date is rejected with 403 (AWS4)."""
    check_aws4_support()
    obj = _setup_bad_object({'X-Amz-Date': '19500707T215304Z'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date after 9999')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_after_end_aws4():
    """AWS4 ignores the Date header, so even a year-9999 Date succeeds."""
    check_aws4_support()
    obj = _setup_bad_object({'Date': 'Tue, 07 Jul 9999 21:53:04 GMT'})
    obj.set_contents_from_string('bar')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/x-amz-date after 9999')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_amz_date_after_end_aws4():
    """A year-9999 X-Amz-Date is rejected with 403 (AWS4)."""
    check_aws4_support()
    obj = _setup_bad_object({'X-Amz-Date': '99990707T215304Z'})
    err = assert_raises(boto.exception.S3ResponseError, obj.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create with missing signed custom header')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_missing_signed_custom_header_aws4():
    """Sign a URL that covers a custom x-amz header, then omit that header."""
    check_aws4_support()
    method = 'PUT'
    expires_in = '100000'
    bucket = get_new_bucket()
    key = bucket.new_key('foo')
    body = 'zoo'
    # Sign with 'x-amz-foo: bar' included in the signed headers...
    signed_headers = {'x-amz-foo': 'bar'}
    url = key.generate_url(expires_in, method=method, headers=signed_headers)
    parsed = urlparse(url)
    path = parsed.path + '?' + parsed.query
    # ...but drop it before the request actually goes out.
    signed_headers.pop('x-amz-foo')
    res = _make_raw_request(host=s3.main.host, port=s3.main.port, method=method, path=path,
                            body=body, request_headers=signed_headers, secure=s3.main.is_secure)
    eq(res.status, 403)
    eq(res.reason, 'Forbidden')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
# fix: was misspelled 'opearation', which set the wrong metadata attribute
@attr(operation='create with missing signed header')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_missing_signed_header_aws4():
    """A signed URL with the signed X-Amz-Expires parameter stripped must fail 403."""
    check_aws4_support()
    method = 'PUT'
    expires_in = '100000'
    bucket = get_new_bucket()
    key = bucket.new_key('foo')
    body = 'zoo'
    # compute the signature over the complete query string...
    request_headers = {}
    url = key.generate_url(expires_in, method=method, headers=request_headers)
    o = urlparse(url)
    path = o.path + '?' + o.query
    # ...then remove the signed 'X-Amz-Expires' query parameter before sending
    target = r'&X-Amz-Expires=' + expires_in
    path = re.sub(target, '', path)
    res = _make_raw_request(host=s3.main.host, port=s3.main.port, method=method, path=path,
                            body=body, request_headers=request_headers, secure=s3.main.is_secure)
    eq(res.status, 403)
    eq(res.reason, 'Forbidden')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/invalid authorization')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_authorization_invalid_aws4():
    """A malformed AWS4 Authorization on bucket create is InvalidArgument."""
    check_aws4_support()
    _add_custom_headers({'Authorization': 'AWS4 HAHAHA'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    eq(err.error_code, 'InvalidArgument')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/empty user agent')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_ua_empty_aws4():
    """An empty (signed) User-Agent on bucket create breaks the AWS4 signature."""
    check_aws4_support()
    _add_custom_headers({'User-Agent': ''})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/non-graphic user agent')
@attr(assertion='fails 403')
@attr('fails_strict_rfc2616')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_ua_unreadable_aws4():
    """A non-graphic User-Agent on bucket create breaks the AWS4 signature."""
    check_aws4_support()
    _add_custom_headers({'User-Agent': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/no user agent')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_ua_none_aws4():
    """Removing the signed User-Agent on bucket create breaks the AWS4 signature."""
    check_aws4_support()
    _add_custom_headers(remove=('User-Agent',))
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/invalid date')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_invalid_aws4():
    """AWS4 ignores the Date header, so an unparseable Date succeeds."""
    check_aws4_support()
    _add_custom_headers({'Date': 'Bad Date'})
    get_new_bucket()
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/invalid x-amz-date')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_amz_date_invalid_aws4():
    """An unparseable X-Amz-Date on bucket create is rejected with 403 (AWS4)."""
    check_aws4_support()
    _add_custom_headers({'X-Amz-Date': 'Bad Date'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/empty date')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_empty_aws4():
    """AWS4 ignores the Date header, so an empty Date succeeds."""
    check_aws4_support()
    _add_custom_headers({'Date': ''})
    get_new_bucket()
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/empty x-amz-date')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_amz_date_empty_aws4():
    """An empty X-Amz-Date on bucket create is rejected with 403 (AWS4)."""
    check_aws4_support()
    _add_custom_headers({'X-Amz-Date': ''})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/non-graphic date')
@attr(assertion='fails 403')
@attr('fails_strict_rfc2616')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_unreadable_aws4():
    """A non-graphic Date on bucket create breaks the AWS4 signature."""
    check_aws4_support()
    _add_custom_headers({'Date': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/non-graphic x-amz-date')
@attr(assertion='fails 403')
@attr('fails_strict_rfc2616')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_amz_date_unreadable_aws4():
    """A non-graphic X-Amz-Date on bucket create is rejected with 403 (AWS4)."""
    check_aws4_support()
    _add_custom_headers({'X-Amz-Date': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/no date')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_none_aws4():
    """AWS4 ignores the Date header, so removing it succeeds."""
    check_aws4_support()
    _add_custom_headers(remove=('Date',))
    get_new_bucket()
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/no x-amz-date')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_amz_date_none_aws4():
    """Removing the X-Amz-Date header must be rejected with 403 under AWS4."""
    check_aws4_support()
    _add_custom_headers(remove=('X-Amz-Date',))
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/date in past')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_before_today_aws4():
    """Bucket creation succeeds with a Date far in the past under AWS4 auth.

    AWS4 clock-skew checks apply to X-Amz-Date, not the classic Date header.
    """
    check_aws4_support()
    _add_custom_headers({'Date': 'Tue, 07 Jul 2010 21:53:04 GMT'})
    get_new_bucket()
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/x-amz-date in past')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_amz_date_before_today_aws4():
    """An X-Amz-Date far in the past must fail the AWS4 clock-skew check."""
    check_aws4_support()
    _add_custom_headers({'X-Amz-Date': '20100707T215304Z'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/date in future')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_after_today_aws4():
    """Bucket creation succeeds with a Date far in the future under AWS4 auth.

    Mirrors the past-date case: the Date header is not part of AWS4 signing.
    """
    check_aws4_support()
    _add_custom_headers({'Date': 'Tue, 07 Jul 2030 21:53:04 GMT'})
    get_new_bucket()
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/x-amz-date in future')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_amz_date_after_today_aws4():
    """An X-Amz-Date far in the future must fail the AWS4 clock-skew check."""
    check_aws4_support()
    _add_custom_headers({'X-Amz-Date': '20300707T215304Z'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/date before epoch')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_before_epoch_aws4():
    """Bucket creation succeeds with a pre-epoch Date under AWS4 auth.

    The Date header is ignored by AWS4 signing, even for pre-1970 values.
    """
    check_aws4_support()
    _add_custom_headers({'Date': 'Tue, 07 Jul 1950 21:53:04 GMT'})
    get_new_bucket()
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/x-amz-date before epoch')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_amz_date_before_epoch_aws4():
    """A pre-epoch X-Amz-Date must be rejected with 403 under AWS4 auth."""
    check_aws4_support()
    _add_custom_headers({'X-Amz-Date': '19500707T215304Z'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_concurrency import processutils
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _, _LI, _LW, _LE
from cinder import utils
from cinder.volume.targets import driver
from cinder.volume import utils as vutils
LOG = logging.getLogger(__name__)
class ISCSITarget(driver.Target):
    """Target object for block storage devices.

    Base class for target object, where target
    is data transport mechanism (target) specific calls.
    This includes things like create targets, attach, detach
    etc.
    """

    def __init__(self, *args, **kwargs):
        super(ISCSITarget, self).__init__(*args, **kwargs)
        self.iscsi_target_prefix = \
            self.configuration.safe_get('iscsi_target_prefix')
        self.iscsi_protocol = \
            self.configuration.safe_get('iscsi_protocol')
        self.protocol = 'iSCSI'
        self.volumes_dir = self.configuration.safe_get('volumes_dir')

    def _get_iscsi_properties(self, volume, multipath=False):
        """Gets iscsi configuration.

        We ideally get saved information in the volume entity, but fall back
        to discovery if need be. Discovery may be completely removed in the
        future.

        The properties are:

        :target_discovered: boolean indicating whether discovery was used
        :target_iqn: the IQN of the iSCSI target
        :target_portal: the portal of the iSCSI target
        :target_lun: the lun of the iSCSI target
        :volume_id: the uuid of the volume
        :auth_method:, :auth_username:, :auth_password:
            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.
        :access_mode: the volume access mode allow client used
            ('rw' or 'ro' currently supported)
        :discard: boolean indicating if discard is supported

        In some of drivers that support multiple connections (for multipath
        and for single path with failover on connection failure), it returns
        :target_iqns, :target_portals, :target_luns, which contain lists of
        multiple values. The main portal information is also returned in
        :target_iqn, :target_portal, :target_lun for backward compatibility.

        Note that some of drivers don't return :target_portals even if they
        support multipath. Then the connector should use sendtargets discovery
        to find the other portals if it supports multipath.
        """
        properties = {}
        location = volume['provider_location']
        if location:
            # provider_location is the same format as iSCSI discovery output
            properties['target_discovered'] = False
        else:
            location = self._do_iscsi_discovery(volume)
            if not location:
                msg = (_("Could not find iSCSI export for volume %s") %
                       (volume['name']))
                raise exception.InvalidVolume(reason=msg)
            # Lazy %-style args: only formatted when DEBUG logging is enabled.
            LOG.debug("ISCSI Discovery: Found %s", location)
            properties['target_discovered'] = True
        # location format: "<portal>[;<portal>...],<target> <iqn> [<lun>]"
        results = location.split(" ")
        portals = results[0].split(",")[0].split(";")
        iqn = results[1]
        nr_portals = len(portals)
        try:
            lun = int(results[2])
        except (IndexError, ValueError):
            # NOTE(jdg): The following is carried over from the existing
            # code. The trick here is that different targets use different
            # default lun numbers, the base driver with tgtadm uses 1
            # others like LIO use 0.
            if (self.configuration.volume_driver ==
                    'cinder.volume.drivers.lvm.ThinLVMVolumeDriver' and
                    self.configuration.iscsi_helper == 'tgtadm'):
                lun = 1
            else:
                lun = 0
        if nr_portals > 1 or multipath:
            # Expose the full portal list for multipath-capable connectors;
            # iqn/lun are duplicated per portal.
            properties['target_portals'] = portals
            properties['target_iqns'] = [iqn] * nr_portals
            properties['target_luns'] = [lun] * nr_portals
        # Single-path keys are always set for backward compatibility.
        properties['target_portal'] = portals[0]
        properties['target_iqn'] = iqn
        properties['target_lun'] = lun
        properties['volume_id'] = volume['id']
        auth = volume['provider_auth']
        if auth:
            # 'provider_auth' format: "<method> <username> <secret>"
            (auth_method, auth_username, auth_secret) = auth.split()
            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret
        geometry = volume.get('provider_geometry', None)
        if geometry:
            (physical_block_size, logical_block_size) = geometry.split()
            properties['physical_block_size'] = physical_block_size
            properties['logical_block_size'] = logical_block_size
        encryption_key_id = volume.get('encryption_key_id', None)
        properties['encrypted'] = encryption_key_id is not None
        return properties

    def _iscsi_authentication(self, chap, name, password):
        """Build the space-separated 'provider_auth' / CHAP triple string."""
        return "%s %s %s" % (chap, name, password)

    def _do_iscsi_discovery(self, volume):
        """Run sendtargets discovery and return the matching target line.

        Returns None when discovery fails or no target matches both the
        configured iscsi_ip_address and the volume id.
        """
        # TODO(justinsb): Deprecate discovery and use stored info
        # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
        LOG.warning(_LW("ISCSI provider_location not stored, using discovery"))
        volume_id = volume['id']
        try:
            # NOTE(griff) We're doing the split straight away which should be
            # safe since using '@' in hostname is considered invalid
            (out, _err) = utils.execute('iscsiadm', '-m', 'discovery',
                                        '-t', 'sendtargets', '-p',
                                        volume['host'].split('@')[0],
                                        run_as_root=True)
        except processutils.ProcessExecutionError as ex:
            LOG.error(_LE("ISCSI discovery attempt failed for:%s"),
                      volume['host'].split('@')[0])
            LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr)
            return None
        for target in out.splitlines():
            if (self.configuration.safe_get('iscsi_ip_address') in target
                    and volume_id in target):
                return target
        return None

    def _get_portals_config(self):
        """Return the portal IPs/port kwargs shared by target creation calls."""
        # NOTE: parenthesize the 'or' fallback -- the previous form
        # '[ip] + secondary or []' evaluated '+' first and raised TypeError
        # when iscsi_secondary_ip_addresses was None.
        portals_ips = ([self.configuration.iscsi_ip_address]
                       + (self.configuration.iscsi_secondary_ip_addresses
                          or []))
        return {'portals_ips': portals_ips,
                'portals_port': self.configuration.iscsi_port}

    def create_export(self, context, volume, volume_path):
        """Creates an export for a logical volume.

        Returns a dict with 'location' (provider_location string) and
        'auth' (CHAP provider_auth string).
        """
        # 'iscsi_name': 'iqn.2010-10.org.openstack:volume-00000001'
        iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
                               volume['name'])
        iscsi_target, lun = self._get_target_and_lun(context, volume)
        # Verify we haven't setup a CHAP creds file already
        # if DNE no big deal, we'll just create it
        chap_auth = self._get_target_chap_auth(context, iscsi_name)
        if not chap_auth:
            chap_auth = (vutils.generate_username(),
                         vutils.generate_password())
        # Get portals ips and port
        portals_config = self._get_portals_config()
        # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
        # should clean this all up at some point in the future
        tid = self.create_iscsi_target(iscsi_name,
                                       iscsi_target,
                                       lun,
                                       volume_path,
                                       chap_auth,
                                       **portals_config)
        data = {}
        data['location'] = self._iscsi_location(
            self.configuration.iscsi_ip_address, tid, iscsi_name, lun,
            self.configuration.iscsi_secondary_ip_addresses)
        LOG.debug('Set provider_location to: %s', data['location'])
        data['auth'] = self._iscsi_authentication(
            'CHAP', *chap_auth)
        return data

    def remove_export(self, context, volume):
        """Tear down the iSCSI export for a volume, if one exists."""
        try:
            iscsi_target, lun = self._get_target_and_lun(context, volume)
        except exception.NotFound:
            LOG.info(_LI("Skipping remove_export. No iscsi_target "
                         "provisioned for volume: %s"), volume['id'])
            return
        try:
            # NOTE: provider_location may be unset if the volume hasn't
            # been exported
            location = volume['provider_location'].split(' ')
            iqn = location[1]
            # ietadm show will exit with an error
            # this export has already been removed
            self.show_target(iscsi_target, iqn=iqn)
        except Exception:
            LOG.info(_LI("Skipping remove_export. No iscsi_target "
                         "is presently exported for volume: %s"), volume['id'])
            return
        # NOTE: For TgtAdm case volume['id'] is the ONLY param we need
        self.remove_iscsi_target(iscsi_target, lun, volume['id'],
                                 volume['name'])

    def ensure_export(self, context, volume, volume_path):
        """Recreates an export for a logical volume."""
        iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
                               volume['name'])
        # Verify we haven't setup a CHAP creds file already
        # if DNE no big deal, we'll just create it
        chap_auth = self._get_target_chap_auth(context, iscsi_name)
        if not chap_auth:
            # NOTE(review): despite the "Skipping" wording this path does NOT
            # return; the target is still recreated below (without CHAP).
            # Carried over as-is -- confirm intent before adding a return.
            LOG.info(_LI("Skipping ensure_export. No iscsi_target "
                         "provision for volume: %s"), volume['id'])
        # Get portals ips and port
        portals_config = self._get_portals_config()
        iscsi_target, lun = self._get_target_and_lun(context, volume)
        self.create_iscsi_target(
            iscsi_name, iscsi_target, lun, volume_path,
            chap_auth, check_exit_code=False,
            old_name=None, **portals_config)

    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection info.

        The iscsi driver returns a driver_volume_type of 'iscsi'.
        The format of the driver data is defined in _get_iscsi_properties.
        Example return value::

            {
                'driver_volume_type': 'iscsi'
                'data': {
                    'target_discovered': True,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                    'target_portal': '127.0.0.0.1:3260',
                    'volume_id': '9a0d35d0-175a-11e4-8c21-0800200c9a66',
                    'access_mode': 'rw',
                    'discard': False,
                }
            }
        """
        iscsi_properties = self._get_iscsi_properties(volume,
                                                      connector.get(
                                                          'multipath'))
        return {
            'driver_volume_type': self.iscsi_protocol,
            'data': iscsi_properties
        }

    def terminate_connection(self, volume, connector, **kwargs):
        # Nothing to undo; properties handed out by initialize_connection
        # hold no per-connection server-side state.
        pass

    def validate_connector(self, connector):
        """Ensure the connector carries the iSCSI initiator name."""
        # NOTE(jdg): api passes in connector which is initiator info
        if 'initiator' not in connector:
            err_msg = (_LE('The volume driver requires the iSCSI initiator '
                           'name in the connector.'))
            LOG.error(err_msg)
            raise exception.InvalidConnectorException(missing='initiator')
        return True

    def _iscsi_location(self, ip, target, iqn, lun=None, ip_secondary=None):
        """Format the provider_location string: 'portal[;portal],tid iqn lun'."""
        ip_secondary = ip_secondary or []
        port = self.configuration.iscsi_port
        portals = map(lambda x: "%s:%s" % (x, port), [ip] + ip_secondary)
        return ("%(portals)s,%(target)s %(iqn)s %(lun)s"
                % ({'portals': ";".join(portals),
                    'target': target, 'iqn': iqn, 'lun': lun}))

    def show_target(self, iscsi_target, iqn, **kwargs):
        """Raise NotFound unless a target with this iqn currently exists."""
        if iqn is None:
            raise exception.InvalidParameterValue(
                err=_('valid iqn needed for show_target'))
        tid = self._get_target(iqn)
        if tid is None:
            raise exception.NotFound()

    def _get_target_chap_auth(self, context, iscsi_name):
        """Get the current chap auth username and password.

        Returns a (username, password) tuple, or None when the volume has
        no provider_auth or cannot be found.
        """
        try:
            # 'iscsi_name': 'iqn.2010-10.org.openstack:volume-00000001'
            vol_id = iscsi_name.split(':volume-')[1]
            volume_info = self.db.volume_get(context, vol_id)
            # 'provider_auth': 'CHAP user_id password'
            if volume_info['provider_auth']:
                return tuple(volume_info['provider_auth'].split(' ', 3)[1:])
        except exception.NotFound:
            LOG.debug('Failed to get CHAP auth from DB for %s.', vol_id)

    @abc.abstractmethod
    def _get_target_and_lun(self, context, volume):
        """Get iscsi target and lun."""
        pass

    @abc.abstractmethod
    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth, **kwargs):
        pass

    @abc.abstractmethod
    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
        pass

    @abc.abstractmethod
    def _get_iscsi_target(self, context, vol_id):
        pass

    @abc.abstractmethod
    def _get_target(self, iqn):
        pass
class SanISCSITarget(ISCSITarget):
    """iSCSI target for san devices.
    San devices are slightly different, they don't need to implement
    all of the same things that we need to implement locally for LVM
    and local block devices when we create and manage our own targets.

    Subclasses must implement the export lifecycle (create/remove/ensure
    export and terminate_connection); the local-target plumbing below is
    stubbed out as no-ops.
    """
    def __init__(self, *args, **kwargs):
        super(SanISCSITarget, self).__init__(*args, **kwargs)
    @abc.abstractmethod
    def create_export(self, context, volume, volume_path):
        pass
    @abc.abstractmethod
    def remove_export(self, context, volume):
        pass
    @abc.abstractmethod
    def ensure_export(self, context, volume, volume_path):
        pass
    @abc.abstractmethod
    def terminate_connection(self, volume, connector, **kwargs):
        pass
    # NOTE(jdg): Items needed for local iSCSI target drivers,
    # but NOT sans. Stub them out here to make abc happy
    # Use care when looking at these to make sure something
    # that's inherited isn't dependent on one of
    # these.
    def _get_target_and_lun(self, context, volume):
        pass
    def _get_target_chap_auth(self, context, iscsi_name):
        pass
    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth, **kwargs):
        pass
    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
        pass
    def _get_iscsi_target(self, context, vol_id):
        pass
    def _get_target(self, iqn):
        pass
|
|
"""This module defines the `IoManager` class
which manages I/O for file objects connected to an existing gdb process
or pty.
"""
import io
import select
import time
from pprint import pformat
from typing import Union, List, Optional, Dict, Any, Tuple
from pygdbmi import gdbmiparser
import os
import logging
from pygdbmi.constants import (
DEFAULT_GDB_TIMEOUT_SEC,
DEFAULT_TIME_TO_CHECK_FOR_ADDITIONAL_OUTPUT_SEC,
USING_WINDOWS,
GdbTimeoutError,
)
if USING_WINDOWS:
import msvcrt
from ctypes import windll, byref, wintypes, WinError, POINTER # type: ignore
from ctypes.wintypes import HANDLE, DWORD, BOOL
else:
import fcntl
logger = logging.getLogger(__name__)
class IoManager:
    def __init__(
        self,
        stdin: io.BufferedWriter,
        stdout: io.BufferedReader,
        stderr: Optional[io.BufferedReader],
        time_to_check_for_additional_output_sec=DEFAULT_TIME_TO_CHECK_FOR_ADDITIONAL_OUTPUT_SEC,
    ):
        """
        Manage I/O for file objects created before calling this class
        This can be useful if the gdb process is managed elsewhere, or if a
        pty is used.
        """
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.stdin_fileno = self.stdin.fileno()
        self.stdout_fileno = self.stdout.fileno()
        # -1 is never returned by select(), so a missing stderr is ignored.
        self.stderr_fileno = self.stderr.fileno() if self.stderr else -1
        self.read_list: List[int] = []
        if self.stdout:
            self.read_list.append(self.stdout_fileno)
        self.write_list = [self.stdin_fileno]
        # Partial (un-newline-terminated) output per stream, carried between
        # reads so only complete MI records are ever parsed.
        self._incomplete_output: Dict[str, Any] = {"stdout": None, "stderr": None}
        self.time_to_check_for_additional_output_sec = (
            time_to_check_for_additional_output_sec
        )
        self._allow_overwrite_timeout_times = (
            self.time_to_check_for_additional_output_sec > 0
        )
        make_non_blocking(self.stdout)
        if self.stderr:
            make_non_blocking(self.stderr)

    def get_gdb_response(
        self, timeout_sec: float = DEFAULT_GDB_TIMEOUT_SEC, raise_error_on_timeout=True
    ):
        """Get response from GDB, and block while doing so. If GDB does not have any response ready to be read
        by timeout_sec, an exception is raised.

        Args:
            timeout_sec: Maximum time to wait for response. Must be >= 0.
            raise_error_on_timeout: Whether an exception should be raised if no response was found after timeout_sec

        Returns:
            List of parsed GDB responses, returned from gdbmiparser.parse_response, with the
            additional key 'stream' which is either 'stdout' or 'stderr'

        Raises:
            GdbTimeoutError: if response is not received within timeout_sec
            ValueError: if select returned unexpected file number
        """
        if timeout_sec < 0:
            logger.warning("timeout_sec was negative, replacing with 0")
            timeout_sec = 0

        if USING_WINDOWS:
            retval = self._get_responses_windows(timeout_sec)
        else:
            retval = self._get_responses_unix(timeout_sec)

        if not retval and raise_error_on_timeout:
            raise GdbTimeoutError(
                "Did not get response from gdb after %s seconds" % timeout_sec
            )
        else:
            return retval

    def _get_responses_windows(self, timeout_sec):
        """Get responses on windows. Assume no support for select and use a while loop."""
        timeout_time_sec = time.time() + timeout_sec
        responses = []
        while True:
            responses_list = []
            try:
                self.stdout.flush()
                raw_output = self.stdout.readline().replace(b"\r", b"\n")
                responses_list = self._get_responses_list(raw_output, "stdout")
            except IOError:
                pass

            # FIX: stderr is Optional (see __init__); dereferencing None here
            # raised AttributeError, which 'except IOError' did not catch.
            if self.stderr is not None:
                try:
                    self.stderr.flush()
                    raw_output = self.stderr.readline().replace(b"\r", b"\n")
                    responses_list += self._get_responses_list(raw_output, "stderr")
                except IOError:
                    pass

            responses += responses_list
            if timeout_sec == 0:
                break
            elif responses_list and self._allow_overwrite_timeout_times:
                # Output arrived: shrink the deadline so we only linger briefly
                # waiting for any trailing output.
                timeout_time_sec = min(
                    time.time() + self.time_to_check_for_additional_output_sec,
                    timeout_time_sec,
                )
            elif time.time() > timeout_time_sec:
                break
        return responses

    def _get_responses_unix(self, timeout_sec):
        """Get responses on unix-like system. Use select to wait for output."""
        timeout_time_sec = time.time() + timeout_sec
        responses = []
        while True:
            select_timeout = timeout_time_sec - time.time()
            if select_timeout <= 0:
                select_timeout = 0
            events, _, _ = select.select(self.read_list, [], [], select_timeout)
            responses_list = None  # to avoid infinite loop if using Python 2
            for fileno in events:
                # new data is ready to read
                if fileno == self.stdout_fileno:
                    self.stdout.flush()
                    raw_output = self.stdout.read()
                    stream = "stdout"
                elif fileno == self.stderr_fileno:
                    self.stderr.flush()
                    raw_output = self.stderr.read()
                    stream = "stderr"
                else:
                    raise ValueError(
                        "Developer error. Got unexpected file number %d" % fileno
                    )
                responses_list = self._get_responses_list(raw_output, stream)
                responses += responses_list

            if timeout_sec == 0:  # just exit immediately
                break
            elif responses_list and self._allow_overwrite_timeout_times:
                # update timeout time to potentially be closer to now to avoid lengthy wait times when nothing is being output by gdb
                timeout_time_sec = min(
                    time.time() + self.time_to_check_for_additional_output_sec,
                    timeout_time_sec,
                )
            elif time.time() > timeout_time_sec:
                break
        return responses

    def _get_responses_list(
        self, raw_output: bytes, stream: str
    ) -> List[Dict[Any, Any]]:
        """Get parsed response list from string output

        Args:
            raw_output (unicode): gdb output to parse
            stream (str): either stdout or stderr
        """
        responses: List[Dict[Any, Any]] = []

        (_new_output, self._incomplete_output[stream],) = _buffer_incomplete_responses(
            raw_output, self._incomplete_output.get(stream)
        )

        if not _new_output:
            return responses

        response_list = list(
            filter(lambda x: x, _new_output.decode(errors="replace").split("\n"))
        )  # remove blank lines

        # parse each response from gdb into a dict, and store in a list
        for response in response_list:
            if gdbmiparser.response_is_finished(response):
                pass
            else:
                parsed_response = gdbmiparser.parse_response(response)
                parsed_response["stream"] = stream
                logger.debug("%s", pformat(parsed_response))
                responses.append(parsed_response)

        return responses

    def write(
        self,
        mi_cmd_to_write: Union[str, List[str]],
        timeout_sec=DEFAULT_GDB_TIMEOUT_SEC,
        raise_error_on_timeout: bool = True,
        read_response: bool = True,
    ):
        """Write to gdb process. Block while parsing responses from gdb for a maximum of timeout_sec.

        Args:
            mi_cmd_to_write: String to write to gdb. If list, it is joined by newlines.
            timeout_sec: Maximum number of seconds to wait for response before exiting. Must be >= 0.
            raise_error_on_timeout: If read_response is True, raise error if no response is received
            read_response: Block and read response. If there is a separate thread running, this can be false, and the reading thread read the output.

        Returns:
            List of parsed gdb responses if read_response is True, otherwise []

        Raises:
            TypeError: if mi_cmd_to_write is not valid
        """
        # self.verify_valid_gdb_subprocess()
        if timeout_sec < 0:
            logger.warning("timeout_sec was negative, replacing with 0")
            timeout_sec = 0

        # Ensure proper type of the mi command
        if isinstance(mi_cmd_to_write, str):
            mi_cmd_to_write_str = mi_cmd_to_write
        elif isinstance(mi_cmd_to_write, list):
            mi_cmd_to_write_str = "\n".join(mi_cmd_to_write)
        else:
            raise TypeError(
                "The gdb mi command must a be str or list. Got "
                + str(type(mi_cmd_to_write))
            )

        logger.debug("writing: %s", mi_cmd_to_write)

        if not mi_cmd_to_write_str.endswith("\n"):
            mi_cmd_to_write_nl = mi_cmd_to_write_str + "\n"
        else:
            mi_cmd_to_write_nl = mi_cmd_to_write_str

        if USING_WINDOWS:
            # select not implemented in windows for pipes
            # assume it's always ready
            outputready = [self.stdin_fileno]
        else:
            _, outputready, _ = select.select([], self.write_list, [], timeout_sec)
        for fileno in outputready:
            if fileno == self.stdin_fileno:
                # ready to write
                self.stdin.write(mi_cmd_to_write_nl.encode())  # type: ignore
                # must flush, otherwise gdb won't realize there is data
                # to evaluate, and we won't get a response
                self.stdin.flush()  # type: ignore
            else:
                logger.error("got unexpected fileno %d" % fileno)

        if read_response is True:
            return self.get_gdb_response(
                timeout_sec=timeout_sec, raise_error_on_timeout=raise_error_on_timeout
            )
        else:
            return []
def _buffer_incomplete_responses(
raw_output: Optional[bytes], buf: Optional[bytes]
) -> Tuple[Optional[bytes], Optional[bytes]]:
"""It is possible for some of gdb's output to be read before it completely finished its response.
In that case, a partial mi response was read, which cannot be parsed into structured data.
We want to ALWAYS parse complete mi records. To do this, we store a buffer of gdb's
output if the output did not end in a newline.
Args:
raw_output: Contents of the gdb mi output
buf (str): Buffered gdb response from the past. This is incomplete and needs to be prepended to
gdb's next output.
Returns:
(raw_output, buf)
"""
if raw_output:
if buf:
# concatenate buffer and new output
raw_output = b"".join([buf, raw_output])
buf = None
if b"\n" not in raw_output:
# newline was not found, so assume output is incomplete and store in buffer
buf = raw_output
raw_output = None
elif not raw_output.endswith(b"\n"):
# raw output doesn't end in a newline, so store everything after the last newline (if anything)
# in the buffer, and parse everything before it
remainder_offset = raw_output.rindex(b"\n") + 1
buf = raw_output[remainder_offset:]
raw_output = raw_output[:remainder_offset]
return (raw_output, buf)
def make_non_blocking(file_obj: io.IOBase):
    """Switch *file_obj* into non-blocking mode.

    POSIX systems set O_NONBLOCK via fcntl. Windows has no fcntl module, so
    the underlying pipe handle is put into PIPE_NOWAIT mode through the
    Win32 API, per http://stackoverflow.com/a/34504971/2893090
    """
    if not USING_WINDOWS:
        # Set the file status flag (F_SETFL) on the pipes to be non-blocking
        # so we can attempt to read from a pipe with no new data without
        # locking the program up
        fcntl.fcntl(file_obj, fcntl.F_SETFL, os.O_NONBLOCK)
        return

    LPDWORD = POINTER(DWORD)
    PIPE_NOWAIT = wintypes.DWORD(0x00000001)
    SetNamedPipeHandleState = windll.kernel32.SetNamedPipeHandleState
    SetNamedPipeHandleState.argtypes = [HANDLE, LPDWORD, LPDWORD, LPDWORD]
    SetNamedPipeHandleState.restype = BOOL
    pipe_handle = msvcrt.get_osfhandle(file_obj.fileno())  # type: ignore
    result = windll.kernel32.SetNamedPipeHandleState(
        pipe_handle, byref(PIPE_NOWAIT), None, None
    )
    if result == 0:
        raise ValueError(WinError())
|
|
# Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FFJORD bijector class."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import math as tfp_math
from tensorflow_probability.python.bijectors import bijector
from tensorflow_probability.python.internal import cache_util
from tensorflow_probability.python.internal import prefer_static
# TODO(b/144156734) Consider moving trace estimators to stand alone module.
def trace_jacobian_hutchinson(
    ode_fn,
    state_shape,
    dtype,
    sample_fn=tf.random.normal,
    num_samples=1,
    seed=None):
  """Generates a function that computes `ode_fn` and expectation of the trace.

  Uses Hutchinson's trick to estimate the trace of the Jacobian using automatic
  differentiation. This is the approach used in the original FFJORD paper [1].
  This method computes unreduced trace, as reduction is performed inside of the
  bijector class.

  The trace estimate is obtained by computing
  ```None
  Tr[A] approx_equal sum_{i} r[i]^{T} @ A @ r[i]; r[i] - gaussian sample.
  ```
  For details on the original work see [2].

  Args:
    ode_fn: `Callable(time, state)` that computes time derivative.
    state_shape: `TensorShape` representing the shape of the state.
    dtype: `tf.DType` object representing the dtype of `state` tensor.
    sample_fn: `Callable(shape, dtype, seed)` that generates random samples with
      zero mean and covariance of an identity matrix.
      Default value: `tf.random.normal`
    num_samples: `Integer` number of random samples to use for trace estimation.
      Default value: `1`
    seed: PRNG seed compatible with `sample_fn`.

  Returns:
    augmented_ode_fn: `Callable(time, (state, log_det_jac))` that computes
      augmented time derivative `(state_time_derivative, trace_estimation)`.

  #### References

  [1]: Grathwohl, W., Chen, R. T., Betterncourt, J., Sutskever, I.,
       & Duvenaud, D. (2018). Ffjord: Free-form continuous dynamics for
       scalable reversible generative models. arXiv preprint arXiv:1810.01367.
       https://arxiv.org/abs/1810.01367
  [2]: Hutchinson, M. F. (1989). A stochastic estimator of the trace of the
       influence matrix for Laplacian smoothing splines. Communications in
       Statistics-Simulation and Computation, 18(3), 1059-1076.
  """
  # Samples are drawn once here and reused across every call to the returned
  # augmented_ode_fn, so the same probes are used along the whole trajectory.
  random_samples = sample_fn(
      prefer_static.concat([[num_samples], state_shape], axis=0),
      dtype=dtype, seed=seed)
  def augmented_ode_fn(time, state_log_det_jac, **kwargs):
    """Computes both time derivative and trace of the jacobian."""
    state, _ = state_log_det_jac
    # persistent=True: the tape is reused once per random sample inside
    # estimate_trace below.
    with tf.GradientTape(persistent=True,
                         watch_accessed_variables=False) as tape:
      tape.watch(state)
      state_time_derivative = ode_fn(time, state, **kwargs)
    def estimate_trace(random_sample):
      """Computes stochastic trace estimate based on a single random_sample."""
      # We first use `gradient` with `output_gradients` to compute the
      # vector-jacobian product and then multiply elementwise by the random
      # sample to obtain the (unreduced) trace estimate per the formula above.
      jvp = tape.gradient(state_time_derivative, state, random_sample)
      return random_sample * jvp
    # TODO(dkochkov) switch to vectorized_map once more features are supported.
    results = tf.map_fn(estimate_trace, random_samples)
    # Average over the num_samples probes (Hutchinson expectation).
    trace_estimates = tf.reduce_mean(results, axis=0)
    return state_time_derivative, trace_estimates
  return augmented_ode_fn
def trace_jacobian_exact(ode_fn, state_shape, dtype):
  """Generates a function that computes `ode_fn` and trace of the jacobian.

  Augments provided `ode_fn` with explicit computation of the trace of the
  jacobian. This approach scales quadratically with the number of dimensions.
  This method computes unreduced trace, as reduction is performed inside of the
  bijector class.

  Args:
    ode_fn: `Callable(time, state)` that computes time derivative.
    state_shape: `TensorShape` representing the shape of the state.
    dtype: `tf.DType` object representing the dtype of `state` tensor.

  Returns:
    augmented_ode_fn: `Callable(time, (state, log_det_jac))` that computes
      augmented time derivative `(state_time_derivative, trace_estimation)`.
  """
  del state_shape, dtype  # Not used by trace_jacobian_exact
  def augmented_ode_fn(time, state_log_det_jac, **kwargs):
    """Computes both time derivative and trace of the jacobian."""
    state, _ = state_log_det_jac
    # Bind the current time so diag_jacobian sees a state-only function.
    ode_fn_with_time = lambda x: ode_fn(time, x, **kwargs)
    batch_shape = [prefer_static.size0(state)]
    state_time_derivative, diag_jac = tfp_math.diag_jacobian(
        xs=state, fn=ode_fn_with_time, sample_shape=batch_shape)
    # tfp_math.diag_jacobian returns lists; unwrap the single-element case
    # so callers receive plain tensors.
    if isinstance(state_time_derivative, list):
      state_time_derivative = state_time_derivative[0]
    if isinstance(diag_jac, list):
      diag_jac = diag_jac[0]
    # The diagonal of the jacobian IS the unreduced trace contribution.
    trace_value = diag_jac
    return state_time_derivative, trace_value
  return augmented_ode_fn
# TODO(b/142901683) Add Mutually Unbiased Bases for trace estimation.
class FFJORD(bijector.Bijector):
  """Implements a continuous normalizing flow X->Y defined via an ODE.

  This bijector implements a continuous dynamics transformation
  parameterized by a differential equation, where initial and terminal
  conditions correspond to domain (X) and image (Y) i.e.

  ```None
  d/dt[state(t)]=state_time_derivative_fn(t, state(t))
  state(initial_time) = X
  state(final_time) = Y
  ```

  For this transformation the value of `log_det_jacobian` follows another
  differential equation, reducing it to computation of the trace of the
  jacobian along the trajectory

  ```None
  state_time_derivative = state_time_derivative_fn(t, state(t))
  d/dt[log_det_jac(t)] = Tr(jacobian(state_time_derivative, state(t)))
  ```

  FFJORD constructor takes two functions `ode_solve_fn` and
  `trace_augmentation_fn` arguments that customize integration of the
  differential equation and trace estimation.

  Differential equation integration is performed by a call to `ode_solve_fn`.
  Custom `ode_solve_fn` must accept the following arguments:

  * ode_fn(time, state, **condition_kwargs): Differential equation to be solved.
    Custom `ode_solve_fn`s may optionally support conditional inputs by
    accepting a `constants` dict arg and computing gradients wrt the provided
    values in **condition_kwargs.
  * initial_time: Scalar float or floating Tensor representing the initial time.
  * initial_state: Floating Tensor representing the initial state.
  * solution_times: 1D floating Tensor of solution times.

  And return a Tensor of shape [solution_times.shape, initial_state.shape]
  representing state values evaluated at `solution_times`. In addition
  `ode_solve_fn` must support nested structures. For more details see the
  interface of `tfp.math.ode.Solver.solve()`.

  Trace estimation is computed simultaneously with `state_time_derivative`
  using `augmented_state_time_derivative_fn` that is generated by
  `trace_augmentation_fn`. `trace_augmentation_fn` takes
  `state_time_derivative_fn`, `state.shape` and `state.dtype` arguments and
  returns a `augmented_state_time_derivative_fn` callable that computes both
  `state_time_derivative` and unreduced `trace_estimation`.

  #### Custom `ode_solve_fn` and `trace_augmentation_fn` examples:

  ```python
  # custom_solver_fn: `callable(f, t_initial, t_solutions, y_initial, ...)`
  # custom_solver_kwargs: Additional arguments to pass to custom_solver_fn.
  def ode_solve_fn(ode_fn, initial_time, initial_state, solution_times):
    results = custom_solver_fn(ode_fn, initial_time, solution_times,
                               initial_state, **custom_solver_kwargs)
    return results

  ffjord = tfb.FFJORD(state_time_derivative_fn, ode_solve_fn=ode_solve_fn)
  ```

  ```python
  # state_time_derivative_fn: `callable(time, state)`
  # trace_jac_fn: `callable(time, state)` unreduced jacobian trace function
  def trace_augmentation_fn(ode_fn, state_shape, state_dtype):
    def augmented_ode_fn(time, state):
      return ode_fn(time, state), trace_jac_fn(time, state)
    return augmented_ode_fn

  ffjord = tfb.FFJORD(state_time_derivative_fn,
                      trace_augmentation_fn=trace_augmentation_fn)
  ```

  For more details on FFJORD and continuous normalizing flows see [1], [2].

  #### Usage example:

  ```python
  tfd = tfp.distributions
  tfb = tfp.bijectors
  # state_time_derivative_fn: `Callable(time, state)` -> state_time_derivative
  # e.g. Neural network with inputs and outputs of the same shapes and dtypes.

  bijector = tfb.FFJORD(state_time_derivative_fn=state_time_derivative_fn)
  y = bijector.forward(x)  # forward mapping
  x = bijector.inverse(y)  # inverse mapping
  base = tfd.Normal(tf.zeros_like(x), tf.ones_like(x))  # Base distribution
  transformed_distribution = tfd.TransformedDistribution(base, bijector)
  ```

  #### References

  [1]: Chen, T. Q., Rubanova, Y., Bettencourt, J., & Duvenaud, D. K. (2018).
       Neural ordinary differential equations. In Advances in neural
       information processing systems (pp. 6571-6583)

  [2]: Grathwohl, W., Chen, R. T., Bettencourt, J., Sutskever, I.,
       & Duvenaud, D. (2018). Ffjord: Free-form continuous dynamics for
       scalable reversible generative models. arXiv preprint
       arXiv:1810.01367.
       https://arxiv.org/abs/1810.01367
  """

  # FFJORD simultaneously computes `forward` and `fldj` (and `inverse`/`ildj`),
  # so we override the bijector cache to update the LDJ entries of attrs on
  # forward/inverse calls (instead of updating them only when the LDJ
  # methods themselves are called).
  _cache = cache_util.BijectorCacheWithGreedyAttrs(
      forward_name='_augmented_forward',
      inverse_name='_augmented_inverse')

  def __init__(
      self,
      state_time_derivative_fn,
      ode_solve_fn=None,
      trace_augmentation_fn=trace_jacobian_hutchinson,
      initial_time=0.,
      final_time=1.,
      validate_args=False,
      dtype=tf.float32,
      name='ffjord'):
    """Constructs a FFJORD bijector.

    Args:
      state_time_derivative_fn: Python `callable` taking arguments `time`
        (a scalar representing time) and `state` (a Tensor representing the
        state at given `time`) returning the time derivative of the `state` at
        given `time`.
      ode_solve_fn: Python `callable` taking arguments `ode_fn` (same as
        `state_time_derivative_fn` above), `initial_time` (a scalar representing
        the initial time of integration), `initial_state` (a Tensor of floating
        dtype represents the initial state) and `solution_times` (1D Tensor of
        floating dtype representing time at which to obtain the solution)
        returning a Tensor of shape [time_axis, initial_state.shape]. Will take
        `[final_time]` as the `solution_times` argument and
        `state_time_derivative_fn` as `ode_fn` argument. For details on
        providing custom `ode_solve_fn` see class docstring.
        If `None` a DormandPrince solver from `tfp.math.ode` is used.
        Default value: None
      trace_augmentation_fn: Python `callable` taking arguments `ode_fn` (
        python `callable` same as `state_time_derivative_fn` above),
        `state_shape` (TensorShape of the state), `dtype` (same as dtype of
        the state) and returning a python `callable` taking arguments `time`
        (a scalar representing the time at which the function is evaluated),
        `state` (a Tensor representing the state at given `time`) that computes
        a tuple (`ode_fn(time, state)`, `jacobian_trace_estimation`).
        `jacobian_trace_estimation` should represent trace of the jacobian of
        `ode_fn` with respect to `state`. `state_time_derivative_fn` will be
        passed as `ode_fn` argument. For details on providing custom
        `trace_augmentation_fn` see class docstring.
        Default value: tfp.bijectors.ffjord.trace_jacobian_hutchinson
      initial_time: Scalar float representing time to which the `x` value of the
        bijector corresponds to. Passed as `initial_time` to `ode_solve_fn`.
        For default solver can be Python `float` or floating scalar `Tensor`.
        Default value: 0.
      final_time: Scalar float representing time to which the `y` value of the
        bijector corresponds to. Passed as `solution_times` to `ode_solve_fn`.
        For default solver can be Python `float` or floating scalar `Tensor`.
        Default value: 1.
      validate_args: Python 'bool' indicating whether to validate input.
        Default value: False
      dtype: `tf.DType` to prefer when converting args to `Tensor`s. Else, we
        fall back to a common dtype inferred from the args, finally falling
        back to float32.
      name: Python `str` name prefixed to Ops created by this function.
    """
    parameters = dict(locals())
    with tf.name_scope(name) as name:
      self._initial_time = initial_time
      self._final_time = final_time
      self._ode_solve_fn = ode_solve_fn
      if self._ode_solve_fn is None:
        # Default to an adaptive Dormand-Prince solver from tfp.math.ode.
        self._ode_solver = tfp_math.ode.DormandPrince()
        self._ode_solve_fn = self._ode_solver.solve
      self._trace_augmentation_fn = trace_augmentation_fn
      self._state_time_derivative_fn = state_time_derivative_fn

      def inverse_state_time_derivative(time, state, **kwargs):
        # The inverse flow integrates the negated derivative with time
        # reflected around the integration interval, so the same
        # [initial_time, final_time] span can be reused for both directions.
        return -state_time_derivative_fn(self._final_time - time, state,
                                         **kwargs)

      self._inv_state_time_derivative_fn = inverse_state_time_derivative
      super(FFJORD, self).__init__(
          forward_min_event_ndims=0,
          dtype=dtype,
          validate_args=validate_args,
          parameters=parameters,
          name=name)

  @classmethod
  def _parameter_properties(cls, dtype):
    # FFJORD is parameterized by callables rather than tensors, so there are
    # no tensor parameters to report.
    return dict()

  def _solve_ode(self, ode_fn, state, **kwargs):
    """Solves the initial value problem defined by `ode_fn`.

    Args:
      ode_fn: `Callable(time, state)` that represents state time derivative.
      state: A `Tensor` representing initial state.
      **kwargs: Additional arguments to pass to ode_solve_fn.

    Returns:
      final_state: `Tensor` of the same shape and dtype as `state` representing
        the solution of ODE defined by `ode_fn` at `self._final_time`.
    """
    integration_result = self._ode_solve_fn(
        ode_fn=ode_fn,
        initial_time=self._initial_time,
        initial_state=state,
        solution_times=[self._final_time],
        **kwargs)
    # Only one solution time was requested, so take the last (= only) entry
    # of each component of the (possibly nested) solution structure.
    final_state = tf.nest.map_structure(
        lambda x: x[-1], integration_result.states)
    return final_state

  def _augmented_forward(self, x, **condition_kwargs):
    """Computes forward and forward_log_det_jacobian transformations."""
    augmented_ode_fn = self._trace_augmentation_fn(
        self._state_time_derivative_fn, prefer_static.shape(x), x.dtype)
    # Augment the state with a zero-initialized accumulator; the solver
    # integrates the trace estimate into it, yielding the FLDJ alongside y.
    augmented_x = (x, tf.zeros_like(x))
    if condition_kwargs:
      # Conditional inputs ride along as solver `constants` so gradients
      # w.r.t. them are tracked by the ODE solver.
      y, fldj = self._solve_ode(augmented_ode_fn, augmented_x,
                                constants=condition_kwargs)
    else:
      y, fldj = self._solve_ode(augmented_ode_fn, augmented_x)
    # The inverse LDJ at corresponding points is the negated forward LDJ.
    return y, {'ildj': -fldj, 'fldj': fldj}

  def _augmented_inverse(self, y, **condition_kwargs):
    """Computes inverse and inverse_log_det_jacobian transformations."""
    augmented_inv_ode_fn = self._trace_augmentation_fn(
        self._inv_state_time_derivative_fn, prefer_static.shape(y), y.dtype)
    # Same augmentation trick as in `_augmented_forward`, but integrating the
    # time-reflected, negated derivative.
    augmented_y = (y, tf.zeros_like(y))
    if condition_kwargs:
      x, ildj = self._solve_ode(augmented_inv_ode_fn, augmented_y,
                                constants=condition_kwargs)
    else:
      x, ildj = self._solve_ode(augmented_inv_ode_fn, augmented_y)
    return x, {'ildj': ildj, 'fldj': -ildj}

  def _forward(self, x, **condition_kwargs):
    y, _ = self._augmented_forward(x, **condition_kwargs)
    return y

  def _inverse(self, y, **condition_kwargs):
    x, _ = self._augmented_inverse(y, **condition_kwargs)
    return x

  def _forward_log_det_jacobian(self, x, **condition_kwargs):
    cached = self._cache.forward_attributes(x)
    # If LDJ isn't in the cache, call forward once.
    if 'fldj' not in cached:
      _, attrs = self._augmented_forward(x, **condition_kwargs)
      cached.update(attrs)
    return cached['fldj']

  def _inverse_log_det_jacobian(self, y, **condition_kwargs):
    cached = self._cache.inverse_attributes(y)
    # If LDJ isn't in the cache, call inverse once.
    if 'ildj' not in cached:
      _, attrs = self._augmented_inverse(y, **condition_kwargs)
      cached.update(attrs)
    return cached['ildj']
|
|
"""This file show how we can use Pycuda compiled fct in a Theano
Op. Do no use those op in production code. See the TODO.
You can use them as a guide to use your pycuda code into a Theano op.
The PycudaElemwiseSourceModuleOp is a Theano op use pycuda code
generated with pycuda.compiler.SourceModule
Their is a test in test_pycuda.py.
This don't work with broadcast and non-contiguous memory as pycuda
don't support that, but we make sure we don't introduce problem.
If the memory is non-contiguous, we create a new copy that is contiguous.
If their is broadcasted dimensions, we raise an error.
#The following is commented as it work only with old pycuda version
The PycudaElemwiseKernelOp op use pycuda code generated with
pycuda.elementwise.ElementwiseKernel. It must be wrapper by
TheanoElementwiseKernel.
"""
from __future__ import absolute_import, print_function, division
from itertools import chain
import numpy
import theano
from six.moves import xrange
from theano.compat import izip
from theano.gof import Op, Apply, local_optimizer, EquilibriumDB
from theano.gof.utils import hash_from_dict
from theano.sandbox.cuda import GpuElemwise, CudaNdarrayType, GpuOp
from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable,
gpu_contiguous)
from theano.sandbox.cuda.opt import gpu_seqopt
import pycuda
from pycuda.compiler import SourceModule
import pycuda.gpuarray
from . import pycuda_init
if not pycuda_init.pycuda_available:
raise Exception("No pycuda available. You can't load pycuda_example.py")
def _replace_npy_types(c_arg):
c_arg = c_arg.replace('npy_float32', 'float')
c_arg = c_arg.replace('npy_float64', 'double')
c_arg = c_arg.replace('npy_int32', 'int')
c_arg = c_arg.replace('npy_int8', 'char')
c_arg = c_arg.replace('npy_ucs4', 'unsigned int')
c_arg = c_arg.replace('npy_uint32', 'unsigned int')
c_arg = c_arg.replace('npy_uint16', 'unsigned short')
c_arg = c_arg.replace('npy_uint8', 'unsigned char')
return c_arg
def theano_parse_c_arg(c_arg):
    """Parse a Theano-style C argument string with pycuda's argument parser."""
    # Translate numpy typedefs first: pycuda's parser only knows plain C types.
    return pycuda.tools.parse_c_arg(_replace_npy_types(c_arg))
"""
class TheanoElementwiseKernel(pycuda.elementwise.ElementwiseKernel):
def __init__(self, arguments, operation,
name="kernel", keep=False, options=None, **kwargs):
if options is None:
options = []
if isinstance(arguments, string_types):
arguments = [theano_parse_c_arg(arg)
for arg in arguments.split(",")]
pycuda.elementwise.ElementwiseKernel.__init__(self, arguments,
operation, name, keep,
options, **kwargs)
def __call__(self, *args):
vectors = []
invocation_args = []
for arg, arg_descr in zip(args, self.gen_kwargs["arguments"]):
if isinstance(arg_descr, VectorArg):
vectors.append(arg)
invocation_args.append(arg.gpudata)
else:
invocation_args.append(arg)
repr_vec = vectors[0]
invocation_args.append(repr_vec.mem_size)
if hasattr(repr_vec, "_block") and hasattr(repr_vec, "_grid"):
self.func.set_block_shape(*repr_vec._block)
self.func.prepared_call(repr_vec._grid, *invocation_args)
else:
_grid, _block = pycuda.gpuarray.splay(repr_vec.mem_size)
self.func.set_block_shape(*_block)
self.func.prepared_call(_grid, *invocation_args)
class PycudaElemwiseKernelOp(GpuOp):
nin = property(lambda self: self.scalar_op.nin)
nout = property(lambda self: self.scalar_op.nout)
def __init__(self, scalar_op, inplace_pattern=None, name=None):
if inplace_pattern is None:
inplace_pattern = {}
self.name = name
self.scalar_op = scalar_op
self.inplace_pattern = inplace_pattern
def __str__(self):
if self.name is None:
if self.inplace_pattern:
items = self.inplace_pattern.items()
items.sort()
return self.__class__.__name__ + "{%s}%s" % (self.scalar_op,
str(items))
else:
return self.__class__.__name__ + "{%s}" % (self.scalar_op)
else:
return self.name
def __eq__(self, other):
return (type(self) == type(other) and
self.scalar_op == other.scalar_op and
self.inplace_pattern == other.inplace_pattern)
def __hash__(self):
return (hash(type(self)) ^ hash(self.scalar_op) ^
hash_from_dict(self.inplace_pattern))
def make_node(self, *inputs):
_inputs = [gpu_contiguous(as_cuda_ndarray_variable(i)) for i in inputs]
if self.nin > 0 and len(_inputs) != self.nin:
raise TypeError('Wrong argument count', (self.nin, len(_inputs)))
for i in _inputs[1:]:
if i.type.ndim != inputs[0].type.ndim:
raise TypeError('different ranks among inputs')
if any([any(i.type.broadcastable) for i in inputs]):
raise Exception("pycuda don't support broadcasted dimensions")
assert len(inputs) == 2 # TODO remove
# output is broadcastable only along dimensions where all inputs are
# broadcastable
broadcastable = []
for d in xrange(_inputs[0].type.ndim):
bcast_d = True
for i in _inputs:
if not i.type.broadcastable[d]:
bcast_d = False
break
broadcastable.append(bcast_d)
assert len(broadcastable) == _inputs[0].type.ndim
otype = CudaNdarrayType(broadcastable=broadcastable)
assert self.nout == 1
out_node = Apply(self, _inputs, [otype() for o in xrange(self.nout)])
in_name = ["i" + str(id) for id in range(len(inputs))]
out_name = ["o" + str(id) for id in range(self.nout)]
c_code = self.scalar_op.c_code(out_node, "some_name",
tuple([n + "[i]"for n in in_name]),
tuple(n + "[i]"for n in out_name), {})
self.pycuda_fct = TheanoElementwiseKernel(
", ".join([var.type.dtype_specs()[1] + " *" + name
for var, name in (zip(inputs, in_name) +
zip(out_node.outputs, out_name))]),
c_code,
"pycuda_elemwise_kernel_%s" % str(self.scalar_op),
preamble=("#include<Python.h>\n"
"#include <numpy/arrayobject.h>"))
return out_node
def perform(self, node, inputs, out):
#TODO assert all input have the same shape
z, = out
if z[0] is None or z[0].shape != inputs[0].shape:
z[0] = theano.sandbox.cuda.CudaNdarray.zeros(inputs[0].shape)
i = inputs + z
self.pycuda_fct(*i)
"""
class PycudaElemwiseSourceModuleOp(GpuOp):
    """Elementwise GPU op whose CUDA kernel is built with pycuda SourceModule.

    The wrapped scalar op's C code is embedded into a flat 1D kernel at
    `make_node` time; `perform` launches it through the pycuda driver.
    """
    # Arity is delegated to the wrapped scalar op.
    nin = property(lambda self: self.scalar_op.nin)
    nout = property(lambda self: self.scalar_op.nout)

    def __init__(self, scalar_op, inplace_pattern=None, name=None):
        if inplace_pattern is None:
            inplace_pattern = {}
        self.name = name
        self.scalar_op = scalar_op
        self.inplace_pattern = inplace_pattern

    def __str__(self):
        # Default display name encodes the scalar op and any inplace pattern.
        if self.name is None:
            if self.inplace_pattern:
                items = list(self.inplace_pattern.items())
                items.sort()
                return self.__class__.__name__ + "{%s}%s" % (self.scalar_op,
                                                             str(items))
            else:
                return self.__class__.__name__ + "{%s}" % (self.scalar_op)
        else:
            return self.name

    def __eq__(self, other):
        return (type(self) == type(other) and
                self.scalar_op == other.scalar_op and
                self.inplace_pattern == other.inplace_pattern)

    def __hash__(self):
        # Keep __hash__ consistent with __eq__ above; the inplace dict is
        # hashed via the order-insensitive helper.
        return (hash(type(self)) ^ hash(self.scalar_op) ^
                hash_from_dict(self.inplace_pattern))

    def make_node(self, *inputs):
        # Force contiguous GPU inputs: the generated kernel indexes linearly.
        _inputs = [gpu_contiguous(as_cuda_ndarray_variable(i)) for i in inputs]
        if self.nin > 0 and len(_inputs) != self.nin:
            raise TypeError('Wrong argument count', (self.nin, len(_inputs)))
        for i in _inputs[1:]:
            if i.type.ndim != inputs[0].type.ndim:
                raise TypeError('different ranks among inputs')
        if any([any(i.type.broadcastable) for i in inputs]):
            raise Exception("pycuda don't support broadcasted dimensions")
        assert len(inputs) == 2  # TODO remove
        otype = CudaNdarrayType(broadcastable=[False] * _inputs[0].type.ndim)
        assert self.nout == 1
        fct_name = "pycuda_elemwise_%s" % str(self.scalar_op)
        out_node = Apply(self, _inputs, [otype() for o in xrange(self.nout)])
        # Build the C parameter list and per-element body from the scalar op.
        in_name = ["i" + str(id) for id in range(len(inputs))]
        out_name = ["o" + str(id) for id in range(self.nout)]
        c_code = self.scalar_op.c_code(out_node, "some_name",
                                       tuple([n + "[i]" for n in in_name]),
                                       tuple(n + "[i]" for n in out_name), {})
        c_code_param = ", ".join(
            [_replace_npy_types(var.type.dtype_specs()[1]) + " *" + name
             for var, name in chain(izip(inputs, in_name),
                                    izip(out_node.outputs, out_name))] +
            ["int size"])
        # Flat 1D kernel: each thread computes one element, guarded by `size`.
        mod = SourceModule("""
  __global__ void %s(%s)
  {
    int i = (blockIdx.x+blockIdx.y*gridDim.x)*(blockDim.x*blockDim.y);
    i += threadIdx.x + threadIdx.y*blockDim.x;
    if(i<size){
        %s
    }
  }
  """ % (fct_name, c_code_param, c_code))
        # NOTE(review): compiling in make_node means the kernel is rebuilt
        # every time the graph is constructed -- confirm this is acceptable.
        self.pycuda_fct = mod.get_function(fct_name)
        return out_node

    def perform(self, node, inputs, out):
        # TODO support broadcast!
        # TODO assert all input have the same shape
        z, = out
        # Reallocate the output only when missing, mis-shaped, or
        # non-contiguous.
        if (z[0] is None or
                z[0].shape != inputs[0].shape or
                not z[0].is_c_contiguous()):
            z[0] = theano.sandbox.cuda.CudaNdarray.zeros(inputs[0].shape)
        if inputs[0].shape != inputs[1].shape:
            raise TypeError("PycudaElemwiseSourceModuleOp:"
                            " inputs don't have the same shape!")
        if inputs[0].size > 512:
            # One 512-thread block per 512 elements.
            grid = (int(numpy.ceil(inputs[0].size / 512.)), 1)
            block = (512, 1, 1)
        else:
            grid = (1, 1)
            # NOTE(review): this launch config assumes 2D inputs
            # (shape[0], shape[1]); 1D or >2D small inputs would raise here --
            # confirm intended input ranks.
            block = (inputs[0].shape[0], inputs[0].shape[1], 1)
        self.pycuda_fct(inputs[0], inputs[1], z[0],
                        numpy.intc(inputs[1].size), block=block, grid=grid)
class PycudaElemwiseSourceModuleMakeThunkOp(Op):
    """Variant of PycudaElemwiseSourceModuleOp using the make_thunk interface.

    The CUDA kernel is compiled lazily in `make_thunk` (at function
    compilation time) instead of in `make_node`.
    """
    # Arity is delegated to the wrapped scalar op.
    nin = property(lambda self: self.scalar_op.nin)
    nout = property(lambda self: self.scalar_op.nout)
    __props__ = ("scalar_op", "inplace_pattern")

    def __init__(self, scalar_op, inplace_pattern=None, name=None):
        if inplace_pattern is None:
            inplace_pattern = {}
        self.name = name
        self.scalar_op = scalar_op
        self.inplace_pattern = inplace_pattern

    # As we have a dict in props, we need to implement __hash__
    def __hash__(self):
        return hash((type(self), hash(self.scalar_op),
                     hash_from_dict(self.inplace_pattern)))

    def __str__(self):
        # Default display name encodes the scalar op and any inplace pattern.
        if self.name is None:
            if self.inplace_pattern:
                items = list(self.inplace_pattern.items())
                items.sort()
                return self.__class__.__name__ + "{%s}%s" % (self.scalar_op,
                                                             str(items))
            else:
                return self.__class__.__name__ + "{%s}" % (self.scalar_op)
        else:
            return self.name

    def make_node(self, *inputs):
        assert self.nout == 1
        assert len(inputs) == 2  # TODO remove
        # Force contiguous GPU inputs: the generated kernel indexes linearly.
        _inputs = [gpu_contiguous(as_cuda_ndarray_variable(i)) for i in inputs]
        if self.nin > 0 and len(_inputs) != self.nin:
            raise TypeError('Wrong argument count', (self.nin, len(_inputs)))
        for i in _inputs[1:]:
            if i.type.ndim != inputs[0].type.ndim:
                raise TypeError('different ranks among inputs')
        if any([any(i.type.broadcastable) for i in inputs]):
            raise Exception("pycuda don't support broadcasted dimensions")
        otype = CudaNdarrayType(broadcastable=[False] * _inputs[0].type.ndim)
        out_node = Apply(self, _inputs, [otype() for o in xrange(self.nout)])
        return out_node

    def make_thunk(self, node, storage_map, _, _2):
        # TODO support broadcast!
        # TODO assert all input have the same shape
        fct_name = "pycuda_elemwise_%s" % str(self.scalar_op)
        # Build the C parameter list and per-element body from the scalar op.
        in_name = ["i" + str(id) for id in range(len(node.inputs))]
        out_name = ["o" + str(id) for id in range(self.nout)]
        c_code = self.scalar_op.c_code(node, "some_name",
                                       tuple([n + "[i]" for n in in_name]),
                                       tuple(n + "[i]" for n in out_name), {})
        c_code_param = ", ".join(
            [_replace_npy_types(var.type.dtype_specs()[1]) + " *" + name
             for var, name in chain(izip(node.inputs, in_name),
                                    izip(node.outputs, out_name))] +
            ["int size"])
        # Flat 1D kernel: each thread computes one element, guarded by `size`.
        mod = SourceModule("""
  __global__ void %s(%s)
  {
    int i = (blockIdx.x+blockIdx.y*gridDim.x)*(blockDim.x*blockDim.y);
    i += threadIdx.x + threadIdx.y*blockDim.x;
    if(i<size){
        %s
    }
  }
  """ % (fct_name, c_code_param, c_code))
        pycuda_fct = mod.get_function(fct_name)
        # The thunk closes over the storage cells, not the values, so it sees
        # fresh inputs on every call.
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]

        def thunk():
            z = outputs[0]
            # Reallocate the output only when missing, mis-shaped, or
            # non-contiguous.
            if (z[0] is None or
                    z[0].shape != inputs[0][0].shape or
                    not z[0].is_c_contiguous()):
                z[0] = theano.sandbox.cuda.CudaNdarray.zeros(
                    inputs[0][0].shape)
            if inputs[0][0].shape != inputs[1][0].shape:
                raise TypeError("PycudaElemwiseSourceModuleMakeThunkOp:"
                                " inputs don't have the same shape!")
            if inputs[0][0].size > 512:
                # One 512-thread block per 512 elements.
                grid = (int(numpy.ceil(inputs[0][0].size / 512.)), 1)
                block = (512, 1, 1)
            else:
                grid = (1, 1)
                # NOTE(review): assumes 2D inputs (shape[0], shape[1]) --
                # confirm intended input ranks.
                block = (inputs[0][0].shape[0], inputs[0][0].shape[1], 1)
            pycuda_fct(inputs[0][0], inputs[1][0], z[0],
                       numpy.intc(inputs[1][0].size), block=block,
                       grid=grid)
        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False
        return thunk
# Optimizer database holding the pycuda rewrites; registered into Theano's
# sequential GPU optimization pipeline at position 1.5 under 'fast_run'.
pycuda_optimizer = EquilibriumDB()
gpu_seqopt.register("pycuda_optimizer", pycuda_optimizer, 1.5, "fast_run")
@local_optimizer([GpuElemwise])
def local_pycuda_gpu_elemwise(node):
    """
    GpuElemwise -> PycudaElemwiseSourceModuleOp
    """
    # Guard clauses: only substitute plain GpuElemwise nodes whose inputs
    # have no broadcastable dimensions and are at most 2D (the pycuda op's
    # launch configuration supports nothing else).
    if not isinstance(node.op, GpuElemwise):
        return
    if any(any(i.type.broadcastable) for i in node.inputs):
        return
    if not all(i.ndim <= 2 for i in node.inputs):
        return
    replacement = PycudaElemwiseSourceModuleOp(
        node.op.scalar_op, node.op.inplace_pattern)(*node.inputs)
    return [replacement]
pycuda_optimizer.register("local_pycuda_gpu_elemwise",
local_pycuda_gpu_elemwise)
"""
@local_optimizer([GpuElemwise])
def local_pycuda_gpu_elemwise_kernel(node):
""
GpuElemwise -> PycudaElemwiseKernelOp
""
if isinstance(node.op, GpuElemwise):
if not any([any(i.type.broadcastable) for i in node.inputs]):
new_op = PycudaElemwiseKernelOp(node.op.scalar_op,
node.op.inplace_pattern)(
*node.inputs)
return [new_op]
pycuda_optimizer.register("local_pycuda_gpu_elemwise_kernel",
local_pycuda_gpu_elemwise_kernel, 1.5)
"""
|
|
# Copyright 2013 Beijing Huron Technology Co.Ltd.
#
# Authors: Li Xipeng <lixipeng@hihuron.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""<Project_name> base exception handling.
Includes decorator for re-raising <Project_name>-type exceptions.
SHOULD include dedicated exception logging.
"""
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_versionedobjects import exception as obj_exc
import six
import webob.exc
from <project_name>.i18n import _, _LE
LOG = logging.getLogger(__name__)

# When True, a failure to interpolate an exception's format kwargs re-raises
# the formatting error instead of falling back to the unformatted message
# (useful for catching bad format kwargs in tests).
exc_log_opts = [
    cfg.BoolOpt('fatal_exception_format_errors',
                default=False,
                help='Make exception message format errors fatal.'),
]

CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
    """WSGI HTTP exception carrying an explicit code, title and explanation."""

    def __init__(self, code=500, title="", explanation=""):
        # Set the response attributes before delegating to webob, which
        # builds the HTTP response from them.
        self.code = code
        self.title = title
        self.explanation = explanation
        super(ConvertedException, self).__init__()
class Error(Exception):
    """Generic error base class with no message-formatting behavior."""
    pass
class <Project_name>Exception(Exception):
    """Base <Project_name> Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred.")
    code = 500
    # NOTE(review): class-level mutable dict; it is shared by every subclass
    # and instance that does not override it -- confirm that is intentional.
    headers = {}
    safe = False

    def __init__(self, message=None, **kwargs):
        # NOTE: self.kwargs aliases kwargs, so the fixups below are visible
        # through both names.
        self.kwargs = kwargs
        self.kwargs['message'] = message
        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass
        # Stringify exception-valued kwargs so they can be interpolated into
        # the message template safely.
        for k, v in self.kwargs.items():
            if isinstance(v, Exception):
                self.kwargs[k] = six.text_type(v)
        if self._should_format():
            try:
                message = self.message % kwargs
            except Exception:
                exc_info = sys.exc_info()
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_LE('Exception in string format operation'))
                for name, value in kwargs.items():
                    LOG.error(_LE("%(name)s: %(value)s"),
                              {'name': name, 'value': value})
                if CONF.fatal_exception_format_errors:
                    six.reraise(*exc_info)
                # at least get the core message out if something happened
                message = self.message
        elif isinstance(message, Exception):
            message = six.text_type(message)
        # NOTE(luisg): We put the actual message in 'msg' so that we can access
        # it, because if we try to access the message via 'message' it will be
        # overshadowed by the class' message attribute
        self.msg = message
        super(<Project_name>Exception, self).__init__(message)

    def _should_format(self):
        # Format when no explicit message was given, or when the class
        # template interpolates the supplied message itself.
        return self.kwargs['message'] is None or '%(message)' in self.message

    def __unicode__(self):
        return six.text_type(self.msg)
# Authorization failures map to HTTP 403.
class NotAuthorized(<Project_name>Exception):
    message = _("Not authorized.")
    code = 403


class AdminRequired(NotAuthorized):
    message = _("User does not have admin privileges")


class PolicyNotAuthorized(NotAuthorized):
    message = _("Policy doesn't allow %(action)s to be performed.")


# Validation failures map to HTTP 400.
class Invalid(<Project_name>Exception):
    message = _("Unacceptable parameters.")
    code = 400


class InvalidResults(Invalid):
    message = _("The results are invalid.")


class InvalidInput(Invalid):
    message = _("Invalid input received: %(reason)s")


class InvalidContentType(Invalid):
    message = _("Invalid content type %(content_type)s.")


class InvalidHost(Invalid):
    message = _("Invalid host: %(reason)s")


# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
    message = _("%(err)s")


class InvalidAuthKey(Invalid):
    message = _("Invalid auth key: %(reason)s")


class InvalidConfigurationValue(Invalid):
    message = _('Value "%(value)s" is not valid for '
                'configuration option "%(option)s"')


class ServiceUnavailable(Invalid):
    message = _("Service is unavailable at this time.")


class InvalidUUID(Invalid):
    message = _("Expected a uuid but received %(uuid)s.")
class APIException(<Project_name>Exception):
message = _("Error while requesting %(service)s API.")
def __init__(self, message=None, **kwargs):
if 'service' not in kwargs:
kwargs['service'] = 'unknown'
super(APIException, self).__init__(message, **kwargs)
class APITimeout(APIException):
    message = _("Timeout while requesting %(service)s API.")


# Lookup failures map to HTTP 404; marked safe to expose to API users.
class NotFound(<Project_name>Exception):
    message = _("Resource could not be found.")
    code = 404
    safe = True


class HostNotFound(NotFound):
    message = _("Host %(host)s could not be found.")


class HostBinaryNotFound(NotFound):
    message = _("Could not find binary %(binary)s on host %(host)s.")


class InvalidReservationExpiration(Invalid):
    message = _("Invalid reservation expiration %(expire)s.")


class MalformedRequestBody(<Project_name>Exception):
    message = _("Malformed message body: %(reason)s")


class ConfigNotFound(NotFound):
    message = _("Could not find config at %(path)s")


class ParameterNotFound(NotFound):
    message = _("Could not find parameter %(param)s")


class PasteAppNotFound(NotFound):
    message = _("Could not load paste app '%(name)s' from %(path)s")


class NoValidHost(<Project_name>Exception):
    message = _("No valid host was found. %(reason)s")


class NoMoreTargets(<Project_name>Exception):
    """No more available targets."""
    pass


class KeyManagerError(<Project_name>Exception):
    message = _("key manager error: %(reason)s")


class EvaluatorParseException(Exception):
    # NOTE(review): inherits from plain Exception, so `message` is never
    # interpolated like the <Project_name>Exception hierarchy -- confirm.
    message = _("Error during evaluator parsing: %(reason)s")


# Re-export oslo.versionedobjects exceptions under this module's namespace.
UnsupportedObjectError = obj_exc.UnsupportedObjectError
OrphanedObjectError = obj_exc.OrphanedObjectError
IncompatibleObjectVersion = obj_exc.IncompatibleObjectVersion
ReadOnlyFieldError = obj_exc.ReadOnlyFieldError
ObjectActionError = obj_exc.ObjectActionError
ObjectFieldInvalid = obj_exc.ObjectFieldInvalid
|
|
from dbt.contracts.graph.manifest import CompileResultNode
from dbt.contracts.graph.unparsed import (
FreshnessThreshold
)
from dbt.contracts.graph.parsed import ParsedSourceDefinition
from dbt.contracts.util import (
BaseArtifactMetadata,
ArtifactMixin,
VersionedSchema,
Replaceable,
schema_version,
)
from dbt.exceptions import InternalException
from dbt.events.functions import fire_event
from dbt.events.types import TimingInfoCollected
from dbt.logger import (
TimingProcessor,
JsonOnly,
)
from dbt.utils import lowercase
from dbt.dataclass_schema import dbtClassMixin, StrEnum
import agate
from dataclasses import dataclass, field
from datetime import datetime
from typing import (
Union, Dict, List, Optional, Any, NamedTuple, Sequence,
)
from dbt.clients.system import write_json
@dataclass
class TimingInfo(dbtClassMixin):
    """Wall-clock timing for one named phase of a node's execution."""
    name: str
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None

    def begin(self):
        # Naive UTC timestamps, matching the rest of the artifact schema.
        self.started_at = datetime.utcnow()

    def end(self):
        self.completed_at = datetime.utcnow()
class collect_timing_info:
    """Context manager that times its body into a TimingInfo.

    On exit, fires a TimingInfoCollected event with the timing attached.
    """
    def __init__(self, name: str):
        self.timing_info = TimingInfo(name=name)

    def __enter__(self):
        self.timing_info.begin()
        return self.timing_info

    def __exit__(self, exc_type, exc_value, traceback):
        self.timing_info.end()
        # TimingProcessor attaches self.timing_info to the emitted log record;
        # JsonOnly restricts it to the structured (JSON) log output.
        with JsonOnly(), TimingProcessor(self.timing_info):
            fire_event(TimingInfoCollected())
class RunningStatus(StrEnum):
    """Transient states reported while a node is still executing."""
    Started = 'started'
    Compiling = 'compiling'
    Executing = 'executing'


class NodeStatus(StrEnum):
    """Superset of all terminal statuses; the enums below select subsets."""
    Success = "success"
    Error = "error"
    Fail = "fail"
    Warn = "warn"
    Skipped = "skipped"
    Pass = "pass"
    RuntimeErr = "runtime error"


class RunStatus(StrEnum):
    """Terminal statuses for run-style node execution."""
    Success = NodeStatus.Success
    Error = NodeStatus.Error
    Skipped = NodeStatus.Skipped


class TestStatus(StrEnum):
    """Terminal statuses for test nodes."""
    Pass = NodeStatus.Pass
    Error = NodeStatus.Error
    Fail = NodeStatus.Fail
    Warn = NodeStatus.Warn
    Skipped = NodeStatus.Skipped


class FreshnessStatus(StrEnum):
    """Terminal statuses for source freshness checks."""
    Pass = NodeStatus.Pass
    Warn = NodeStatus.Warn
    Error = NodeStatus.Error
    RuntimeErr = NodeStatus.RuntimeErr
@dataclass
class BaseResult(dbtClassMixin):
    """Fields common to every node/run result record."""
    status: Union[RunStatus, TestStatus, FreshnessStatus]
    timing: List[TimingInfo]
    thread_id: str
    execution_time: float
    adapter_response: Dict[str, Any]
    message: Optional[str]
    failures: Optional[int]

    @classmethod
    def __pre_deserialize__(cls, data):
        # Older serialized artifacts may lack the optional keys; backfill
        # them so historic files still deserialize.
        data = super().__pre_deserialize__(data)
        data.setdefault('message', None)
        data.setdefault('failures', None)
        return data
@dataclass
class NodeResult(BaseResult):
    # The compiled node this result belongs to.
    node: CompileResultNode


@dataclass
class RunResult(NodeResult):
    # In-memory only: agate tables are not serializable, so both directions
    # of (de)serialization drop the value.
    agate_table: Optional[agate.Table] = field(
        default=None, metadata={
            'serialize': lambda x: None, 'deserialize': lambda x: None
        }
    )

    @property
    def skipped(self):
        return self.status == RunStatus.Skipped
@dataclass
class ExecutionResult(dbtClassMixin):
    """A collection of node results plus total elapsed wall time.

    Implements the sequence protocol so callers can len/iterate/index the
    results directly.
    """
    results: Sequence[BaseResult]
    elapsed_time: float

    def __len__(self):
        return len(self.results)

    def __iter__(self):
        return iter(self.results)

    def __getitem__(self, idx):
        return self.results[idx]
@dataclass
class RunResultsMetadata(BaseArtifactMetadata):
    # Pinned to the run-results artifact's declared schema version.
    dbt_schema_version: str = field(
        default_factory=lambda: str(RunResultsArtifact.dbt_schema_version)
    )


@dataclass
class RunResultOutput(BaseResult):
    # Serializable form of a node result: the node is flattened to its id.
    unique_id: str
def process_run_result(result: RunResult) -> RunResultOutput:
    """Flatten an in-memory RunResult into its serializable artifact row."""
    # All BaseResult fields carry over unchanged; only the node is reduced
    # to its unique_id.
    carried = {
        name: getattr(result, name)
        for name in ('status', 'timing', 'thread_id', 'execution_time',
                     'message', 'adapter_response', 'failures')
    }
    return RunResultOutput(unique_id=result.node.unique_id, **carried)
@dataclass
class RunExecutionResult(
    ExecutionResult,
):
    """In-memory result of a run invocation; written out as run_results."""
    results: Sequence[RunResult]
    args: Dict[str, Any] = field(default_factory=dict)
    generated_at: datetime = field(default_factory=datetime.utcnow)

    def write(self, path: str):
        # Convert to the serializable artifact form before writing.
        writable = RunResultsArtifact.from_execution_results(
            results=self.results,
            elapsed_time=self.elapsed_time,
            generated_at=self.generated_at,
            args=self.args,
        )
        writable.write(path)
@dataclass
@schema_version('run-results', 4)
class RunResultsArtifact(ExecutionResult, ArtifactMixin):
    """Serializable run-results artifact (schema version 4)."""
    results: Sequence[RunResultOutput]
    args: Dict[str, Any] = field(default_factory=dict)

    @classmethod
    def from_execution_results(
        cls,
        results: Sequence[RunResult],
        elapsed_time: float,
        generated_at: datetime,
        args: Dict,
    ):
        """Build the artifact from in-memory results, flattening each node."""
        processed_results = [process_run_result(result) for result in results]
        meta = RunResultsMetadata(
            dbt_schema_version=str(cls.dbt_schema_version),
            generated_at=generated_at,
        )
        return cls(
            metadata=meta,
            results=processed_results,
            elapsed_time=elapsed_time,
            args=args
        )

    def write(self, path: str):
        # omit_none=False keeps null-valued keys so the JSON layout is stable.
        write_json(path, self.to_dict(omit_none=False))
@dataclass
class RunOperationResult(ExecutionResult):
    """Execution result for a run-operation, with an overall success flag."""

    success: bool
@dataclass
class RunOperationResultMetadata(BaseArtifactMetadata):
    """Artifact metadata stamped with the run-operation schema version."""

    dbt_schema_version: str = field(default_factory=lambda: str(
        RunOperationResultsArtifact.dbt_schema_version
    ))
@dataclass
@schema_version('run-operation-result', 1)
class RunOperationResultsArtifact(RunOperationResult, ArtifactMixin):
    """Versioned artifact for a run-operation invocation."""

    @classmethod
    def from_success(
        cls,
        success: bool,
        elapsed_time: float,
        generated_at: datetime,
    ):
        """Build an artifact carrying only a success flag and timing.

        Run-operations produce no per-node results, hence ``results=[]``.
        """
        meta = RunOperationResultMetadata(
            dbt_schema_version=str(cls.dbt_schema_version),
            generated_at=generated_at,
        )
        return cls(
            metadata=meta,
            results=[],
            elapsed_time=elapsed_time,
            success=success,
        )
# due to issues with typing.Union collapsing subclasses, this can't subclass
# PartialResult
@dataclass
class SourceFreshnessResult(NodeResult):
    """Full freshness result for a source that has freshness criteria."""

    node: ParsedSourceDefinition
    status: FreshnessStatus
    max_loaded_at: datetime
    snapshotted_at: datetime
    # Seconds between max_loaded_at and snapshotted_at (see usage below,
    # where this becomes max_loaded_at_time_ago_in_s in the output).
    age: float

    @property
    def skipped(self):
        # Freshness results are never considered skipped.
        return False
class FreshnessErrorEnum(StrEnum):
    """Status values for freshness runtime errors."""

    runtime_error = 'runtime error'
@dataclass
class SourceFreshnessRuntimeError(dbtClassMixin):
    """Serializable output for a source freshness check that errored."""

    unique_id: str
    error: Optional[Union[str, int]]
    status: FreshnessErrorEnum
@dataclass
class SourceFreshnessOutput(dbtClassMixin):
    """Serializable output for a successfully evaluated freshness check."""

    unique_id: str
    max_loaded_at: datetime
    snapshotted_at: datetime
    max_loaded_at_time_ago_in_s: float
    status: FreshnessStatus
    criteria: FreshnessThreshold
    adapter_response: Dict[str, Any]
    timing: List[TimingInfo]
    thread_id: str
    execution_time: float
@dataclass
class PartialSourceFreshnessResult(NodeResult):
    """Freshness result that lacks the full timing/threshold fields."""

    status: FreshnessStatus

    @property
    def skipped(self):
        # Freshness results are never considered skipped.
        return False
# In-memory freshness result: either the partial or the full variant.
FreshnessNodeResult = Union[PartialSourceFreshnessResult,
                            SourceFreshnessResult]
# Serializable freshness output: runtime error or full output.
FreshnessNodeOutput = Union[SourceFreshnessRuntimeError, SourceFreshnessOutput]
def process_freshness_result(
    result: FreshnessNodeResult
) -> FreshnessNodeOutput:
    """Convert a freshness node result into its serializable output form.

    Runtime errors become SourceFreshnessRuntimeError; anything else must be
    a full SourceFreshnessResult with freshness criteria set on its node.
    """
    node_id = result.node.unique_id

    # Runtime errors get a much smaller output shape.
    if result.status == FreshnessStatus.RuntimeErr:
        return SourceFreshnessRuntimeError(
            unique_id=node_id,
            error=result.message,
            status=FreshnessErrorEnum.runtime_error,
        )

    # A non-error result is expected to be the full variant.
    if not isinstance(result, SourceFreshnessResult):
        raise InternalException(
            'Got {} instead of a SourceFreshnessResult for a '
            'non-error result in freshness execution!'
            .format(type(result))
        )

    # A source only gets a freshness result if it had criteria to evaluate.
    threshold = result.node.freshness
    if threshold is None:
        raise InternalException(
            'Somehow evaluated a freshness result for a source '
            'that has no freshness criteria!'
        )

    return SourceFreshnessOutput(
        unique_id=node_id,
        max_loaded_at=result.max_loaded_at,
        snapshotted_at=result.snapshotted_at,
        max_loaded_at_time_ago_in_s=result.age,
        status=result.status,
        criteria=threshold,
        adapter_response=result.adapter_response,
        timing=result.timing,
        thread_id=result.thread_id,
        execution_time=result.execution_time,
    )
@dataclass
class FreshnessMetadata(BaseArtifactMetadata):
    """Artifact metadata stamped with the sources schema version."""

    dbt_schema_version: str = field(
        default_factory=lambda: str(
            FreshnessExecutionResultArtifact.dbt_schema_version
        )
    )
@dataclass
class FreshnessResult(ExecutionResult):
    """In-memory result of a freshness execution across sources."""

    metadata: FreshnessMetadata
    results: Sequence[FreshnessNodeResult]

    @classmethod
    def from_node_results(
        cls,
        results: List[FreshnessNodeResult],
        elapsed_time: float,
        generated_at: datetime,
    ):
        """Wrap per-node results with freshly stamped metadata."""
        meta = FreshnessMetadata(generated_at=generated_at)
        return cls(metadata=meta, results=results, elapsed_time=elapsed_time)
@dataclass
@schema_version('sources', 3)
class FreshnessExecutionResultArtifact(
    ArtifactMixin,
    VersionedSchema,
):
    """Versioned sources artifact for freshness execution results."""

    metadata: FreshnessMetadata
    results: Sequence[FreshnessNodeOutput]
    elapsed_time: float

    @classmethod
    def from_result(cls, base: FreshnessResult):
        """Build the artifact by serializing each node result of ``base``."""
        processed = [process_freshness_result(r) for r in base.results]
        return cls(
            metadata=base.metadata,
            results=processed,
            elapsed_time=base.elapsed_time,
        )
# JSON-compatible primitive values used in catalog stats.
Primitive = Union[bool, str, float, None]
PrimitiveDict = Dict[str, Primitive]

# Identifies a relation by (database, schema, name); database may be None.
CatalogKey = NamedTuple(
    'CatalogKey',
    [('database', Optional[str]), ('schema', str), ('name', str)]
)
@dataclass
class StatsItem(dbtClassMixin):
    """One labeled statistic about a catalog table."""

    id: str
    label: str
    value: Primitive
    include: bool
    description: Optional[str] = None


# Stats keyed by their id.
StatsDict = Dict[str, StatsItem]
@dataclass
class ColumnMetadata(dbtClassMixin):
    """Type, position, and name of a single column."""

    type: str
    index: int
    name: str
    comment: Optional[str] = None


# Columns keyed by column name.
ColumnMap = Dict[str, ColumnMetadata]
@dataclass
class TableMetadata(dbtClassMixin):
    """Identity and ownership information for a catalog table."""

    type: str
    schema: str
    name: str
    database: Optional[str] = None
    comment: Optional[str] = None
    owner: Optional[str] = None
@dataclass
class CatalogTable(dbtClassMixin, Replaceable):
    """Catalog entry for one relation: metadata, columns, and stats."""

    metadata: TableMetadata
    columns: ColumnMap
    stats: StatsDict
    # the same table with two unique IDs will just be listed two times
    unique_id: Optional[str] = None

    def key(self) -> CatalogKey:
        """Case-normalized identity for this table."""
        return CatalogKey(
            lowercase(self.metadata.database),
            self.metadata.schema.lower(),
            self.metadata.name.lower(),
        )
@dataclass
class CatalogMetadata(BaseArtifactMetadata):
    """Artifact metadata stamped with the catalog schema version."""

    dbt_schema_version: str = field(
        default_factory=lambda: str(CatalogArtifact.dbt_schema_version)
    )
@dataclass
class CatalogResults(dbtClassMixin):
    """Catalog contents: node and source tables, plus any errors."""

    nodes: Dict[str, CatalogTable]
    sources: Dict[str, CatalogTable]
    errors: Optional[List[str]] = None
    _compile_results: Optional[Any] = None

    def __post_serialize__(self, dct):
        dct = super().__post_serialize__(dct)
        # Internal-only field; never include it in the serialized form.
        dct.pop('_compile_results', None)
        return dct
@dataclass
@schema_version('catalog', 1)
class CatalogArtifact(CatalogResults, ArtifactMixin):
    """Versioned catalog artifact written to disk as JSON."""

    metadata: CatalogMetadata

    @classmethod
    def from_results(
        cls,
        generated_at: datetime,
        nodes: Dict[str, CatalogTable],
        sources: Dict[str, CatalogTable],
        compile_results: Optional[Any],
        errors: Optional[List[str]]
    ) -> 'CatalogArtifact':
        """Assemble the artifact, stamping fresh metadata."""
        meta = CatalogMetadata(generated_at=generated_at)
        return cls(
            metadata=meta,
            nodes=nodes,
            sources=sources,
            errors=errors,
            _compile_results=compile_results,
        )
|
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
# TestNode: bare-bones "peer" used as a conduit for sending p2p messages to a
# node; the messages themselves are generated in the main testing logic.
class TestNode(NodeConnCB):
    def __init__(self):
        NodeConnCB.__init__(self)
        self.connection = None
        self.ping_counter = 1
        self.last_pong = msg_pong()
        # Bug fix: initialize here so reads of last_getdata (done under
        # mininode_lock in the test) can never raise AttributeError before
        # the first getdata message arrives.
        self.last_getdata = None

    def add_connection(self, conn):
        self.connection = conn

    # Track the last getdata message we receive (used in the test)
    def on_getdata(self, conn, message):
        self.last_getdata = message

    # Spin until verack message is received from the node.
    # We use this to signal that our test can begin. This
    # is called from the testing thread, so it needs to acquire
    # the global lock.
    def wait_for_verack(self):
        while True:
            with mininode_lock:
                if self.verack_received:
                    return
            time.sleep(0.05)

    # Wrapper for the NodeConn's send_message function
    def send_message(self, message):
        self.connection.send_message(message)

    def on_pong(self, conn, message):
        self.last_pong = message

    # Sync up with the node after delivery of a block: ping, then poll until
    # the matching pong comes back (or the timeout elapses).
    def sync_with_ping(self, timeout=30):
        self.connection.send_message(msg_ping(nonce=self.ping_counter))
        received_pong = False
        sleep_time = 0.05
        while not received_pong and timeout > 0:
            time.sleep(sleep_time)
            timeout -= sleep_time
            with mininode_lock:
                if self.last_pong.nonce == self.ping_counter:
                    received_pong = True
        self.ping_counter += 1
        return received_pong
class AcceptBlockTest(BitcoinTestFramework):
    """Exercise processing of unrequested blocks on a whitelisted and a
    non-whitelisted node (see the module docstring for the full scenario)."""

    def add_options(self, parser):
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("SOSD", "sosd"),
                          help="bitcoind binary to test")

    def setup_chain(self):
        initialize_chain_clean(self.options.tmpdir, 2)

    def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers which are not whitelisted, while Node1 will be used for
        # the whitelisted case.
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
                                     binary=self.options.testbinary))
        self.nodes.append(start_node(1, self.options.tmpdir,
                                     ["-debug", "-whitelist=127.0.0.1"],
                                     binary=self.options.testbinary))

    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        test_node = TestNode()   # connects to node0 (not whitelisted)
        white_node = TestNode()  # connects to node1 (whitelisted)

        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
        test_node.add_connection(connections[0])
        white_node.add_connection(connections[1])

        NetworkThread().start()  # Start up network handling in another thread

        # Test logic begins here
        test_node.wait_for_verack()
        white_node.wait_for_verack()

        # 1. Have both nodes mine a block (leave IBD)
        [ n.generate(1) for n in self.nodes ]
        # Parse each tip hash as a (long) integer; base 0 honors the "0x"
        # prefix and Python 2's int() accepts the trailing "L".
        tips = [ int ("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes ]

        # 2. Send one block that builds on each tip.
        # This should be accepted.
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in xrange(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        white_node.send_message(msg_block(blocks_h2[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 2)
        print "First height 2 block accepted by both nodes"

        # 3. Send another block that builds on the original tip.
        blocks_h2f = []  # Blocks at height 2 that fork off the main chain
        for i in xrange(2):
            blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
            blocks_h2f[i].solve()
        test_node.send_message(msg_block(blocks_h2f[0]))
        white_node.send_message(msg_block(blocks_h2f[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        # Non-whitelisted node0 should only have accepted the header...
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h2f[0].hash:
                assert_equal(x['status'], "headers-only")

        # ...while whitelisted node1 processed the full block.
        for x in self.nodes[1].getchaintips():
            if x['hash'] == blocks_h2f[1].hash:
                assert_equal(x['status'], "valid-headers")

        print "Second height 2 block accepted only from whitelisted peer"

        # 4. Now send another block that builds on the forking chain.
        blocks_h3 = []
        for i in xrange(2):
            blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
            blocks_h3[i].solve()
        test_node.send_message(msg_block(blocks_h3[0]))
        white_node.send_message(msg_block(blocks_h3[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        # Since the earlier block was not processed by node0, the new block
        # can't be fully validated.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h3[0].hash:
                assert_equal(x['status'], "headers-only")

        # But this block should be accepted by node0 since it has more work.
        try:
            self.nodes[0].getblock(blocks_h3[0].hash)
            print "Unrequested more-work block accepted from non-whitelisted peer"
        except:
            raise AssertionError("Unrequested more work block was not processed")

        # Node1 should have accepted and reorged.
        assert_equal(self.nodes[1].getblockcount(), 3)
        print "Successfully reorged to length 3 chain from whitelisted peer"

        # 4b. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node0.  Node1 should process the tip if
        # we give it the headers chain leading to the tip.
        tips = blocks_h3
        headers_message = msg_headers()
        all_blocks = []   # node0's blocks
        for j in xrange(2):
            for i in xrange(288):
                next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
                next_block.solve()
                if j==0:
                    test_node.send_message(msg_block(next_block))
                    all_blocks.append(next_block)
                else:
                    headers_message.headers.append(CBlockHeader(next_block))
                tips[j] = next_block

        time.sleep(2)
        for x in all_blocks:
            try:
                self.nodes[0].getblock(x.hash)
                if x == all_blocks[287]:
                    raise AssertionError("Unrequested block too far-ahead should have been ignored")
            except:
                if x == all_blocks[287]:
                    print "Unrequested block too far-ahead not processed"
                else:
                    raise AssertionError("Unrequested block with more work should have been accepted")

        headers_message.headers.pop()  # Ensure the last block is unrequested
        white_node.send_message(headers_message)  # Send headers leading to tip
        white_node.send_message(msg_block(tips[1]))  # Now deliver the tip
        try:
            white_node.sync_with_ping()
            self.nodes[1].getblock(tips[1].hash)
            print "Unrequested block far ahead of tip accepted from whitelisted peer"
        except:
            raise AssertionError("Unrequested block from whitelisted peer not accepted")

        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        test_node.send_message(msg_block(blocks_h2f[0]))

        # Here, if the sleep is too short, the test could falsely succeed (if the
        # node hasn't processed the block by the time the sleep returns, and then
        # the node processes it and incorrectly advances the tip).
        # But this would be caught later on, when we verify that an inv triggers
        # a getdata request for this block.
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        print "Unrequested block that would complete more-work chain was ignored"

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_getdata = None
            test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_getdata

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
        print "Inv at tip triggered getdata for unprocessed block"

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(blocks_h2f[0]))

        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        print "Successfully reorged to longer chain from non-whitelisted peer"

        [ c.disconnect_node() for c in connections ]
# Script entry point: run the full accept-block scenario.
if __name__ == '__main__':
    AcceptBlockTest().main()
|
|
# Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from __future__ import with_statement
import os
import pickle
import random
import subprocess
import sys
import time
import unittest
from test import support
try:
import threading
except ImportError:
threading = None
import _testcapi
# Names of _testcapi tests/functions to skip on PyPy, where these
# CPython-implementation details are not supported.
skips = []
if support.check_impl_detail(pypy=True):
    skips += [
        'test_Z_code',  # test_{Z,u}_code require PY_SSIZE_T_CLEAN support
        'test_u_code',
        'test_broken_memoryview',
        'test_capsule',
        'test_lazy_hash_inheritance',
        'test_widechar',
        'TestThreadState'
    ]
def testfunction(self):
    """some doc"""
    # Identity function: wrapped by _testcapi.instancemethod below; its
    # return value and __doc__ are asserted in CAPITest.test_instancemethod.
    return self
class InstanceMethod:
    """Holds callables wrapped by the C-level instancemethod descriptor."""
    id = _testcapi.instancemethod(id)
    testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
    """Miscellaneous C-API checks driven through _testcapi."""

    def test_instancemethod(self):
        """instancemethod-wrapped callables behave like real methods."""
        inst = InstanceMethod()
        self.assertEqual(id(inst), inst.id())
        self.assertTrue(inst.testfunction() is inst)
        self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
        self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)

        # Attributes set on the underlying function are visible, but the
        # bound wrapper itself is read-only.
        InstanceMethod.testfunction.attribute = "test"
        self.assertEqual(testfunction.attribute, "test")
        self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")

    @unittest.skipUnless(threading, 'Threading required for this test.')
    def test_no_FatalError_infinite_loop(self):
        """Crashing with no current thread must abort, not loop forever."""
        p = subprocess.Popen([sys.executable, "-c",
                              'import _testcapi;'
                              '_testcapi.crash_no_current_thread()'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        (out, err) = p.communicate()
        self.assertEqual(out, b'')
        # This used to cause an infinite loop.
        self.assertEqual(err.rstrip(),
                         b'Fatal Python error:'
                         b' PyThreadState_Get: no current thread')

    def test_memoryview_from_NULL_pointer(self):
        """A memoryview over a NULL pointer must be rejected."""
        self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
@unittest.skipIf(support.check_impl_detail(pypy=True),
                 'Py_AddPendingCall not currently supported.')
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestPendingCalls(unittest.TestCase):
    """Exercise Py_AddPendingCall via _testcapi._pending_threadfunc."""

    def pendingcalls_submit(self, l, n):
        """Submit ``n`` pending-call callbacks that each append to ``l``."""
        def callback():
            #this function can be interrupted by thread switching so let's
            #use an atomic operation
            l.append(None)

        for i in range(n):
            time.sleep(random.random()*0.02) #0.01 secs on average
            #try submitting callback until successful.
            #rely on regular interrupt to flush queue if we are
            #unsuccessful.
            while True:
                if _testcapi._pending_threadfunc(callback):
                    break;

    def pendingcalls_wait(self, l, n, context = None):
        """Busy-wait on the main thread until all ``n`` callbacks ran."""
        #now, stick around until l[0] has grown to 10
        count = 0;
        while len(l) != n:
            #this busy loop is where we expect to be interrupted to
            #run our callbacks.  Note that callbacks are only run on the
            #main thread
            if False and support.verbose:
                print("(%i)"%(len(l),),)
            for i in range(1000):
                a = i*i
            if context and not context.event.is_set():
                continue
            count += 1
            self.assertTrue(count < 10000,
                "timeout waiting for %i callbacks, got %i"%(n, len(l)))
        if False and support.verbose:
            print("(%i)"%(len(l),))

    def test_pendingcalls_threaded(self):
        """Submit callbacks from many worker threads at once."""
        #do every callback on a separate thread
        n = 32 #total callbacks
        threads = []

        # Simple shared-state bag for the worker threads.
        class foo(object):pass
        context = foo()
        context.l = []
        context.n = 2 #submits per thread
        context.nThreads = n // context.n
        context.nFinished = 0
        context.lock = threading.Lock()
        context.event = threading.Event()

        for i in range(context.nThreads):
            t = threading.Thread(target=self.pendingcalls_thread, args = (context,))
            t.start()
            threads.append(t)

        self.pendingcalls_wait(context.l, n, context)

        for t in threads:
            t.join()

    def pendingcalls_thread(self, context):
        """Worker body: submit, then signal once the last worker finishes."""
        try:
            self.pendingcalls_submit(context.l, context.n)
        finally:
            with context.lock:
                context.nFinished += 1
                nFinished = context.nFinished
                if False and support.verbose:
                    print("finished threads: ", nFinished)
            if nFinished == context.nThreads:
                context.event.set()

    def test_pendingcalls_non_threaded(self):
        """Submit all callbacks from the main thread only."""
        #again, just using the main thread, likely they will all be dispatched at
        #once.  It is ok to ask for too many, because we loop until we find a slot.
        #the loop can be interrupted to dispatch.
        #there are only 32 dispatch slots, so we go for twice that!
        l = []
        n = 64
        self.pendingcalls_submit(l, n)
        self.pendingcalls_wait(l, n)

    def test_subinterps(self):
        """A subinterpreter gets its own sys.modules and builtins."""
        # XXX this test leaks in refleak runs
        import builtins
        r, w = os.pipe()
        code = """if 1:
            import sys, builtins, pickle
            with open({:d}, "wb") as f:
                pickle.dump(id(sys.modules), f)
                pickle.dump(id(builtins), f)
            """.format(w)
        with open(r, "rb") as f:
            ret = _testcapi.run_in_subinterp(code)
            self.assertEqual(ret, 0)
            self.assertNotEqual(pickle.load(f), id(sys.modules))
            self.assertNotEqual(pickle.load(f), id(builtins))
# Bug #6012
class Test6012(unittest.TestCase):
    """Regression test for bug #6012 (argument parsing)."""
    def test(self):
        self.assertEqual(_testcapi.argparsing("Hello", "World"), 1)
class EmbeddingTest(unittest.TestCase):
    """Run the compiled _testembed binary to exercise embedding APIs."""

    @unittest.skipIf(
        sys.platform.startswith('win'),
        "test doesn't work under Windows")
    def test_subinterps(self):
        # XXX only tested under Unix checkouts
        basepath = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
        oldcwd = os.getcwd()
        # This is needed otherwise we get a fatal error:
        # "Py_Initialize: Unable to get the locale encoding
        # LookupError: no codec search functions registered:  can't find encoding"
        os.chdir(basepath)
        try:
            exe = os.path.join(basepath, "Modules", "_testembed")
            if not os.path.exists(exe):
                self.skipTest("%r doesn't exist" % exe)
            p = subprocess.Popen([exe],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            (out, err) = p.communicate()
            self.assertEqual(p.returncode, 0,
                             "bad returncode %d, stderr is %r" %
                             (p.returncode, err))
            if support.verbose:
                print()
                print(out.decode('latin1'))
                print(err.decode('latin1'))
        finally:
            os.chdir(oldcwd)
def test_main():
    """Run the unittest classes, then every test_* export of _testcapi."""
    support.run_unittest(CAPITest, TestPendingCalls, Test6012, EmbeddingTest)

    for name in dir(_testcapi):
        if name.startswith('test_') and name not in skips:
            test = getattr(_testcapi, name)
            if support.verbose:
                print("internal", name)
            test()
# some extra thread-state tests driven via _testcapi
def TestThreadState():
    """Check that _test_thread_state runs the callback exactly 3 times."""
    if support.verbose:
        print("auto-thread-state")

    idents = []

    def callback():
        idents.append(_thread.get_ident())

    _testcapi._test_thread_state(callback)
    # NOTE(review): presumably keeps extra references to callback alive for
    # the C-side test — confirm before removing.
    a = b = callback
    time.sleep(1)
    # Check our main thread is in the list exactly 3 times.
    if idents.count(_thread.get_ident()) != 3:
        raise support.TestFailed(
                    "Couldn't find main thread correctly in the list")
# Run the thread-state check once on the main thread and once on a worker
# (skipped on PyPy via the skips list above).
if threading and 'TestThreadState' not in skips:
    import _thread
    import time
    TestThreadState()
    t = threading.Thread(target=TestThreadState)
    t.start()
    t.join()

if __name__ == "__main__":
    test_main()
|
|
# Copyright (c) 2013 Paul Tagliamonte <paultag@debian.org>
# Copyright (c) 2013, 2014 Bob Tolbert <bob@tolbert.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from hy.compiler import hy_compile, HyTypeError
from hy.models import HyObject, replace_hy_obj
from hy.lex import tokenize, LexException
from hy.errors import HyIOError
from io import open
import marshal
import imp
import sys
import ast
import os
import __future__
from hy._compat import PY3, PY33, MAGIC, builtins, long_type, wr_long
from hy._compat import string_types
def ast_compile(ast, filename, mode):
    """Compile AST.

    Like Python's compile, but forces the division and print_function
    future flags on."""
    future_flags = (__future__.CO_FUTURE_DIVISION
                    | __future__.CO_FUTURE_PRINT_FUNCTION)
    return compile(ast, filename, mode, future_flags)
def import_buffer_to_hst(buf):
    """Import content from buf and return an Hy AST."""
    terminated = buf + "\n"
    return tokenize(terminated)
def import_file_to_hst(fpath):
    """Import content from fpath and return an Hy AST.

    Reads the file as UTF-8; an IOError is re-raised as HyIOError so the
    caller gets a Hy-flavored exception with the same errno/filename.
    """
    try:
        with open(fpath, 'r', encoding='utf-8') as f:
            return import_buffer_to_hst(f.read())
    except IOError as e:
        raise HyIOError(e.errno, e.strerror, e.filename)
def import_buffer_to_ast(buf, module_name):
    """ Import content from buf and return a Python AST."""
    return hy_compile(import_buffer_to_hst(buf), module_name)
def import_file_to_ast(fpath, module_name):
    """Import content from fpath and return a Python AST."""
    return hy_compile(import_file_to_hst(fpath), module_name)
def import_file_to_module(module_name, fpath):
    """Import content from fpath and puts it into a Python module.

    Returns the module.

    On a compile/lex error, attaches the source text and filename to the
    exception before re-raising; on any other error, removes the
    half-initialized entry from sys.modules.
    """
    try:
        _ast = import_file_to_ast(fpath, module_name)
        mod = imp.new_module(module_name)
        mod.__file__ = fpath
        eval(ast_compile(_ast, fpath, "exec"), mod.__dict__)
    except (HyTypeError, LexException) as e:
        # Fill in source context so error reporting can show the bad code.
        if e.source is None:
            with open(fpath, 'rt') as fp:
                e.source = fp.read()
            e.filename = fpath
        raise
    except Exception:
        # Don't leave a broken module behind.
        sys.modules.pop(module_name, None)
        raise
    return mod
def import_buffer_to_module(module_name, buf):
    """Compile ``buf`` as Hy source and execute it into a new module.

    On a compile/lex error, attaches the buffer as source (filename
    '<stdin>') before re-raising. Returns the module.
    """
    try:
        _ast = import_buffer_to_ast(buf, module_name)
        mod = imp.new_module(module_name)
        eval(ast_compile(_ast, "", "exec"), mod.__dict__)
    except (HyTypeError, LexException) as e:
        if e.source is None:
            e.source = buf
            e.filename = '<stdin>'
        raise
    return mod
def hy_eval(hytree, namespace, module_name):
    """Evaluate an Hy parse tree in ``namespace`` and return its value.

    The tree's positions are reset via a dummy HyObject, compiled with
    ``get_expr=True`` (body statements + final expression), then executed
    in two steps: exec the body, eval the trailing expression.
    """
    foo = HyObject()
    foo.start_line = 0
    foo.end_line = 0
    foo.start_column = 0
    foo.end_column = 0
    replace_hy_obj(hytree, foo)

    if not isinstance(module_name, string_types):
        raise HyTypeError(foo, "Module name must be a string")

    _ast, expr = hy_compile(hytree, module_name, get_expr=True)

    # Spoof the positions in the generated ast...
    for node in ast.walk(_ast):
        node.lineno = 1
        node.col_offset = 1

    for node in ast.walk(expr):
        node.lineno = 1
        node.col_offset = 1

    if not isinstance(namespace, dict):
        raise HyTypeError(foo, "Globals must be a dictionary")

    # Two-step eval: eval() the body of the exec call
    eval(ast_compile(_ast, "<eval_body>", "exec"), namespace)

    # Then eval the expression context and return that
    return eval(ast_compile(expr, "<eval>", "eval"), namespace)
def write_hy_as_pyc(fname):
    """Compile the Hy source file ``fname`` and write a .pyc next to it.

    Writes the pyc in the classic layout: magic, mtime, (size on PY33+),
    then the marshalled code object. The magic is written last so a
    partially written file is never mistaken for a valid pyc.
    """
    with open(fname, 'U') as f:
        try:
            st = os.fstat(f.fileno())
        except AttributeError:
            # Fall back when the file object has no real fd.
            st = os.stat(fname)
        timestamp = long_type(st.st_mtime)

    _ast = import_file_to_ast(fname,
                              os.path.basename(os.path.splitext(fname)[0]))
    code = ast_compile(_ast, fname, "exec")
    # NOTE(review): assumes fname ends in ".hy" — confirm callers guarantee it.
    cfile = "%s.pyc" % fname[:-len(".hy")]

    open_ = builtins.open

    with open_(cfile, 'wb') as fc:
        # Placeholder for the magic number, overwritten at the end.
        if PY3:
            fc.write(b'\0\0\0\0')
        else:
            fc.write('\0\0\0\0')
        wr_long(fc, timestamp)
        if PY33:
            wr_long(fc, st.st_size)
        marshal.dump(code, fc)
        fc.flush()
        fc.seek(0, 0)
        fc.write(MAGIC)
class MetaLoader(object):
    """PEP 302 loader that imports Hy source files from a known path."""

    def __init__(self, path):
        self.path = path

    def is_package(self, fullname):
        """Return True if ``fullname`` resolves to a package (__init__.hy)."""
        dirpath = "/".join(fullname.split("."))
        for pth in sys.path:
            pth = os.path.abspath(pth)
            composed_path = "%s/%s/__init__.hy" % (pth, dirpath)
            if os.path.exists(composed_path):
                return True
        return False

    def load_module(self, fullname):
        """Import and return the module named ``fullname`` from self.path."""
        if fullname in sys.modules:
            return sys.modules[fullname]

        if not self.path:
            return

        # Placeholder guards against recursive imports while compiling.
        sys.modules[fullname] = None
        try:
            mod = import_file_to_module(fullname,
                                        self.path)
        except Exception:
            # Bug fix: drop the placeholder on *any* failure. The
            # HyTypeError/LexException path in import_file_to_module
            # re-raises without cleaning sys.modules, which previously left
            # ``None`` behind — later imports of this name would get None.
            sys.modules.pop(fullname, None)
            raise

        ispkg = self.is_package(fullname)

        mod.__file__ = self.path
        mod.__loader__ = self
        mod.__name__ = fullname

        if ispkg:
            mod.__path__ = []
            mod.__package__ = fullname
        else:
            mod.__package__ = fullname.rpartition('.')[0]

        sys.modules[fullname] = mod
        return mod
class MetaImporter(object):
    """PEP 302 finder that locates Hy modules on sys.path."""

    def find_on_path(self, fullname):
        """Return the path of the Hy source for ``fullname``, or None.

        For each sys.path entry, a package __init__ is preferred over a
        plain module file.
        """
        dirpath = "/".join(fullname.split("."))
        candidates = ["%s/__init__.hy", "%s.hy"]
        for entry in sys.path:
            base = "%s/%s" % (os.path.abspath(entry), dirpath)
            for template in candidates:
                candidate = template % base
                if os.path.exists(candidate):
                    return candidate

    def find_module(self, fullname, path=None):
        found = self.find_on_path(fullname)
        if found:
            return MetaLoader(found)
# Install the Hy importer at the front of the meta path, and ensure the
# current directory is searched first.
sys.meta_path.insert(0, MetaImporter())
sys.path.insert(0, "")
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
import mox
import netaddr
from nova.cells import rpcapi as cells_rpcapi
from nova import db
from nova import exception
from nova.network import model as network_model
from nova import notifications
from nova.objects import instance
from nova.objects import instance_info_cache
from nova.objects import security_group
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests.objects import test_instance_fault
from nova.tests.objects import test_objects
class _TestInstanceObject(object):
    @property
    def fake_instance(self):
        """Build a stub DB-style instance dict used by the tests below."""
        fake_instance = fakes.stub_instance(id=2,
                                            access_ipv4='1.2.3.4',
                                            access_ipv6='::1')
        fake_instance['cell_name'] = 'api!child'
        fake_instance['scheduled_at'] = None
        fake_instance['terminated_at'] = None
        fake_instance['deleted_at'] = None
        fake_instance['created_at'] = None
        fake_instance['updated_at'] = None
        # Normalize launched_at to an aware UTC datetime without microseconds.
        fake_instance['launched_at'] = (
            fake_instance['launched_at'].replace(
                tzinfo=iso8601.iso8601.Utc(), microsecond=0))
        fake_instance['deleted'] = False
        fake_instance['info_cache']['instance_uuid'] = fake_instance['uuid']
        fake_instance['security_groups'] = []
        fake_instance['pci_devices'] = []
        fake_instance['user_id'] = self.context.user_id
        fake_instance['project_id'] = self.context.project_id
        return fake_instance
    def test_datetime_deserialization(self):
        """Datetime fields must round-trip through obj_to_primitive."""
        red_letter_date = timeutils.parse_isotime(
            timeutils.isotime(datetime.datetime(1955, 11, 5)))
        inst = instance.Instance()
        inst.uuid = 'fake-uuid'
        inst.launched_at = red_letter_date
        primitive = inst.obj_to_primitive()
        expected = {'nova_object.name': 'Instance',
                    'nova_object.namespace': 'nova',
                    'nova_object.version': '1.0',
                    'nova_object.data':
                        {'uuid': 'fake-uuid',
                         'launched_at': '1955-11-05T00:00:00Z'},
                    'nova_object.changes': ['uuid', 'launched_at']}
        self.assertEqual(primitive, expected)
        inst2 = instance.Instance.obj_from_primitive(primitive)
        # The deserialized field comes back as a real datetime, not a string.
        self.assertTrue(isinstance(inst2.launched_at,
                                   datetime.datetime))
        self.assertEqual(inst2.launched_at, red_letter_date)
    def test_ip_deserialization(self):
        """IP address fields must round-trip through obj_to_primitive."""
        inst = instance.Instance()
        inst.uuid = 'fake-uuid'
        inst.access_ip_v4 = '1.2.3.4'
        inst.access_ip_v6 = '::1'
        primitive = inst.obj_to_primitive()
        expected = {'nova_object.name': 'Instance',
                    'nova_object.namespace': 'nova',
                    'nova_object.version': '1.0',
                    'nova_object.data':
                        {'uuid': 'fake-uuid',
                         'access_ip_v4': '1.2.3.4',
                         'access_ip_v6': '::1'},
                    'nova_object.changes': ['uuid', 'access_ip_v6',
                                            'access_ip_v4']}
        self.assertEqual(primitive, expected)
        inst2 = instance.Instance.obj_from_primitive(primitive)
        # Deserialized fields come back as netaddr objects, not strings.
        self.assertTrue(isinstance(inst2.access_ip_v4, netaddr.IPAddress))
        self.assertTrue(isinstance(inst2.access_ip_v6, netaddr.IPAddress))
        self.assertEqual(inst2.access_ip_v4, netaddr.IPAddress('1.2.3.4'))
        self.assertEqual(inst2.access_ip_v6, netaddr.IPAddress('::1'))
    def test_get_without_expected(self):
        """get_by_uuid with expected_attrs=[] must not set optional attrs."""
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, 'uuid',
                                columns_to_join=[]
                                ).AndReturn(self.fake_instance)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, 'uuid',
                                             expected_attrs=[])
        for attr in instance.INSTANCE_OPTIONAL_ATTRS:
            self.assertFalse(inst.obj_attr_is_set(attr))
        self.assertRemotes()
    def test_get_with_expected(self):
        """Requesting all optional attrs loads every one of them."""
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        # 'fault' is loaded via its own DB call, not via columns_to_join.
        exp_cols = instance.INSTANCE_OPTIONAL_ATTRS[:]
        exp_cols.remove('fault')
        db.instance_get_by_uuid(
            self.context, 'uuid',
            columns_to_join=exp_cols
            ).AndReturn(self.fake_instance)
        fake_faults = test_instance_fault.fake_faults
        db.instance_fault_get_by_instance_uuids(
            self.context, [self.fake_instance['uuid']]
            ).AndReturn(fake_faults)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(
            self.context, 'uuid',
            expected_attrs=instance.INSTANCE_OPTIONAL_ATTRS)
        for attr in instance.INSTANCE_OPTIONAL_ATTRS:
            self.assertTrue(inst.obj_attr_is_set(attr))
        self.assertRemotes()
    def test_get_by_id(self):
        """get_by_id joins the default columns and returns the instance."""
        self.mox.StubOutWithMock(db, 'instance_get')
        db.instance_get(self.context, 'instid',
                        columns_to_join=['info_cache',
                                         'security_groups']
                        ).AndReturn(self.fake_instance)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_id(self.context, 'instid')
        self.assertEqual(inst.uuid, self.fake_instance['uuid'])
        self.assertRemotes()
    def test_load(self):
        """Accessing an unset attr lazy-loads it with exactly one query."""
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        fake_uuid = self.fake_instance['uuid']
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(self.fake_instance)
        fake_inst2 = dict(self.fake_instance,
                          system_metadata=[{'key': 'foo', 'value': 'bar'}])
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['system_metadata']
                                ).AndReturn(fake_inst2)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
        self.assertFalse(hasattr(inst, '_system_metadata'))
        sys_meta = inst.system_metadata
        self.assertEqual(sys_meta, {'foo': 'bar'})
        self.assertTrue(hasattr(inst, '_system_metadata'))
        # Make sure we don't run load again
        sys_meta2 = inst.system_metadata
        self.assertEqual(sys_meta2, {'foo': 'bar'})
        self.assertRemotes()
def test_load_invalid(self):
inst = instance.Instance()
inst._context = self.context
inst.uuid = 'fake-uuid'
self.assertRaises(exception.ObjectActionError,
inst.obj_load_attr, 'foo')
    def test_get_remote(self):
        """Fields survive a (possibly remote) get_by_uuid round trip."""
        # isotime doesn't have microseconds and is always UTC
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        fake_instance = self.fake_instance
        db.instance_get_by_uuid(self.context, 'fake-uuid',
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_instance)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, 'fake-uuid')
        self.assertEqual(inst.id, fake_instance['id'])
        self.assertEqual(inst.launched_at, fake_instance['launched_at'])
        self.assertEqual(str(inst.access_ip_v4),
                         fake_instance['access_ip_v4'])
        self.assertEqual(str(inst.access_ip_v6),
                         fake_instance['access_ip_v6'])
        self.assertRemotes()
    def test_refresh(self):
        """refresh() re-fetches from the DB and clears pending changes."""
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        fake_uuid = self.fake_instance['uuid']
        # First fetch returns the original host, second the updated one.
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(dict(self.fake_instance,
                                                 host='orig-host'))
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(dict(self.fake_instance,
                                                 host='new-host'))
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
        self.assertEqual(inst.host, 'orig-host')
        inst.refresh()
        self.assertEqual(inst.host, 'new-host')
        self.assertRemotes()
        self.assertEqual(set([]), inst.obj_what_changed())
    def test_refresh_does_not_recurse(self):
        """refresh() of an orphaned object fails instead of recursing."""
        inst = instance.Instance()
        inst._context = self.context
        inst.uuid = 'fake-uuid'
        inst.metadata = {}
        # The copy has no context, so assigning its fields back would
        # normally trigger another remote lookup; that must not happen.
        inst_copy = instance.Instance()
        inst_copy.uuid = inst.uuid
        self.mox.StubOutWithMock(instance.Instance, 'get_by_uuid')
        instance.Instance.get_by_uuid(self.context, uuid=inst.uuid,
                                      expected_attrs=['metadata']
                                      ).AndReturn(inst_copy)
        self.mox.ReplayAll()
        self.assertRaises(exception.OrphanedObjectError, inst.refresh)
    def _save_test_helper(self, cell_type, save_kwargs):
        """Common code for testing save() for cells/non-cells.

        :param cell_type: None, 'api' or 'compute'; selects which cells
                          RPC call (if any) save() is expected to make.
        :param save_kwargs: kwargs forwarded to Instance.save(), also used
                            to derive the expected DB update arguments.
        """
        if cell_type:
            self.flags(enable=True, cell_type=cell_type, group='cells')
        else:
            self.flags(enable=False, group='cells')
        old_ref = dict(self.fake_instance, host='oldhost', user_data='old',
                       vm_state='old', task_state='old')
        fake_uuid = old_ref['uuid']
        expected_updates = dict(vm_state='meow', task_state='wuff',
                                user_data='new')
        new_ref = dict(old_ref, host='newhost', **expected_updates)
        exp_vm_state = save_kwargs.get('expected_vm_state')
        exp_task_state = save_kwargs.get('expected_task_state')
        admin_reset = save_kwargs.get('admin_state_reset', False)
        # Expected-state kwargs are passed straight through to the DB layer.
        if exp_vm_state:
            expected_updates['expected_vm_state'] = exp_vm_state
        if exp_task_state:
            expected_updates['expected_task_state'] = exp_task_state
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(db, 'instance_info_cache_update')
        cells_api_mock = self.mox.CreateMock(cells_rpcapi.CellsAPI)
        self.mox.StubOutWithMock(cells_api_mock,
                                 'instance_update_at_top')
        self.mox.StubOutWithMock(cells_api_mock,
                                 'instance_update_from_api')
        self.mox.StubOutWithMock(cells_rpcapi, 'CellsAPI',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(notifications, 'send_update')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(old_ref)
        db.instance_update_and_get_original(
            self.context, fake_uuid, expected_updates,
            update_cells=False,
            columns_to_join=['info_cache', 'security_groups']
            ).AndReturn((old_ref, new_ref))
        # api cells push updates down; compute cells push updates up.
        if cell_type == 'api':
            cells_rpcapi.CellsAPI().AndReturn(cells_api_mock)
            cells_api_mock.instance_update_from_api(
                self.context, mox.IsA(instance.Instance),
                exp_vm_state, exp_task_state, admin_reset)
        elif cell_type == 'compute':
            cells_rpcapi.CellsAPI().AndReturn(cells_api_mock)
            cells_api_mock.instance_update_at_top(self.context, new_ref)
        notifications.send_update(self.context, mox.IgnoreArg(),
                                  mox.IgnoreArg())
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, old_ref['uuid'])
        self.assertEqual('old', inst.task_state)
        self.assertEqual('old', inst.vm_state)
        self.assertEqual('old', inst.user_data)
        inst.vm_state = 'meow'
        inst.task_state = 'wuff'
        inst.user_data = 'new'
        inst.save(**save_kwargs)
        self.assertEqual('newhost', inst.host)
        self.assertEqual('meow', inst.vm_state)
        self.assertEqual('wuff', inst.task_state)
        self.assertEqual('new', inst.user_data)
        self.assertEqual(set([]), inst.obj_what_changed())
    def test_save(self):
        # No cells configured: save() only touches the local DB.
        self._save_test_helper(None, {})
    def test_save_in_api_cell(self):
        # api cell: save() must call instance_update_from_api.
        self._save_test_helper('api', {})
    def test_save_in_compute_cell(self):
        # compute cell: save() must call instance_update_at_top.
        self._save_test_helper('compute', {})
    def test_save_exp_vm_state(self):
        # expected_vm_state is forwarded to the DB update.
        self._save_test_helper(None, {'expected_vm_state': ['meow']})
    def test_save_exp_task_state(self):
        # expected_task_state is forwarded to the DB update.
        self._save_test_helper(None, {'expected_task_state': ['meow']})
    def test_save_exp_vm_state_api_cell(self):
        # expected_vm_state must also reach the api-cell RPC call.
        self._save_test_helper('api', {'expected_vm_state': ['meow']})
    def test_save_exp_task_state_api_cell(self):
        # expected_task_state must also reach the api-cell RPC call.
        self._save_test_helper('api', {'expected_task_state': ['meow']})
    def test_save_exp_task_state_api_cell_admin_reset(self):
        # admin_state_reset=True is forwarded to instance_update_from_api.
        self._save_test_helper('api', {'admin_state_reset': True})
    def test_save_rename_sends_notification(self):
        """Changing only display_name still emits an update notification."""
        # Tests that simply changing the 'display_name' on the instance
        # will send a notification.
        self.flags(enable=False, group='cells')
        old_ref = dict(self.fake_instance, display_name='hello')
        fake_uuid = old_ref['uuid']
        expected_updates = dict(display_name='goodbye')
        new_ref = dict(old_ref, **expected_updates)
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(notifications, 'send_update')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(old_ref)
        db.instance_update_and_get_original(
            self.context, fake_uuid, expected_updates, update_cells=False,
            columns_to_join=['info_cache', 'security_groups']
            ).AndReturn((old_ref, new_ref))
        notifications.send_update(self.context, mox.IgnoreArg(),
                                  mox.IgnoreArg())
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, old_ref['uuid'])
        self.assertEqual('hello', inst.display_name)
        inst.display_name = 'goodbye'
        inst.save()
        self.assertEqual('goodbye', inst.display_name)
        self.assertEqual(set([]), inst.obj_what_changed())
    def test_get_deleted(self):
        """A non-zero DB 'deleted' value is coerced to boolean True."""
        fake_inst = dict(self.fake_instance, id=123, deleted=123)
        fake_uuid = fake_inst['uuid']
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
        # NOTE(danms): Make sure it's actually a bool
        self.assertEqual(inst.deleted, True)
    def test_get_not_cleaned(self):
        """A NULL DB 'cleaned' value is coerced to boolean False."""
        fake_inst = dict(self.fake_instance, id=123, cleaned=None)
        fake_uuid = fake_inst['uuid']
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
        # NOTE(mikal): Make sure it's actually a bool
        self.assertEqual(inst.cleaned, False)
    def test_get_cleaned(self):
        """A DB 'cleaned' value of 1 is coerced to boolean True."""
        fake_inst = dict(self.fake_instance, id=123, cleaned=1)
        fake_uuid = fake_inst['uuid']
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
        # NOTE(mikal): Make sure it's actually a bool
        self.assertEqual(inst.cleaned, True)
    def test_with_info_cache(self):
        """info_cache is hydrated on load and persisted on save()."""
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        nwinfo1 = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
        nwinfo2 = network_model.NetworkInfo.hydrate([{'address': 'bar'}])
        nwinfo1_json = nwinfo1.json()
        nwinfo2_json = nwinfo2.json()
        fake_inst['info_cache'] = {'network_info': nwinfo1_json,
                                   'instance_uuid': fake_uuid}
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(db, 'instance_info_cache_update')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst)
        # Changing only the cache must hit instance_info_cache_update,
        # not the main instance_update path.
        db.instance_info_cache_update(self.context, fake_uuid,
                                      {'network_info': nwinfo2_json})
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
        self.assertEqual(inst.info_cache.network_info, nwinfo1)
        self.assertEqual(inst.info_cache.instance_uuid, fake_uuid)
        inst.info_cache.network_info = nwinfo2
        inst.save()
    def test_with_info_cache_none(self):
        """A NULL info_cache row results in inst.info_cache being None."""
        fake_inst = dict(self.fake_instance, info_cache=None)
        fake_uuid = fake_inst['uuid']
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache']
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
                                             ['info_cache'])
        self.assertEqual(None, inst.info_cache)
    def test_with_security_groups(self):
        """Security groups hydrate as objects and save via their own API."""
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        fake_inst['security_groups'] = [
            {'id': 1, 'name': 'secgroup1', 'description': 'fake-desc',
             'user_id': 'fake-user', 'project_id': 'fake_project',
             'created_at': None, 'updated_at': None, 'deleted_at': None,
             'deleted': False},
            {'id': 2, 'name': 'secgroup2', 'description': 'fake-desc',
             'user_id': 'fake-user', 'project_id': 'fake_project',
             'created_at': None, 'updated_at': None, 'deleted_at': None,
             'deleted': False},
            ]
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(db, 'security_group_update')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst)
        # Only the changed group is written back; the instance row is not.
        db.security_group_update(self.context, 1, {'description': 'changed'}
                                 ).AndReturn(fake_inst['security_groups'][0])
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
        self.assertEqual(len(inst.security_groups), 2)
        for index, group in enumerate(fake_inst['security_groups']):
            for key in group:
                self.assertEqual(group[key],
                                 inst.security_groups[index][key])
                self.assertTrue(isinstance(inst.security_groups[index],
                                           security_group.SecurityGroup))
        self.assertEqual(inst.security_groups.obj_what_changed(), set())
        inst.security_groups[0].description = 'changed'
        inst.save()
        self.assertEqual(inst.security_groups.obj_what_changed(), set())
    def test_with_empty_security_groups(self):
        """An instance with no security groups hydrates to an empty list."""
        fake_inst = dict(self.fake_instance, security_groups=[])
        fake_uuid = fake_inst['uuid']
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
        self.assertEqual(0, len(inst.security_groups))
    def test_with_empty_pci_devices(self):
        """An instance with no PCI devices hydrates to an empty list."""
        fake_inst = dict(self.fake_instance, pci_devices=[])
        fake_uuid = fake_inst['uuid']
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['pci_devices']
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
                                             ['pci_devices'])
        self.assertEqual(len(inst.pci_devices), 0)
    def test_with_pci_devices(self):
        """PCI device rows hydrate into objects tied to the instance."""
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        fake_inst['pci_devices'] = [
            {'created_at': None,
             'updated_at': None,
             'deleted_at': None,
             'deleted': None,
             'id': 2,
             'compute_node_id': 1,
             'address': 'a1',
             'vendor_id': 'v1',
             'product_id': 'p1',
             'dev_type': 't',
             'status': 'allocated',
             'dev_id': 'i',
             'label': 'l',
             'instance_uuid': fake_uuid,
             'extra_info': '{}'},
            {
             'created_at': None,
             'updated_at': None,
             'deleted_at': None,
             'deleted': None,
             'id': 1,
             'compute_node_id': 1,
             'address': 'a',
             'vendor_id': 'v',
             'product_id': 'p',
             'dev_type': 't',
             'status': 'allocated',
             'dev_id': 'i',
             'label': 'l',
             'instance_uuid': fake_uuid,
             'extra_info': '{}'},
            ]
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['pci_devices']
                                ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
                                             ['pci_devices'])
        self.assertEqual(len(inst.pci_devices), 2)
        self.assertEqual(inst.pci_devices[0].instance_uuid, fake_uuid)
        self.assertEqual(inst.pci_devices[1].instance_uuid, fake_uuid)
    def test_with_fault(self):
        """expected_attrs=['fault'] loads the newest fault for the instance."""
        fake_inst = dict(self.fake_instance)
        fake_uuid = fake_inst['uuid']
        fake_faults = [dict(x, instance_uuid=fake_uuid)
                       for x in test_instance_fault.fake_faults['fake-uuid']]
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=[],
                                ).AndReturn(self.fake_instance)
        db.instance_fault_get_by_instance_uuids(
            self.context, [fake_uuid]).AndReturn({fake_uuid: fake_faults})
        self.mox.ReplayAll()
        inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
                                             expected_attrs=['fault'])
        # Only the first (most recent) fault is exposed on the object.
        self.assertEqual(fake_faults[0], dict(inst.fault.items()))
        self.assertRemotes()
    def test_iteritems_with_extra_attrs(self):
        """items() includes synthetic attributes such as 'name'."""
        self.stubs.Set(instance.Instance, 'name', 'foo')
        inst = instance.Instance()
        inst.uuid = 'fake-uuid'
        self.assertEqual(inst.items(),
                         {'uuid': 'fake-uuid',
                          'name': 'foo',
                          }.items())
def _test_metadata_change_tracking(self, which):
inst = instance.Instance()
inst.uuid = 'fake-uuid'
setattr(inst, which, {})
inst.obj_reset_changes()
getattr(inst, which)['foo'] = 'bar'
self.assertEqual(set([which]), inst.obj_what_changed())
inst.obj_reset_changes()
self.assertEqual(set(), inst.obj_what_changed())
    def test_metadata_change_tracking(self):
        # Change tracking for the user-visible metadata dict.
        self._test_metadata_change_tracking('metadata')
    def test_system_metadata_change_tracking(self):
        # Change tracking for the internal system_metadata dict.
        self._test_metadata_change_tracking('system_metadata')
    def test_create_stubbed(self):
        """create() passes set fields straight through to instance_create."""
        self.mox.StubOutWithMock(db, 'instance_create')
        vals = {'host': 'foo-host',
                'memory_mb': 128,
                'system_metadata': {'foo': 'bar'}}
        fake_inst = fake_instance.fake_db_instance(**vals)
        db.instance_create(self.context, vals).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance()
        inst.host = 'foo-host'
        inst.memory_mb = 128
        inst.system_metadata = {'foo': 'bar'}
        inst.create(self.context)
    def test_create(self):
        """create() with no fields set sends an empty update dict."""
        self.mox.StubOutWithMock(db, 'instance_create')
        db.instance_create(self.context, {}).AndReturn(self.fake_instance)
        self.mox.ReplayAll()
        inst = instance.Instance()
        inst.create(self.context)
        self.assertEqual(self.fake_instance['id'], inst.id)
def test_create_with_values(self):
inst1 = instance.Instance()
values = {'user_id': self.context.user_id,
'project_id': self.context.project_id,
'host': 'foo-host'}
inst1.update(values)
inst1.create(self.context)
self.assertEqual(inst1.host, 'foo-host')
inst2 = instance.Instance.get_by_uuid(self.context, inst1.uuid)
self.assertEqual(inst2.host, 'foo-host')
def test_recreate_fails(self):
inst = instance.Instance()
values = {'user_id': self.context.user_id,
'project_id': self.context.project_id,
'host': 'foo-host'}
inst.update(values)
inst.create(self.context)
self.assertRaises(exception.ObjectActionError, inst.create,
self.context)
    def test_create_with_special_things(self):
        """Nested objects (secgroups, info_cache) serialize for creation."""
        self.mox.StubOutWithMock(db, 'instance_create')
        fake_inst = fake_instance.fake_db_instance()
        # Security groups flatten to names; info_cache to its JSON blob.
        db.instance_create(self.context,
                           {'host': 'foo-host',
                            'security_groups': ['foo', 'bar'],
                            'info_cache': {'network_info': '[]'},
                            }
                           ).AndReturn(fake_inst)
        self.mox.ReplayAll()
        inst = instance.Instance()
        inst.host = 'foo-host'
        secgroups = security_group.SecurityGroupList()
        secgroups.objects = []
        for name in ('foo', 'bar'):
            secgroup = security_group.SecurityGroup()
            secgroup.name = name
            secgroups.objects.append(secgroup)
        inst.security_groups = secgroups
        inst.info_cache = instance_info_cache.InstanceInfoCache()
        inst.info_cache.network_info = []
        inst.create(self.context)
    def test_destroy_stubbed(self):
        """destroy() calls instance_destroy with constraint=None by default."""
        self.mox.StubOutWithMock(db, 'instance_destroy')
        db.instance_destroy(self.context, 'fake-uuid', constraint=None)
        self.mox.ReplayAll()
        inst = instance.Instance()
        inst.id = 1
        inst.uuid = 'fake-uuid'
        inst.host = 'foo'
        inst.destroy(self.context)
def test_destroy(self):
values = {'user_id': self.context.user_id,
'project_id': self.context.project_id}
db_inst = db.instance_create(self.context, values)
inst = instance.Instance()
inst.id = db_inst['id']
inst.uuid = db_inst['uuid']
inst.destroy(self.context)
self.assertRaises(exception.InstanceNotFound,
db.instance_get_by_uuid, self.context,
db_inst['uuid'])
def test_destroy_host_constraint(self):
values = {'user_id': self.context.user_id,
'project_id': self.context.project_id,
'host': 'foo'}
db_inst = db.instance_create(self.context, values)
inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
inst.host = None
self.assertRaises(exception.ObjectActionError,
inst.destroy)
def test_name_does_not_trigger_lazy_loads(self):
values = {'user_id': self.context.user_id,
'project_id': self.context.project_id,
'host': 'foo'}
db_inst = db.instance_create(self.context, values)
inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
self.assertFalse(inst.obj_attr_is_set('fault'))
self.flags(instance_name_template='foo-%(uuid)s')
self.assertEqual('foo-%s' % db_inst['uuid'], inst.name)
self.assertFalse(inst.obj_attr_is_set('fault'))
class TestInstanceObject(test_objects._LocalTest,
                         _TestInstanceObject):
    """Run the shared Instance tests against local (in-process) objects."""
    pass
class TestRemoteInstanceObject(test_objects._RemoteTest,
                               _TestInstanceObject):
    """Run the shared Instance tests through the remote (RPC) path."""
    pass
class _TestInstanceListObject(object):
    """Shared InstanceList tests, mixed into local and remote test cases."""

    def fake_instance(self, id, updates=None):
        """Build a plain db-style instance dict suitable for stubbing.

        :param updates: optional dict merged over the defaults.
        """
        # NOTE(review): the 'id' parameter is ignored and the stub always
        # uses id=2 -- confirm whether that is intentional.
        fake_instance = fakes.stub_instance(id=2,
                                            access_ipv4='1.2.3.4',
                                            access_ipv6='::1')
        fake_instance['scheduled_at'] = None
        fake_instance['terminated_at'] = None
        fake_instance['deleted_at'] = None
        fake_instance['created_at'] = None
        fake_instance['updated_at'] = None
        fake_instance['launched_at'] = (
            fake_instance['launched_at'].replace(
                tzinfo=iso8601.iso8601.Utc(), microsecond=0))
        fake_instance['info_cache'] = {'network_info': '[]',
                                       'instance_uuid': fake_instance['uuid']}
        fake_instance['security_groups'] = []
        fake_instance['deleted'] = 0
        if updates:
            fake_instance.update(updates)
        return fake_instance

    def test_get_all_by_filters(self):
        """get_by_filters forwards filters/sort and hydrates each result."""
        fakes = [self.fake_instance(1), self.fake_instance(2)]
        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
        db.instance_get_all_by_filters(self.context, {'foo': 'bar'}, 'uuid',
                                       'asc', limit=None, marker=None,
                                       columns_to_join=['metadata']).AndReturn(
                                           fakes)
        self.mox.ReplayAll()
        inst_list = instance.InstanceList.get_by_filters(
            self.context, {'foo': 'bar'}, 'uuid', 'asc',
            expected_attrs=['metadata'])
        for i in range(0, len(fakes)):
            self.assertTrue(isinstance(inst_list.objects[i],
                                       instance.Instance))
            self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
        self.assertRemotes()

    def test_get_all_by_filters_works_for_cleaned(self):
        """Filtering on cleaned=False matches only not-yet-cleaned rows."""
        fakes = [self.fake_instance(1),
                 self.fake_instance(2, updates={'deleted': 2,
                                                'cleaned': None})]
        self.context.read_deleted = 'yes'
        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
        db.instance_get_all_by_filters(self.context,
                                       {'deleted': True, 'cleaned': False},
                                       'uuid', 'asc', limit=None, marker=None,
                                       columns_to_join=['metadata']).AndReturn(
                                           [fakes[1]])
        self.mox.ReplayAll()
        inst_list = instance.InstanceList.get_by_filters(
            self.context, {'deleted': True, 'cleaned': False}, 'uuid', 'asc',
            expected_attrs=['metadata'])
        self.assertEqual(1, len(inst_list))
        self.assertTrue(isinstance(inst_list.objects[0], instance.Instance))
        self.assertEqual(inst_list.objects[0].uuid, fakes[1]['uuid'])
        self.assertRemotes()

    def test_get_by_host(self):
        """get_by_host hydrates instances and attaches the context."""
        fakes = [self.fake_instance(1),
                 self.fake_instance(2)]
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        db.instance_get_all_by_host(self.context, 'foo',
                                    columns_to_join=None).AndReturn(fakes)
        self.mox.ReplayAll()
        inst_list = instance.InstanceList.get_by_host(self.context, 'foo')
        for i in range(0, len(fakes)):
            self.assertTrue(isinstance(inst_list.objects[i],
                                       instance.Instance))
            self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
            self.assertEqual(inst_list.objects[i]._context, self.context)
        self.assertEqual(inst_list.obj_what_changed(), set())
        self.assertRemotes()

    def test_get_by_host_and_node(self):
        """get_by_host_and_node queries on both host and node name."""
        fakes = [self.fake_instance(1),
                 self.fake_instance(2)]
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
        db.instance_get_all_by_host_and_node(self.context, 'foo', 'bar'
                                             ).AndReturn(fakes)
        self.mox.ReplayAll()
        inst_list = instance.InstanceList.get_by_host_and_node(self.context,
                                                               'foo', 'bar')
        for i in range(0, len(fakes)):
            self.assertTrue(isinstance(inst_list.objects[i],
                                       instance.Instance))
            self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
        self.assertRemotes()

    def test_get_by_host_and_not_type(self):
        """get_by_host_and_not_type excludes the given instance type."""
        fakes = [self.fake_instance(1),
                 self.fake_instance(2)]
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_not_type')
        db.instance_get_all_by_host_and_not_type(self.context, 'foo',
                                                 type_id='bar').AndReturn(
                                                     fakes)
        self.mox.ReplayAll()
        inst_list = instance.InstanceList.get_by_host_and_not_type(
            self.context, 'foo', 'bar')
        for i in range(0, len(fakes)):
            self.assertTrue(isinstance(inst_list.objects[i],
                                       instance.Instance))
            self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
        self.assertRemotes()

    def test_get_hung_in_rebooting(self):
        """get_hung_in_rebooting passes the cutoff time through to the DB."""
        fakes = [self.fake_instance(1),
                 self.fake_instance(2)]
        dt = timeutils.isotime()
        self.mox.StubOutWithMock(db, 'instance_get_all_hung_in_rebooting')
        db.instance_get_all_hung_in_rebooting(self.context, dt).AndReturn(
            fakes)
        self.mox.ReplayAll()
        inst_list = instance.InstanceList.get_hung_in_rebooting(self.context,
                                                                dt)
        for i in range(0, len(fakes)):
            self.assertTrue(isinstance(inst_list.objects[i],
                                       instance.Instance))
            self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
        self.assertRemotes()

    def test_with_fault(self):
        """expected_attrs=['fault'] loads faults for the whole list at once."""
        fake_insts = [
            fake_instance.fake_db_instance(uuid='fake-uuid', host='host'),
            fake_instance.fake_db_instance(uuid='fake-inst2', host='host'),
            ]
        fake_faults = test_instance_fault.fake_faults
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        db.instance_get_all_by_host(self.context, 'host', columns_to_join=[]
                                    ).AndReturn(fake_insts)
        db.instance_fault_get_by_instance_uuids(
            self.context, [x['uuid'] for x in fake_insts]
            ).AndReturn(fake_faults)
        self.mox.ReplayAll()
        instances = instance.InstanceList.get_by_host(self.context, 'host',
                                                      expected_attrs=['fault'])
        self.assertEqual(2, len(instances))
        self.assertEqual(fake_faults['fake-uuid'][0],
                         dict(instances[0].fault.iteritems()))
        self.assertEqual(None, instances[1].fault)

    def test_fill_faults(self):
        """fill_faults() attaches newest faults and reports affected uuids."""
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        inst1 = instance.Instance()
        inst1.uuid = 'uuid1'
        inst2 = instance.Instance()
        inst2.uuid = 'uuid2'
        insts = [inst1, inst2]
        for inst in insts:
            inst.obj_reset_changes()
        db_faults = {
            'uuid1': [{'id': 123,
                       'instance_uuid': 'uuid1',
                       'code': 456,
                       'message': 'Fake message',
                       'details': 'No details',
                       'host': 'foo',
                       'deleted': False,
                       'deleted_at': None,
                       'updated_at': None,
                       'created_at': None,
                       }
                      ]}
        db.instance_fault_get_by_instance_uuids(self.context,
                                                [x.uuid for x in insts],
                                                ).AndReturn(db_faults)
        self.mox.ReplayAll()
        inst_list = instance.InstanceList()
        inst_list._context = self.context
        inst_list.objects = insts
        faulty = inst_list.fill_faults()
        self.assertEqual(faulty, ['uuid1'])
        self.assertEqual(inst_list[0].fault.message,
                         db_faults['uuid1'][0]['message'])
        self.assertEqual(inst_list[1].fault, None)
        # Filling faults must not mark anything dirty.
        for inst in inst_list:
            self.assertEqual(inst.obj_what_changed(), set())
class TestInstanceListObject(test_objects._LocalTest,
                             _TestInstanceListObject):
    """Run the shared InstanceList tests against local objects."""
    pass
class TestRemoteInstanceListObject(test_objects._RemoteTest,
                                   _TestInstanceListObject):
    """Run the shared InstanceList tests through the remote (RPC) path."""
    pass
class TestInstanceObjectMisc(test.NoDBTestCase):
    """Tests for module-level helpers of the instance objects module."""

    def test_expected_cols(self):
        """_expected_cols keeps only joinable fields; None passes through."""
        self.stubs.Set(instance, '_INSTANCE_OPTIONAL_JOINED_FIELDS', ['bar'])
        self.assertEqual(['bar'], instance._expected_cols(['foo', 'bar']))
        self.assertEqual(None, instance._expected_cols(None))
|
|
# util/compat.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Handle Python version/platform incompatibilities."""
from __future__ import annotations
import base64
import dataclasses
import inspect
import operator
import platform
import sys
import typing
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Tuple
py311 = sys.version_info >= (3, 11)
py310 = sys.version_info >= (3, 10)
py39 = sys.version_info >= (3, 9)
py38 = sys.version_info >= (3, 8)
pypy = platform.python_implementation() == "PyPy"
cpython = platform.python_implementation() == "CPython"
win32 = sys.platform.startswith("win")
osx = sys.platform.startswith("darwin")
arm = "aarch" in platform.machine().lower()
has_refcount_gc = bool(cpython)
dottedgetter = operator.attrgetter
class FullArgSpec(typing.NamedTuple):
    """Typed equivalent of the result of ``inspect.getfullargspec``."""

    args: List[str]  # names of the positional parameters
    varargs: Optional[str]  # name of the *args parameter, if any
    varkw: Optional[str]  # name of the **kwargs parameter, if any
    defaults: Optional[Tuple[Any, ...]]  # defaults for trailing positionals
    kwonlyargs: List[str]  # names of keyword-only parameters
    kwonlydefaults: Dict[str, Any]  # defaults for keyword-only parameters
    annotations: Dict[str, Any]  # parameter and return annotations
def inspect_getfullargspec(func: Callable[..., Any]) -> FullArgSpec:
    """Fully vendored version of getfullargspec from Python 3.3."""
    # Unwrap bound methods down to the underlying function object.
    if inspect.ismethod(func):
        func = func.__func__
    if not inspect.isfunction(func):
        raise TypeError("{!r} is not a Python function".format(func))
    code = func.__code__
    if not inspect.iscode(code):
        raise TypeError("{!r} is not a code object".format(code))

    num_pos = code.co_argcount
    num_kwonly = code.co_kwonlyargcount
    varnames = code.co_varnames

    positional = list(varnames[:num_pos])
    kwonly = list(varnames[num_pos:num_pos + num_kwonly])

    # The *args / **kwargs slots, when present, directly follow the named
    # parameters in co_varnames.
    cursor = num_pos + num_kwonly
    star_args = None
    if code.co_flags & inspect.CO_VARARGS:
        star_args = varnames[cursor]
        cursor += 1
    star_kwargs = None
    if code.co_flags & inspect.CO_VARKEYWORDS:
        star_kwargs = varnames[cursor]

    return FullArgSpec(
        positional,
        star_args,
        star_kwargs,
        func.__defaults__,
        kwonly,
        func.__kwdefaults__,
        func.__annotations__,
    )
# Prefer the stdlib importlib.metadata (Python 3.8+); fall back to the
# importlib_metadata backport on older interpreters.
if typing.TYPE_CHECKING or py38:
    from importlib import metadata as importlib_metadata
else:
    import importlib_metadata  # noqa

if typing.TYPE_CHECKING or py39:
    # pep 584 dict union
    dict_union = operator.or_  # noqa
else:

    def dict_union(a: dict, b: dict) -> dict:
        # Backport of ``a | b`` for dicts: right-hand side wins on clashes.
        a = a.copy()
        a.update(b)
        return a
def importlib_metadata_get(group):
    """Return the entry points registered under *group*.

    Uses the modern ``EntryPoints.select`` API when available and falls
    back to the legacy dict-style ``get`` on older implementations.
    """
    ep = importlib_metadata.entry_points()
    if not typing.TYPE_CHECKING and hasattr(ep, "select"):
        return ep.select(group=group)
    else:
        return ep.get(group, ())
def b(s):
    """Encode *s* to bytes using latin-1 (legacy py2/py3 shim)."""
    return bytes(s, "latin-1")
def b64decode(x: str) -> bytes:
    """Decode a base64 text string into raw bytes."""
    return base64.b64decode(bytes(x, "ascii"))
def b64encode(x: bytes) -> str:
    """Encode raw bytes as an ASCII base64 string."""
    return str(base64.b64encode(x), "ascii")
def decode_backslashreplace(text: bytes, encoding: str) -> str:
    """Decode *text*, rendering undecodable bytes as ``\\xNN`` escapes."""
    return str(text, encoding, "backslashreplace")
def cmp(a, b):
    """Three-way comparison returning -1, 0 or 1 (the removed py2 builtin)."""
    if a < b:
        return -1
    if a > b:
        return 1
    return 0
def _formatannotation(annotation, base_module=None):
"""vendored from python 3.7"""
if isinstance(annotation, str):
return f'"{annotation}"'
if getattr(annotation, "__module__", None) == "typing":
return f'"{repr(annotation).replace("typing.", "").replace("~", "")}"'
if isinstance(annotation, type):
if annotation.__module__ in ("builtins", base_module):
return repr(annotation.__qualname__)
return annotation.__module__ + "." + annotation.__qualname__
elif isinstance(annotation, typing.TypeVar):
return f'"{repr(annotation).replace("~", "")}"'
return f'"{repr(annotation).replace("~", "")}"'
def inspect_formatargspec(
    args: List[str],
    varargs: Optional[str] = None,
    varkw: Optional[str] = None,
    defaults: Optional[Sequence[Any]] = None,
    kwonlyargs: Optional[Sequence[str]] = (),
    kwonlydefaults: Optional[Mapping[str, Any]] = None,
    annotations: Optional[Mapping[str, Any]] = None,
    formatarg: Callable[[str], str] = str,
    formatvarargs: Callable[[str], str] = lambda name: "*" + name,
    formatvarkw: Callable[[str], str] = lambda name: "**" + name,
    formatvalue: Callable[[Any], str] = lambda value: "=" + repr(value),
    formatreturns: Callable[[Any], str] = lambda text: " -> " + str(text),
    formatannotation: Callable[[Any], str] = _formatannotation,
) -> str:
    """Copy formatargspec from python 3.7 standard library.

    Python 3 has deprecated formatargspec and requested that Signature
    be used instead, however this requires a full reimplementation
    of formatargspec() in terms of creating Parameter objects and such.
    Instead of introducing all the object-creation overhead and having
    to reinvent from scratch, just copy their compatibility routine.

    Ultimately we would need to rewrite our "decorator" routine completely
    which is not really worth it right now, until all Python 2.x support
    is dropped.
    """
    # NOTE: kwonlydefaults/annotations previously defaulted to the shared
    # mutable literal ``{}``; ``None`` avoids the mutable-default-argument
    # pitfall and is normalized here exactly as any falsy value was before,
    # so callers observe no behavior change.
    kwonlydefaults = kwonlydefaults or {}
    annotations = annotations or {}

    def formatargandannotation(arg):
        # Append ": <annotation>" when the argument is annotated.
        result = formatarg(arg)
        if arg in annotations:
            result += ": " + formatannotation(annotations[arg])
        return result

    specs = []
    if defaults:
        firstdefault = len(args) - len(defaults)
    else:
        firstdefault = -1
    for i, arg in enumerate(args):
        spec = formatargandannotation(arg)
        if defaults and i >= firstdefault:
            spec = spec + formatvalue(defaults[i - firstdefault])
        specs.append(spec)

    if varargs is not None:
        specs.append(formatvarargs(formatargandannotation(varargs)))
    else:
        # A bare "*" separator is required when keyword-only arguments
        # exist but there is no *args parameter.
        if kwonlyargs:
            specs.append("*")

    if kwonlyargs:
        for kwonlyarg in kwonlyargs:
            spec = formatargandannotation(kwonlyarg)
            if kwonlydefaults and kwonlyarg in kwonlydefaults:
                spec += formatvalue(kwonlydefaults[kwonlyarg])
            specs.append(spec)

    if varkw is not None:
        specs.append(formatvarkw(formatargandannotation(varkw)))

    result = "(" + ", ".join(specs) + ")"
    if "return" in annotations:
        result += formatreturns(formatannotation(annotations["return"]))
    return result
def dataclass_fields(cls):
    """Return a sequence of all dataclasses.Field objects associated
    with a class.

    Non-dataclass inputs yield an empty list rather than raising.
    """
    if not dataclasses.is_dataclass(cls):
        return []
    return dataclasses.fields(cls)
def local_dataclass_fields(cls):
    """Return a sequence of all dataclasses.Field objects associated with
    a class, excluding those that originate from a superclass.
    """
    if not dataclasses.is_dataclass(cls):
        return []
    # Collect every field contributed by a direct base class.
    inherited = set()
    for base in cls.__bases__:
        inherited.update(dataclass_fields(base))
    return [f for f in dataclasses.fields(cls) if f not in inherited]
|
|
"""Compute the action of the matrix exponential.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
import scipy.sparse.linalg
from scipy.sparse.linalg import LinearOperator, aslinearoperator
__all__ = ['expm_multiply']
def _exact_inf_norm(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return max(abs(A).sum(axis=1).flat)
else:
return np.linalg.norm(A, np.inf)
def _exact_1_norm(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return max(abs(A).sum(axis=0).flat)
else:
return np.linalg.norm(A, 1)
def _trace(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return A.diagonal().sum()
else:
return np.trace(A)
def _ident_like(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return scipy.sparse.construct.eye(A.shape[0], A.shape[1],
dtype=A.dtype, format=A.format)
else:
return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)
def expm_multiply(A, B, start=None, stop=None, num=None, endpoint=None):
    """
    Compute the action of the matrix exponential of A on B.
    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix or vector to be multiplied by the matrix exponential of A.
    start : scalar, optional
        The starting time point of the sequence.
    stop : scalar, optional
        The end time point of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced time points, so that `stop` is excluded.
        Note that the step size changes when `endpoint` is False.
    num : int, optional
        Number of time points to use.
    endpoint : bool, optional
        If True, `stop` is the last time point.  Otherwise, it is not included.
    Returns
    -------
    expm_A_B : ndarray
         The result of the action :math:`e^{t_k A} B`.
    Notes
    -----
    The optional arguments defining the sequence of evenly spaced time points
    are compatible with the arguments of `numpy.linspace`.
    The output ndarray shape is somewhat complicated so I explain it here.
    The ndim of the output could be either 1, 2, or 3.
    It would be 1 if you are computing the expm action on a single vector
    at a single time point.
    It would be 2 if you are computing the expm action on a vector
    at multiple time points, or if you are computing the expm action
    on a matrix at a single time point.
    It would be 3 if you want the action on a matrix with multiple
    columns at multiple time points.
    If multiple time points are requested, expm_A_B[0] will always
    be the action of the expm at the first time point,
    regardless of whether the action is on a vector or a matrix.
    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2011)
           "Computing the Action of the Matrix Exponential,
           with an Application to Exponential Integrators."
           SIAM Journal on Scientific Computing,
           33 (2). pp. 488-511. ISSN 1064-8275
           http://eprints.ma.man.ac.uk/1591/
    .. [2] Nicholas J. Higham and Awad H. Al-Mohy (2010)
           "Computing Matrix Functions."
           Acta Numerica,
           19. 159-208. ISSN 0962-4929
           http://eprints.ma.man.ac.uk/1451/
    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import expm, expm_multiply
    >>> A = csc_matrix([[1, 0], [0, 1]])
    >>> A.todense()
    matrix([[1, 0],
            [0, 1]], dtype=int64)
    >>> B = np.array([np.exp(-1.), np.exp(-2.)])
    >>> B
    array([ 0.36787944,  0.13533528])
    >>> expm_multiply(A, B, start=1, stop=2, num=3, endpoint=True)
    array([[ 1.        ,  0.36787944],
           [ 1.64872127,  0.60653066],
           [ 2.71828183,  1.        ]])
    >>> expm(A).dot(B)         # Verify 1st timestep
    array([ 1.        ,  0.36787944])
    >>> expm(1.5*A).dot(B)     # Verify 2nd timestep
    array([ 1.64872127,  0.60653066])
    >>> expm(2*A).dot(B)       # Verify 3rd timestep
    array([ 2.71828183,  1.        ])
    """
    if all(arg is None for arg in (start, stop, num, endpoint)):
        # No time-sequence arguments supplied: single time point t=1.
        X = _expm_multiply_simple(A, B)
    else:
        # Evenly spaced time points; `status` is used only by tests/debugging.
        X, status = _expm_multiply_interval(A, B, start, stop, num, endpoint)
    return X
def _expm_multiply_simple(A, B, t=1.0, balance=False):
    """
    Compute the action of the matrix exponential at a single time point.
    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix to be multiplied by the matrix exponential of A.
    t : float
        A time point.
    balance : bool
        Indicates whether or not to apply balancing.
    Returns
    -------
    F : ndarray
        :math:`e^{t A} B`
    Notes
    -----
    This is algorithm (3.2) in Al-Mohy and Higham (2011).
    """
    if balance:
        raise NotImplementedError
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    if A.shape[1] != B.shape[0]:
        raise ValueError('the matrices A and B have incompatible shapes')
    ident = _ident_like(A)
    n = A.shape[0]
    # n0 is the number of columns of B (1 when B is a vector).
    if len(B.shape) == 1:
        n0 = 1
    elif len(B.shape) == 2:
        n0 = B.shape[1]
    else:
        raise ValueError('expected B to be like a matrix or a vector')
    # Double-precision unit roundoff, used as the series truncation tolerance.
    u_d = 2**-53
    tol = u_d
    # Shift A by its mean eigenvalue (trace/n) to reduce its norm;
    # the shift is undone via the eta factor inside the core routine.
    mu = _trace(A) / float(n)
    A = A - mu * ident
    A_1_norm = _exact_1_norm(A)
    if t*A_1_norm == 0:
        # Zero operator (or t == 0): a single trivial step suffices.
        m_star, s = 0, 1
    else:
        ell = 2
        norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
        # Choose truncation order m_star and scaling count s.
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
    return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance)
def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False):
    """
    A helper function.

    Applies exp(t*A/s) to B a total of s times via a truncated Taylor
    series of order at most m_star; `mu` is the shift removed from A by
    the caller and is restored through the `eta` factor.
    """
    if balance:
        raise NotImplementedError
    if tol is None:
        # Default tolerance: double-precision unit roundoff.
        u_d = 2 ** -53
        tol = u_d
    F = B
    # Per-step correction undoing the mu-shift of A.
    eta = np.exp(t*mu / float(s))
    for i in range(s):
        c1 = _exact_inf_norm(B)
        for j in range(m_star):
            # Next Taylor term: B <- (t/s) * A @ B / (j+1).
            coeff = t / float(s*(j+1))
            B = coeff * A.dot(B)
            c2 = _exact_inf_norm(B)
            F = F + B
            # Stop early once two consecutive terms are negligible
            # relative to the accumulated sum F.
            if c1 + c2 <= tol * _exact_inf_norm(F):
                break
            c1 = c2
        F = eta * F
        B = F
    return F
# This table helps to compute bounds.
# They seem to have been difficult to calculate, involving symbolic
# manipulation of equations, followed by numerical root finding.
# Maps a Taylor truncation order m to the theta_m bound used by the
# scaling/order-selection heuristics (_fragment_3_1, _condition_3_13).
_theta = {
        # The first 30 values are from table A.3 of Computing Matrix Functions.
        1: 2.29e-16,
        2: 2.58e-8,
        3: 1.39e-5,
        4: 3.40e-4,
        5: 2.40e-3,
        6: 9.07e-3,
        7: 2.38e-2,
        8: 5.00e-2,
        9: 8.96e-2,
        10: 1.44e-1,
        # 11
        11: 2.14e-1,
        12: 3.00e-1,
        13: 4.00e-1,
        14: 5.14e-1,
        15: 6.41e-1,
        16: 7.81e-1,
        17: 9.31e-1,
        18: 1.09,
        19: 1.26,
        20: 1.44,
        # 21
        21: 1.62,
        22: 1.82,
        23: 2.01,
        24: 2.22,
        25: 2.43,
        26: 2.64,
        27: 2.86,
        28: 3.08,
        29: 3.31,
        30: 3.54,
        # The rest are from table 3.1 of
        # Computing the Action of the Matrix Exponential.
        35: 4.7,
        40: 6.0,
        45: 7.2,
        50: 8.5,
        55: 9.9,
        }
def _onenormest_matrix_power(A, p,
        t=2, itmax=5, compute_v=False, compute_w=False):
    """
    Efficiently estimate the 1-norm of A^p.
    Parameters
    ----------
    A : ndarray
        Matrix whose 1-norm of a power is to be computed.
    p : int
        Non-negative integer power.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.
    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.
    """
    #XXX Eventually turn this into an API function in the _onenormest module,
    #XXX and remove its underscore,
    #XXX but wait until expm_multiply goes into scipy.
    # NOTE(review): itmax/compute_v/compute_w are accepted but not forwarded
    # to onenormest — confirm whether that is intentional.
    return scipy.sparse.linalg.onenormest(aslinearoperator(A) ** p)
class LazyOperatorNormInfo:
    """
    Information about an operator is lazily computed.
    The information includes the exact 1-norm of the operator,
    in addition to estimates of 1-norms of powers of the operator.
    This uses the notation of Computing the Action (2011).
    This class is specialized enough to probably not be of general interest
    outside of this module.
    """
    def __init__(self, A, A_1_norm=None, ell=2, scale=1):
        """
        Provide the operator and some norm-related information.
        Parameters
        ----------
        A : linear operator
            The operator of interest.
        A_1_norm : float, optional
            The exact 1-norm of A.
        ell : int, optional
            A technical parameter controlling norm estimation quality.
        scale : int, optional
            If specified, return the norms of scale*A instead of A.
        """
        self._A = A
        self._A_1_norm = A_1_norm
        self._ell = ell
        # Cache of d_p estimates, keyed by power p.
        self._d = {}
        self._scale = scale
    def set_scale(self,scale):
        """
        Set the scale parameter.
        """
        self._scale = scale
    def onenorm(self):
        """
        Compute the exact 1-norm.
        """
        # Computed on first use, then cached.
        if self._A_1_norm is None:
            self._A_1_norm = _exact_1_norm(self._A)
        return self._scale*self._A_1_norm
    def d(self, p):
        """
        Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm.
        """
        if p not in self._d:
            # NOTE: self._ell is passed as the third positional argument
            # (the `t` accuracy parameter) of _onenormest_matrix_power.
            est = _onenormest_matrix_power(self._A, p, self._ell)
            self._d[p] = est ** (1.0 / p)
        return self._scale*self._d[p]
    def alpha(self, p):
        """
        Lazily compute max(d(p), d(p+1)).
        """
        return max(self.d(p), self.d(p+1))
def _compute_cost_div_m(m, p, norm_info):
    """
    A helper function for computing bounds.
    This is equation (3.10).
    It measures cost in terms of the number of required matrix products.
    Parameters
    ----------
    m : int
        A valid key of _theta.
    p : int
        A matrix power.
    norm_info : LazyOperatorNormInfo
        Information about 1-norms of related operators.
    Returns
    -------
    cost_div_m : int
        Required number of matrix products divided by m.
    """
    # ceil(alpha_p(A) / theta_m), per equation (3.10).
    return int(np.ceil(norm_info.alpha(p) / _theta[m]))
def _compute_p_max(m_max):
"""
Compute the largest positive integer p such that p*(p-1) <= m_max + 1.
Do this in a slightly dumb way, but safe and not too slow.
Parameters
----------
m_max : int
A count related to bounds.
"""
sqrt_m_max = np.sqrt(m_max)
p_low = int(np.floor(sqrt_m_max))
p_high = int(np.ceil(sqrt_m_max + 1))
return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1)
def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):
    """
    A helper function for the _expm_multiply_* functions.
    Parameters
    ----------
    norm_info : LazyOperatorNormInfo
        Information about norms of certain linear operators of interest.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    tol : float
        Expected to be
        :math:`2^{-24}` for single precision or
        :math:`2^{-53}` for double precision.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.
    Returns
    -------
    best_m : int
        Related to bounds for error control.
    best_s : int
        Amount of scaling.
    Notes
    -----
    This is code fragment (3.1) in Al-Mohy and Higham (2011).
    The discussion of default values for m_max and ell
    is given between the definitions of equation (3.11)
    and the definition of equation (3.12).
    """
    if ell < 1:
        raise ValueError('expected ell to be a positive integer')
    best_m = None
    best_s = None
    if _condition_3_13(norm_info.onenorm(), n0, m_max, ell):
        # Cheap path: only the exact 1-norm is needed; minimize m*s
        # over every tabulated order m.
        for m, theta in _theta.items():
            s = int(np.ceil(norm_info.onenorm() / theta))
            if best_m is None or m * s < best_m * best_s:
                best_m = m
                best_s = s
    else:
        # Equation (3.11).
        # More expensive path: uses lazily-estimated norms of powers of A.
        for p in range(2, _compute_p_max(m_max) + 1):
            for m in range(p*(p-1)-1, m_max+1):
                if m in _theta:
                    s = _compute_cost_div_m(m, p, norm_info)
                    if best_m is None or m * s < best_m * best_s:
                        best_m = m
                        best_s = s
        # At least one scaling step is always performed.
        best_s = max(best_s, 1)
    return best_m, best_s
def _condition_3_13(A_1_norm, n0, m_max, ell):
    """
    A helper function for the _expm_multiply_* functions.
    Parameters
    ----------
    A_1_norm : float
        The precomputed 1-norm of A.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.
    Returns
    -------
    value : bool
        Indicates whether or not the condition has been met.
    Notes
    -----
    This is condition (3.13) in Al-Mohy and Higham (2011).
    """
    # Right-hand side of equation (3.12).
    p_max = _compute_p_max(m_max)
    factor = 2 * ell * p_max * (p_max + 3)
    # Evaluate condition (3.13): compare the 1-norm against the bound.
    threshold = _theta[m_max] / float(n0 * m_max)
    return A_1_norm <= factor * threshold
def _expm_multiply_interval(A, B, start=None, stop=None,
        num=None, endpoint=None, balance=False, status_only=False):
    """
    Compute the action of the matrix exponential at multiple time points.
    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix to be multiplied by the matrix exponential of A.
    start : scalar, optional
        The starting time point of the sequence.
    stop : scalar, optional
        The end time point of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced time points, so that `stop` is excluded.
        Note that the step size changes when `endpoint` is False.
    num : int, optional
        Number of time points to use.
    endpoint : bool, optional
        If True, `stop` is the last time point.  Otherwise, it is not included.
    balance : bool
        Indicates whether or not to apply balancing.
    status_only : bool
        A flag that is set to True for some debugging and testing operations.
    Returns
    -------
    F : ndarray
        :math:`e^{t_k A} B`
    status : int
        An integer status for testing and debugging.
    Notes
    -----
    This is algorithm (5.2) in Al-Mohy and Higham (2011).
    There seems to be a typo, where line 15 of the algorithm should be
    moved to line 6.5 (between lines 6 and 7).
    """
    if balance:
        raise NotImplementedError
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    if A.shape[1] != B.shape[0]:
        raise ValueError('the matrices A and B have incompatible shapes')
    ident = _ident_like(A)
    n = A.shape[0]
    # n0 is the number of columns of B (1 when B is a vector).
    if len(B.shape) == 1:
        n0 = 1
    elif len(B.shape) == 2:
        n0 = B.shape[1]
    else:
        raise ValueError('expected B to be like a matrix or a vector')
    # Double-precision unit roundoff as the truncation tolerance.
    u_d = 2**-53
    tol = u_d
    mu = _trace(A) / float(n)
    # Get the linspace samples, attempting to preserve the linspace defaults.
    linspace_kwargs = {'retstep': True}
    if num is not None:
        linspace_kwargs['num'] = num
    if endpoint is not None:
        linspace_kwargs['endpoint'] = endpoint
    samples, step = np.linspace(start, stop, **linspace_kwargs)
    # Convert the linspace output to the notation used by the publication.
    nsamples = len(samples)
    if nsamples < 2:
        raise ValueError('at least two time points are required')
    q = nsamples - 1
    h = step
    t_0 = samples[0]
    t_q = samples[q]
    # Define the output ndarray.
    # Use an ndim=3 shape, such that the last two indices
    # are the ones that may be involved in level 3 BLAS operations.
    X_shape = (nsamples,) + B.shape
    X = np.empty(X_shape, dtype=np.result_type(A.dtype, B.dtype, float))
    t = t_q - t_0
    # Shift A by its mean eigenvalue; undone via exp(k*h*mu) in the cores.
    A = A - mu * ident
    A_1_norm = _exact_1_norm(A)
    ell = 2
    norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
    if t*A_1_norm == 0:
        m_star, s = 0, 1
    else:
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
    # Compute the expm action up to the initial time point.
    X[0] = _expm_multiply_simple_core(A, B, t_0, mu, m_star, s)
    # Compute the expm action at the rest of the time points.
    # Dispatch on the relation between the number of steps q and the
    # scaling count s; the integer status identifies the branch taken.
    if q <= s:
        if status_only:
            return 0
        else:
            return _expm_multiply_interval_core_0(A, X,
                    h, mu, q, norm_info, tol, ell,n0)
    elif q > s and not (q % s):
        if status_only:
            return 1
        else:
            return _expm_multiply_interval_core_1(A, X,
                    h, mu, m_star, s, q, tol)
    elif q > s and (q % s):
        if status_only:
            return 2
        else:
            return _expm_multiply_interval_core_2(A, X,
                    h, mu, m_star, s, q, tol)
    else:
        raise Exception('internal error')
def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0):
    """
    A helper function, for the case q <= s.

    Each of the q steps of size h is advanced with its own simple-core
    call; m_star and s are re-derived for the smaller interval t/q.
    """
    # Compute the new values of m_star and s which should be applied
    # over intervals of size t/q
    if norm_info.onenorm() == 0:
        m_star, s = 0, 1
    else:
        # Temporarily rescale the norm info to the sub-interval size.
        norm_info.set_scale(1./q)
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
        norm_info.set_scale(1)
    for k in range(q):
        X[k+1] = _expm_multiply_simple_core(A, X[k], h, mu, m_star, s)
    return X, 0
def _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol):
    """
    A helper function, for the case q > s and q % s == 0.

    From each of the s anchor points X[i*d], advance through d steps of
    size h using a truncated Taylor series.  The Taylor terms
    K[p] = (h*A)^p Z / p! do not depend on the step index k, so each is
    computed once per anchor point and reused for every k.
    """
    d = q // s
    input_shape = X.shape[1:]
    K_shape = (m_star + 1, ) + input_shape
    K = np.empty(K_shape, dtype=X.dtype)
    for i in range(s):
        Z = X[i*d]
        K[0] = Z
        # Highest Taylor term cached so far for this anchor point.
        high_p = 0
        for k in range(1, d+1):
            F = K[0]
            c1 = _exact_inf_norm(F)
            for p in range(1, m_star+1):
                # Compute each term exactly once and cache it (mirrors
                # _expm_multiply_interval_core_2; the original never
                # advanced high_p, so K[p] was needlessly recomputed
                # for every k even though its value is k-independent).
                if p == high_p + 1:
                    K[p] = h * A.dot(K[p-1]) / float(p)
                    high_p = p
                coeff = float(pow(k, p))
                F = F + coeff * K[p]
                inf_norm_K_p_1 = _exact_inf_norm(K[p])
                c2 = coeff * inf_norm_K_p_1
                # Stop adding terms once they are negligible relative to F.
                if c1 + c2 <= tol * _exact_inf_norm(F):
                    break
                c1 = c2
            # Undo the mu-shift of A for this time point.
            X[k + i*d] = np.exp(k*h*mu) * F
    return X, 1
def _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol):
    """
    A helper function, for the case q > s and q % s > 0.

    Same scheme as core_1, but the final anchor point advances only
    through the r leftover steps (r = q - d*j).
    """
    d = q // s
    j = q // d
    r = q - d * j
    input_shape = X.shape[1:]
    K_shape = (m_star + 1, ) + input_shape
    K = np.empty(K_shape, dtype=X.dtype)
    for i in range(j + 1):
        Z = X[i*d]
        K[0] = Z
        # Highest Taylor term cached so far for this anchor point.
        high_p = 0
        if i < j:
            effective_d = d
        else:
            # Last anchor point: only the remainder steps.
            effective_d = r
        for k in range(1, effective_d+1):
            F = K[0]
            c1 = _exact_inf_norm(F)
            for p in range(1, m_star+1):
                # K[p] = (h*A)^p Z / p! is k-independent: compute once,
                # reuse for every step from this anchor point.
                if p == high_p + 1:
                    K[p] = h * A.dot(K[p-1]) / float(p)
                    high_p = p
                coeff = float(pow(k, p))
                F = F + coeff * K[p]
                inf_norm_K_p_1 = _exact_inf_norm(K[p])
                c2 = coeff * inf_norm_K_p_1
                # Stop adding terms once they are negligible relative to F.
                if c1 + c2 <= tol * _exact_inf_norm(F):
                    break
                c1 = c2
            # Undo the mu-shift of A for this time point.
            X[k + i*d] = np.exp(k*h*mu) * F
    return X, 2
|
|
# A few utility functions
import itertools
import numpy as np
###############################################
# Generally useful functions #
###############################################
# useful with reshape
def linearize_indices(indices, dims):
    """Convert per-dimension index tensors into flattened (linear) indices.

    NOTE(review): `tf` is never imported in this module, so calling this
    raises NameError.  `tf.pack` suggests a pre-1.0 TensorFlow API
    (renamed to `tf.stack`) -- confirm and add the import.
    NOTE(review): `remain / dims[-i - 1]` relies on Python-2 integer
    division semantics; under Python 3 `/` would produce floats.
    """
    res = []
    remain = indices
    for i, _ in enumerate(dims):
        # Peel dimensions off from the last (row-major order):
        # remainder becomes this dimension's index, quotient carries on.
        res = [remain % dims[-i - 1]] + res
        remain = remain / dims[-i - 1]
    linearized = tf.transpose(tf.pack(res))
    return linearized
###############################################
# Data reading functions #
###############################################
class Config:
    """Hyper-parameter and bookkeeping container for the tagging model.

    NOTE(review): this module is Python 2 (`/` on ints is floor division
    here, e.g. ``pred_window / 2``); porting to Python 3 requires ``//``.
    NOTE(review): `__init__` takes `pot_size`, but other code in this
    file reads `config.pot_window` -- confirm which attribute name is
    intended.
    """
    def __init__(self, batch_size=20, num_steps=32, learning_rate=1e-2,
                 l1_reg=2e-3, l1_list=[],
                 l2_reg=2e-3, l2_list=[],
                 features_dim=50, init_words=False, input_features=[],
                 use_rnn=False, rnn_hidden_units=100, rnn_output_size=50,
                 use_convo=False, conv_window=5, conv_dim=50,
                 pot_size=1,
                 pred_window=1, tag_list=[],
                 verbose=False, num_epochs=10, num_predict=5):
        # optimization parameters
        self.batch_size = batch_size
        self.num_steps = num_steps
        self.learning_rate = learning_rate
        # regularization parameters
        self.l1_reg = l1_reg
        self.l1_list = l1_list
        self.l2_reg = l2_reg
        self.l2_list = l2_list
        # input layer
        self.features_dim = features_dim
        self.init_words = init_words
        self.input_features = input_features
        # recurrent layer
        self.use_rnn = use_rnn
        self.rnn_hidden_units = rnn_hidden_units
        self.rnn_output_size = rnn_output_size
        # convolutional layer
        self.use_convo = use_convo
        self.conv_window = conv_window
        self.conv_dim = conv_dim
        # CRF parameters:
        self.pot_size = pot_size
        self.n_tags = len(tag_list)
        # output layer
        self.pred_window = pred_window
        self.tag_list = tag_list
        # label_dict maps a joined tag window string (e.g. 'B_I_O') to
        # (outcome id, index of the window's middle tag); windows whose
        # middle tag is the padding tag '<P>' get outcome id -1 and do
        # not consume an outcome slot.
        self.label_dict = {}
        tags_ct = 0
        for element in itertools.product(tag_list, repeat=pred_window):
            tag_st = '_'.join(element)
            # Python-2 floor division picks the middle position.
            mid = element[pred_window / 2]
            if mid == '<P>':
                self.label_dict[tag_st] = (-1, tag_list.index(mid))
            else:
                self.label_dict[tag_st] = (tags_ct, tag_list.index(mid))
                tags_ct += 1
        self.n_outcomes = tags_ct
        # misc parameters
        self.verbose = verbose
        self.num_epochs = num_epochs
        self.num_predict = num_predict
    def make_mappings(self, data):
        """Build per-feature value<->id maps from the corpus.

        Id 0 is reserved for the unknown value '_unk_'.  The feature set
        is taken from the first token of the first sentence.
        """
        self.feature_maps = dict([(feat, {'lookup': {'_unk_': 0},
                                          'reverse': ['_unk_']})
                                  for feat in data[0][0]])
        for sentence in data:
            for token in sentence:
                for feat in data[0][0]:
                    ft = token[feat]
                    if ft not in self.feature_maps[feat]['lookup']:
                        self.feature_maps[feat]['lookup'][ft] = \
                                len(self.feature_maps[feat]['reverse'])
                        self.feature_maps[feat]['reverse'] += [ft]
    def to_string(self):
        """Dump all scalar configuration attributes, one per line."""
        st = ''
        for k, v in self.__dict__.items():
            if k not in ['feature_maps', 'label_dict']:
                st += k + ' --- ' + str(v) + ' \n'
        return st
class Batch:
    """One mini-batch of sentences converted to model-ready index arrays.

    NOTE(review): Python-2 only: integer `/` division and
    ``range(...) + range(...)`` (list concatenation) are used below.
    NOTE(review): `read` consumes `config.pot_window`, which Config's
    `__init__` does not set (it sets `pot_size`) -- confirm.
    """
    def __init__(self):
        # features: {'word': 'have', 'pos': 'VB', ...} ->
        # [1345, 12 * num_features + 1,...]
        self.features = []
        # tags: 'B' -> 1
        self.tags = []
        # tags_one_hot: 'B' -> [0, 1, 0, 0, 0, 0]
        self.tags_one_hot = []
        # tag_windows: '<P>_B_O' -> [0, 1, 3]
        self.tag_windows = []
        # tag_windows_lin: '<P>_B_O' -> num_values * token_id + 0 * config.n_tags **2 + 1 * config.n_tags + 3
        self.tag_windows_lin = []
        # tag_windows_one_hot: '<P>_B_O' -> [0, ..., 0, 1, 0, ..., 0]
        self.tag_windows_one_hot = []
        # tag_neighbours: '<P>_B_O' -> [0, 3]
        self.tag_neighbours = []
        # tag_neighbours_linearized: '<P>_B_O' -> num_values * token_id + 0 * config.n_tags + 3
        self.tag_neighbours_lin = []
        # mask: <P> -> 0, everything else -> 1
    def read(self, data, start, config, fill=False):
        """Fill all batch arrays from config.batch_size sentences of
        `data` starting at index `start`; optionally pad every sentence
        to a common length when `fill` is True.
        """
        num_features = len(config.input_features)
        batch_data = data[start:start + config.batch_size]
        # Map each token to its per-feature ids.
        batch_features = [[[config.feature_maps[feat]['lookup'][token[feat]]
                            for feat in config.input_features]
                           for token in sentence]
                          for sentence in batch_data]
        batch_labels = [[config.label_dict[token['label']]
                         for token in sentence]
                        for sentence in batch_data]
        # multiply feature indices for use in tf.nn.embedding_lookup
        self.features = [[[num_features * ft + i for i, ft in enumerate(word)]
                          for word in sentence] for sentence in batch_features]
        self.tags = [[label[1] for label in sentence]
                     for sentence in batch_labels]
        self.tags_one_hot = [[[int(x == label[1] and x > 0)  # TODO: count padding tokens?
                               for x in range(config.n_tags)]
                              for label in sentence]
                             for sentence in batch_labels]
        self.tag_windows_one_hot = [[[int(x == label[0])
                                      for x in range(config.n_outcomes)]
                                     for label in sentence]
                                    for sentence in batch_labels]
        if fill:
            # Pad all sentences to a shared length, splitting the padding
            # between the front and the back (Python-2 floor division).
            max_len = max(config.conv_window,
                          max([len(sentence) for sentence in batch_data]) + 2)
            for i in range(config.batch_size):
                current_len = len(batch_data[i])
                pre_len = (max_len - current_len) / 2
                post_len = max_len - pre_len - current_len
                self.features[i] = [range(num_features)] * pre_len + \
                                   self.features[i] + \
                                   [range(num_features)] * post_len
                self.tags[i] = [0] * pre_len + self.tags[i] + [0] * post_len
                self.tags_one_hot[i] = [[0] * config.n_outcomes] * pre_len + \
                                       self.tags_one_hot[i] + \
                                       [[0] * config.n_outcomes] * post_len
                self.tag_windows_one_hot[i] = [[0] * config.n_outcomes] * pre_len + \
                                              self.tag_windows_one_hot[i] + \
                                              [[0] * config.n_outcomes] * post_len
        # Half-width of the potential window (Python-2 floor division).
        mid = config.pot_window / 2
        padded_tags = [[0] * mid + sentence + [0] * mid
                       for sentence in self.tags]
        # get linearized window indices
        self.tag_windows = [[sent[i + j] for j in range(-mid, mid + 1)]
                            for sent in padded_tags
                            for i in range(mid, len(sent) - mid)]
        n_indices = config.n_tags ** config.pot_window
        self.tag_windows_lin = [sum([t * (config.n_tags ** (config.pot_window - 1 - i))
                                     for i, t in enumerate(window)]) + i * n_indices
                                for i, window in enumerate(self.tag_windows)]
        # get linearized potential indices
        # NOTE(review): range + range is Python-2 list concatenation.
        self.tag_neighbours = [[sent[i + j]
                                for j in range(-mid, 0) + range(1, mid + 1)]
                               for sent in padded_tags
                               for i in range(mid, len(sent) - mid)]
        max_pow = config.pot_window - 1
        n_indices = config.n_tags ** max_pow
        self.tag_neighbours_lin = [sum([idx * (config.n_tags) ** (max_pow - j - 1)
                                        for j, idx in enumerate(token)]) + i * n_indices
                                   for i, token in enumerate(self.tag_neighbours)]
        # make mask:
        self.mask = [[int(tag > 0) for tag in sent] for sent in self.tags]
def aggregate_labels(sentence, config):
    """Replace each token's 'label' in place with a joined label window.

    The new label is the '_'-joined sequence of ``config.pred_window``
    original labels centred on the token, padded with '<P>' at the
    sentence boundaries.  Mutates `sentence`; returns None.
    """
    # Floor division: identical to the original Python-2 `/` on ints,
    # and also correct under Python 3.  (The unused `current` local
    # from the original was dropped.)
    pre_tags = ['<P>'] * (config.pred_window // 2)
    sentence_ext = pre_tags + [token['label']
                               for token in sentence] + pre_tags
    for i, token in enumerate(sentence):
        sentence[i]['label'] = '_'.join([sentence_ext[i + j]
                                         for j in range(config.pred_window)])
def read_data(file_name, features, config):
    """Read a tab-separated corpus into a list of sentences.

    Each non-blank line holds one token's feature values (tab-separated,
    zipped with `features`); blank lines separate sentences.  Labels are
    then rewritten in place by aggregate_labels.

    NOTE(review): Python-2 only (`print` statement below).
    """
    sentences = []
    sentence = []
    f = open(file_name)
    c = 0
    for line in f:
        c += 1
        # Progress indicator for large corpora.
        if c % 100000 == 0:
            print c, 'lines read'
        if len(line.strip()) == 0 and len(sentence) > 0:
            sentences += [sentence[:]]
            sentence = []
        else:
            sentence += [dict(zip(features, line.strip().split('\t')))]
    # Keep a trailing sentence that is not followed by a blank line.
    if len(sentence) > 0:
        sentences += [sentence[:]]
    f.close()
    # aggregate_labels mutates each sentence in place; `foo` only
    # collects the None return values and is discarded.
    foo = [aggregate_labels(sentence, config) for sentence in sentences]
    return sentences
def show(sentence):
    """Render a sentence as space-separated ``word/label`` pairs."""
    rendered = []
    for token in sentence:
        rendered.append(token['word'] + '/' + token['label'])
    return ' '.join(rendered)
# read pre_trained word vectors
def read_vectors(file_name, vocab):
    """Load pre-trained word vectors and align them with `vocab`.

    The file's first line is a header whose second field is the vector
    dimension; each following line is a word followed by its components.
    Words absent from the file get a zero vector.

    Returns an array of shape (len(vocab), dim).

    Uses a context manager so the file is closed even on error
    (the original leaked the handle on exceptions).
    """
    vectors = {}
    with open(file_name) as f:
        dim = int(f.readline().strip().split()[1])
        for line in f:
            w = line.split()[0]
            vec = [float(x) for x in line.strip().split()[1:]]
            vectors[w] = np.array(vec)
    res = np.zeros((len(vocab), dim))
    for i, w in enumerate(vocab):
        res[i] = vectors.get(w, np.zeros(dim))
    return res
# extract windows from data to fit into unrolled RNN. Independent sentences
def cut_and_pad(data, config):
    """Cut each sentence into num_steps-sized windows, padding short ones.

    NOTE(review): Python-2 integer `/` division is relied on in several
    places below; porting to Python 3 requires ``//``.
    NOTE(review): reads `config.pot_window`, which Config's __init__
    does not define (it sets `pot_size`) -- confirm.
    """
    # Padding token: unknown value for every feature, '<P>' window label.
    pad_token = dict([(feat, '_unk_') for feat in data[0][0]])
    pad_token['label'] = '_'.join(['<P>'] * config.pred_window)
    num_steps = config.num_steps
    res = []
    seen = 0
    pad_len = max(config.pred_window, config.pot_window) / 2
    sen = [pad_token] * pad_len + data[0] + [pad_token] * pad_len
    while seen < len(data):
        if len(sen) < num_steps:
            # NOTE(review): compares the full label against '<P>',
            # but labels here are joined windows like '<P>_<P>' --
            # confirm this branch is reachable as intended.
            if sen[0]['label'] == '<P>':
                new_sen = ((num_steps - len(sen)) / 2) * [pad_token] + sen
            else:
                new_sen = sen
            new_sen = new_sen + (num_steps - len(new_sen)) * [pad_token]
            res += [new_sen[:]]
            seen += 1
            if seen < len(data):
                sen = [pad_token] * pad_len + data[seen] + [pad_token] * pad_len
        else:
            # Emit a full window and keep the last third as overlap.
            res += [sen[:num_steps]]
            sen = sen[(2 * num_steps) / 3:]
    return res
# extract windows from data to fit into unrolled RNN. Continuous model
def cut_batches(data, config):
    """Flatten all sentences into one padded stream and cut it into
    half-overlapping windows of config.num_steps tokens.

    Returns a list of windows, each exactly num_steps long (the last one
    is padded).
    """
    # Padding token: unknown value for every feature, '<P>' window label.
    pad_token = dict([(feat, '_unk_') for feat in data[0][0]])
    pad_token['label'] = '_'.join(['<P>'] * config.pred_window)
    padding = [pad_token] * config.pred_window
    new_data = padding + [tok for sentence in data
                          for tok in sentence + padding]
    # Floor division: identical to the original Python-2 `/` on ints,
    # and also correct under Python 3.
    step_size = config.num_steps // 2
    num_cuts = len(new_data) // step_size
    res = [new_data[i * step_size: i * step_size + config.num_steps]
           for i in range(num_cuts)]
    # Pad the final window to the full length.
    res[-1] = res[-1] + [pad_token] * (config.num_steps - len(res[-1]))
    return res
###############################################
# NN evaluation functions #
###############################################
def treat_spans(spans_file):
    """Parse a span file into a list of span groups.

    Each non-blank line is ``start end tag``; blank lines close a group.
    Note: a trailing group not followed by a blank line is dropped,
    matching the original behavior.

    Uses a context manager so the file is closed even on error
    (the original leaked the handle on exceptions).
    """
    span_lists = []
    y = []
    with open(spans_file) as f:
        for line in f:
            if line.strip() == '':
                span_lists += [y[:]]
                y = []
            else:
                lsp = line.strip().split()
                y = y + [(int(lsp[0]), int(lsp[1]), lsp[2])]
    return span_lists
def find_gold(sentence):
    """Collect gold mention spans as tuples of token indices.

    A 'B' or 'O' label closes any span in progress; a 'B' label or any
    label containing 'I' extends/starts a span with the current token.
    """
    gold = []
    current = []
    for idx, token in enumerate(sentence):
        label = token['label']
        if label in ('B', 'O'):
            if current:
                gold.append(tuple(current))
                current = []
        if label == 'B' or 'I' in label:
            current.append(idx)
    if current:
        gold.append(tuple(current))
    return gold
def make_scores(token, thr):
    """Keep only the span-tag scores of *token* strictly above *thr*."""
    kept = {}
    for key, val in token.items():
        if key in ('O', 'OD', 'I', 'ID', 'B') and val > thr:
            kept[key] = val
    return kept
def find_mentions(sentence, thr=0.02):
    """Beam-search for candidate mention spans in a scored sentence.

    Each token is a dict of tag scores; hypotheses are triples
    [token indices, accumulated log-score, discontinuous flag].
    Returns (span, probability) pairs sorted by decreasing probability.
    """
    scores = [make_scores(token, thr) for token in sentence]
    found = []
    working = []
    for i, score in enumerate(scores):
        if 'B' in score or 'O' in score:
            # A span may end just before a 'B'/'O' token: close every
            # hypothesis that reaches the previous token.
            for work in working:
                if work[0][-1] == i-1:
                    sc = work[1] + np.log(score.get('B', 0) +
                                          score.get('O', 0))
                    # Length-normalize the accumulated log-score.
                    sc /= (work[0][-1] + 2 - work[0][0])
                    found += [(tuple(work[0]), np.exp(sc))]
        if len(score) == 1 and 'O' in score:
            # Pure 'O' token: no hypothesis can continue through it.
            working = []
        else:
            new_working = []
            if 'B' in score:
                # Start a new (contiguous) hypothesis at this token.
                new_working = [[[i], np.log(score['B']), False]]
            for work in working:
                for tg, sc in score.items():
                    # 'OD' marks a gap (discontinuous span); 'ID' extends
                    # a discontinuous span; 'I' extends a contiguous one.
                    if tg == 'OD':
                        new_working += [[work[0], work[1] + np.log(sc), True]]
                    elif tg == 'ID' and work[2]:
                        new_working += [[work[0] + [i], work[1] + np.log(sc),
                                         True]]
                    elif tg == 'I' and not work[2]:
                        new_working += [[work[0] + [i], work[1] + np.log(sc),
                                         False]]
            working = new_working[:]
            # Prune the beam to the 1000 best hypotheses.
            if len(working) > 1000:
                working = sorted(working, key=lambda x: x[1],
                                 reverse=True)[:1000]
    return sorted(found, key=lambda x: x[1], reverse=True)
def read_sentence(sentence):
    """Bundle a sentence with its gold spans and predicted mentions."""
    return (sentence, find_gold(sentence), find_mentions(sentence))
def merge(sentences, spans):
    """Pair each sentence with its span group, merging the gold spans of
    consecutive entries whose span groups share the same first span.

    NOTE(review): `sentences` and `spans` are assumed parallel, and
    `sp[0] == span[0]` presumably detects repeated sentences -- confirm
    against the data format.
    """
    res = []
    sent = read_sentence(sentences[0])
    span = spans[0]
    for i, sp in enumerate(spans):
        if i == 0:
            continue
        if sp[0] == span[0]:
            # Same sentence repeated: union the gold spans, keep the
            # latest predictions.
            sen = read_sentence(sentences[i])
            gold = sorted(list(set(sen[1] + sent[1])))
            sent = (sen[0], gold, sen[2])
        else:
            res += [(sent, span)]
            sent = read_sentence(sentences[i])
            span = spans[i]
    # Flush the final pending pair.
    res += [(sent, span)]
    return res
def evaluate(merged_sentences, threshold):
    """Print precision/recall/F1 of predicted mentions at a confidence
    threshold.

    `merged_sentences` is the output of merge(); predictions with
    probability >= threshold count as emitted.

    NOTE(review): Python-2 only (`print` statement below).
    """
    TP = 0
    FP = 0
    FN = 0
    for sentence in merged_sentences:
        true_mentions = sentence[0][1]
        tp = 0
        for pred in sentence[0][2]:
            if pred[1] >= threshold:
                if pred[0] in true_mentions:
                    tp += 1
                else:
                    FP += 1
        TP += tp
        # Gold mentions not matched by any emitted prediction.
        FN += len(true_mentions) - tp
    if (TP + FP) == 0:
        # No predictions emitted at all: define both metrics as 0.
        prec = 0
        recall = 0
    else:
        prec = float(TP) / (TP + FP)
        recall = float(TP) / (TP + FN)
    if prec == 0 or recall == 0:
        f1 = 0
    else:
        f1 = 2 * (prec * recall) / (prec + recall)
    print 'TH:', threshold, '\t', 'P:', prec, '\t', 'R:', recall, '\t', 'F:', f1
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._private_endpoint_connection_operations import build_get_request, build_list_by_batch_account_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionOperations:
"""PrivateEndpointConnectionOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.batch.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Wire up the operation group's client, serialization, and config.

        Per the class docstring: `client` issues service requests,
        `serializer`/`deserializer` are the object model (de)serializers,
        and `config` is the service client configuration.
        """
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
@distributed_trace
def list_by_batch_account(
self,
resource_group_name: str,
account_name: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.ListPrivateEndpointConnectionsResult"]:
"""Lists all of the private endpoint connections in the specified account.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param maxresults: The maximum number of items to return in the response.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListPrivateEndpointConnectionsResult or the result
of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.batch.models.ListPrivateEndpointConnectionsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListPrivateEndpointConnectionsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_batch_account_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
maxresults=maxresults,
template_url=self.list_by_batch_account.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_batch_account_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
maxresults=maxresults,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ListPrivateEndpointConnectionsResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_batch_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/privateEndpointConnections'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
account_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> "_models.PrivateEndpointConnection":
"""Gets information about the specified private endpoint connection.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param private_endpoint_connection_name: The private endpoint connection name. This must be
unique within the account.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.batch.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
private_endpoint_connection_name=private_endpoint_connection_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
account_name: str,
private_endpoint_connection_name: str,
parameters: "_models.PrivateEndpointConnection",
if_match: Optional[str] = None,
**kwargs: Any
) -> Optional["_models.PrivateEndpointConnection"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PrivateEndpointConnection"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'PrivateEndpointConnection')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
private_endpoint_connection_name=private_endpoint_connection_name,
content_type=content_type,
json=_json,
if_match=if_match,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if response.status_code == 202:
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
account_name: str,
private_endpoint_connection_name: str,
parameters: "_models.PrivateEndpointConnection",
if_match: Optional[str] = None,
**kwargs: Any
) -> AsyncLROPoller["_models.PrivateEndpointConnection"]:
"""Updates the properties of an existing private endpoint connection.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param private_endpoint_connection_name: The private endpoint connection name. This must be
unique within the account.
:type private_endpoint_connection_name: str
:param parameters: PrivateEndpointConnection properties that should be updated. Properties that
are supplied will be updated, any property not supplied will be unchanged.
:type parameters: ~azure.mgmt.batch.models.PrivateEndpointConnection
:param if_match: The state (ETag) version of the private endpoint connection to update. This
value can be omitted or set to "*" to apply the operation unconditionally.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.batch.models.PrivateEndpointConnection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
account_name=account_name,
private_endpoint_connection_name=private_endpoint_connection_name,
parameters=parameters,
if_match=if_match,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
|
|
"""SOClone views."""
import datetime
import itertools
from django.contrib.auth.models import User
from django.contrib.auth import views as auth_views
from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator, InvalidPage
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.utils.html import strip_tags
from django.utils.safestring import mark_safe
from lxml.html.diff import htmldiff
from markdown2 import Markdown
from soclone import auth
from soclone import diff
from soclone.forms import (AddAnswerForm, AskQuestionForm, CloseQuestionForm,
CommentForm, EditAnswerForm, EditQuestionForm, RetagQuestionForm,
RevisionForm)
from soclone.http import JsonResponse
from soclone.models import (Answer, AnswerRevision, Badge, Comment,
FavouriteQuestion, Question, QuestionRevision, Tag, Vote)
from soclone.questions import (all_question_views, index_question_views,
unanswered_question_views)
from soclone.shortcuts import get_page
from soclone.utils.html import sanitize_html
from soclone.utils.models import populate_foreign_key_caches
# Shared module-level Markdown converter used by all views below.
markdowner = Markdown(html4tags=True)
# Answer count at which a Question (and new Answers) enter wiki mode; also
# used as the answers-per-page size.
AUTO_WIKI_ANSWER_COUNT = 30
def get_questions_per_page(user):
    """Return how many questions to show per page for ``user``.

    Authenticated users get their stored preference; anonymous users get
    a default of 10.
    """
    # NOTE(review): assumes a user model exposing ``questions_per_page`` —
    # confirm against the project's custom User.
    if not user.is_authenticated():
        return 10
    return user.questions_per_page
def question_list(request, question_views, template, questions_per_page=None,
                  page_number=None, extra_context=None):
    """
    Question list generic view.
    Renders ``template`` with one of ``question_views``, chosen via the
    ``sort`` querystring parameter (falling back to the first view given).
    """
    sort_key = request.GET.get('sort', None)
    views_by_id = dict((qv.id, qv) for qv in question_views)
    view = views_by_id.get(sort_key, question_views[0])
    if questions_per_page is None:
        questions_per_page = get_questions_per_page(request.user)
    paginator = Paginator(view.get_queryset(), questions_per_page)
    # An explicit page number bypasses querystring-based page selection.
    if page_number is not None:
        page = paginator.page(page_number)
    else:
        page = get_page(request, paginator)
    # Batch-fetch the User objects each question references.
    populate_foreign_key_caches(User, ((page.object_list, (view.user,)),),
        fields=view.user_fields)
    context = {
        'title': view.page_title,
        'page': page,
        'questions': page.object_list,
        'current_view': view,
        'question_views': question_views,
    }
    if extra_context is not None:
        context.update(extra_context)
    return render_to_response(template, context,
        context_instance=RequestContext(request))
def index(request):
    """A condensed version of the main Question list."""
    extra = {
        # TODO Retrieve extra context required for index page
    }
    return question_list(request, index_question_views, 'index.html',
        questions_per_page=50, page_number=1, extra_context=extra)
def about(request):
    """About SOClone (not yet implemented)."""
    raise NotImplementedError
def faq(request):
    """Frequently Asked Questions (not yet implemented)."""
    raise NotImplementedError
def search(request):
    """Search Questions and Answers (not yet implemented)."""
    raise NotImplementedError
def login(request):
    """Log a user in via Django's built-in login view."""
    template = 'login.html'
    return auth_views.login(request, template_name=template)
def logout(request):
    """Log the current user out via Django's built-in logout view."""
    template = 'logged_out.html'
    return auth_views.logout(request, template_name=template)
def questions(request):
    """Full listing of all Questions."""
    return question_list(request, all_question_views, 'questions.html')
def unanswered(request):
    """Listing of Questions which have no Answers yet."""
    return question_list(request, unanswered_question_views, 'unanswered.html')
# Maps the ``sort`` querystring value to the ORDER BY fields used when
# listing a Question's Answers.
ANSWER_SORT = {
    'votes': ('-score', '-added_at'),
    'newest': ('-added_at',),
    'oldest': ('added_at',),
}
# Sort mode applied when none (or an unrecognised one) is requested.
DEFAULT_ANSWER_SORT = 'votes'
def question(request, question_id):
    """Displays a Question."""
    if not request.user.is_authenticated():
        question = get_object_or_404(Question, id=question_id)
        favourite = False
    else:
        # For logged-in users, piggyback a subquery onto the Question fetch
        # to learn whether they have favourited it, avoiding a second query.
        question = get_object_or_404(Question.objects.extra(
            select={
                'user_favourite_id': (
                    'SELECT id FROM soclone_favouritequestion '
                    'WHERE question_id = soclone_question.id '
                    'AND user_id = %s'),
            },
            select_params=[request.user.id]
        ), id=question_id)
        favourite = (question.user_favourite_id is not None)
    # Fallback rendering of comments for clients without dynamic loading.
    if 'showcomments' in request.GET:
        return question_comments(request, question)
    # Validate the requested answer sort, silently falling back to default.
    answer_sort_type = request.GET.get('sort', DEFAULT_ANSWER_SORT)
    if answer_sort_type not in ANSWER_SORT:
        answer_sort_type = DEFAULT_ANSWER_SORT
    order_by = ANSWER_SORT[answer_sort_type]
    paginator = Paginator(Answer.objects.for_question(
        question, request.user).order_by(*order_by),
        AUTO_WIKI_ANSWER_COUNT)
    # Save ourselves a COUNT() query by using the denormalised count
    # (relies on Paginator's private ``_count`` cache attribute).
    paginator._count = question.answer_count
    page = get_page(request, paginator)
    answers = page.object_list
    # Batch-fetch the related User objects for the question and answers.
    populate_foreign_key_caches(User, (
        ((question,), ('author', 'last_edited_by', 'closed_by')),
        (answers, ('author', 'last_edited_by'))
    ),
        fields=('username', 'gravatar', 'reputation', 'gold', 'silver',
                'bronze'))
    # Look up vote status for the current user
    question_vote, answer_votes = Vote.objects.get_for_question_and_answers(
        request.user, question, page.object_list)
    title = question.title
    if question.closed:
        title = '%s [closed]' % title
    return render_to_response('question.html', {
        'title': title,
        'question': question,
        'question_vote': question_vote,
        'favourite': favourite,
        'answers': page.object_list,
        'answer_votes': answer_votes,
        'page': page,
        'answer_sort': answer_sort_type,
        'answer_form': AddAnswerForm(),
        'tags': question.tags.all(),
    }, context_instance=RequestContext(request))
def question_comments(request, question, form=None):
    """
    Displays a Question together with any Comments made on it.
    Primarily a fallback for users who can't load Comments dynamically.
    """
    populate_foreign_key_caches(User, (
        ((question,), ('author', 'last_edited_by', 'closed_by')),
    ),
        fields=('username', 'gravatar', 'reputation', 'gold', 'silver',
                'bronze'))
    question_ct = ContentType.objects.get_for_model(Question)
    question_comment_list = Comment.objects.filter(content_type=question_ct,
        object_id=question.id)
    if form is None:
        form = CommentForm()
    context = {
        'title': u'Comments on %s' % question.title,
        'question': question,
        'tags': question.tags.all(),
        'comments': question_comment_list,
        'comment_form': form,
    }
    return render_to_response('question.html', context,
        context_instance=RequestContext(request))
def ask_question(request):
    """Adds a Question."""
    preview = None
    if request.method == 'POST':
        form = AskQuestionForm(request.POST)
        if form.is_valid():
            # Convert the Markdown body to sanitized HTML before either
            # previewing or persisting it.
            html = sanitize_html(markdowner.convert(form.cleaned_data['text']))
            if 'preview' in request.POST:
                # The user submitted the form to preview the formatted question
                preview = mark_safe(html)
            elif 'submit' in request.POST:
                added_at = datetime.datetime.now()
                # Create the Question
                question = Question(
                    title = form.cleaned_data['title'],
                    author = request.user,
                    added_at = added_at,
                    wiki = form.cleaned_data['wiki'],
                    last_activity_at = added_at,
                    last_activity_by = request.user,
                    tagnames = form.cleaned_data['tags'],
                    html = html,
                    summary = strip_tags(html)[:180]
                )
                if question.wiki:
                    question.wikified_at = added_at
                    # When in wiki mode, we always display the last edit
                    question.last_edited_at = added_at
                    question.last_edited_by = request.user
                question.save()
                # Create the initial revision
                QuestionRevision.objects.create(
                    question = question,
                    revision = 1,
                    title = question.title,
                    author = request.user,
                    revised_at = added_at,
                    tagnames = question.tagnames,
                    summary = u'asked question',
                    text = form.cleaned_data['text']
                )
                # TODO Badges related to Tag usage
                # TODO Badges related to asking Questions
                return HttpResponseRedirect(question.get_absolute_url())
    else:
        form = AskQuestionForm()
    return render_to_response('ask_question.html', {
        'title': u'Ask a Question',
        'form': form,
        'preview': preview,
    }, context_instance=RequestContext(request))
def edit_question(request, question_id):
    """
    Entry point for editing a question.
    Delegates to the appropriate view depending on whether the logged-in
    user may fully edit the post or only retag it.
    """
    question = get_object_or_404(Question, id=question_id)
    if auth.can_edit_post(request.user, question):
        return _edit_question(request, question)
    if auth.can_retag_questions(request.user):
        return _retag_question(request, question)
    raise Http404
def _edit_question(request, question):
    """
    Allows the user to edit a Question's title, text and tags.
    If the Question is not already in wiki mode, the user can put it in
    wiki mode, or it will automatically be put in wiki mode if the
    question has been edited five times by the person who asked it, or
    has been edited by four different users.
    """
    latest_revision = question.get_latest_revision()
    preview = None
    revision_form = None
    if request.method == 'POST':
        if 'select_revision' in request.POST:
            # The user submitted to change the revision to start editing from
            revision_form = RevisionForm(question, latest_revision, request.POST)
            if revision_form.is_valid():
                # Replace Question details with those from the selected revision
                form = EditQuestionForm(question,
                    QuestionRevision.objects.get(question=question,
                        revision=revision_form.cleaned_data['revision']))
            else:
                # Make sure we keep a hold of the user's other input, even
                # though they appear to be messing about.
                form = EditQuestionForm(question, latest_revision, request.POST)
        else:
            # Always check modifications against the latest revision
            form = EditQuestionForm(question, latest_revision, request.POST)
            if form.is_valid():
                html = sanitize_html(
                    markdowner.convert(form.cleaned_data['text']))
                if 'preview' in request.POST:
                    # The user submitted to preview the formatted question
                    preview = mark_safe(html)
                elif 'submit' in request.POST:
                    if form.has_changed():
                        edited_at = datetime.datetime.now()
                        tags_changed = (latest_revision.tagnames !=
                                        form.cleaned_data['tags'])
                        tags_updated = False
                        # Update the Question itself
                        updated_fields = {
                            'title': form.cleaned_data['title'],
                            'last_edited_at': edited_at,
                            'last_edited_by': request.user,
                            'last_activity_at': edited_at,
                            'last_activity_by': request.user,
                            'tagnames': form.cleaned_data['tags'],
                            'summary': strip_tags(html)[:180],
                            'html': html,
                        }
                        if ('wiki' in form.cleaned_data and
                                form.cleaned_data['wiki']):
                            updated_fields['wiki'] = True
                            updated_fields['wikified_at'] = edited_at
                        Question.objects.filter(
                            id=question.id).update(**updated_fields)
                        # Update the Question's tag associations
                        if tags_changed:
                            # BUG FIX: QuerySet.update() above does not refresh
                            # the in-memory ``question`` instance, so its
                            # ``tagnames`` attribute still holds the *old*
                            # tags. Pass the newly submitted tags instead,
                            # matching _retag_question.
                            tags_updated = Question.objects.update_tags(
                                question, form.cleaned_data['tags'],
                                request.user)
                        # Create a new revision
                        revision = QuestionRevision(
                            question = question,
                            title = form.cleaned_data['title'],
                            author = request.user,
                            revised_at = edited_at,
                            tagnames = form.cleaned_data['tags'],
                            text = form.cleaned_data['text']
                        )
                        if form.cleaned_data['summary']:
                            revision.summary = form.cleaned_data['summary']
                        else:
                            # Auto-generate an edit summary from the diff.
                            revision.summary = \
                                diff.generate_question_revision_summary(
                                    latest_revision, revision,
                                    ('wiki' in updated_fields))
                        revision.save()
                        # TODO 5 body edits by the author = automatic wiki mode
                        # TODO 4 individual editors = automatic wiki mode
                        # TODO Badges related to Tag usage
                        # TODO Badges related to editing Questions
                    return HttpResponseRedirect(question.get_absolute_url())
    else:
        if 'revision' in request.GET:
            revision_form = RevisionForm(question, latest_revision, request.GET)
            if revision_form.is_valid():
                # Replace Question details with those from the selected revision
                form = EditQuestionForm(question,
                    QuestionRevision.objects.get(question=question,
                        revision=revision_form.cleaned_data['revision']))
            else:
                # BUG FIX: an invalid ?revision= previously left ``form``
                # unbound and raised UnboundLocalError when rendering.
                # Fall back to editing the latest revision.
                form = EditQuestionForm(question, latest_revision)
        else:
            revision_form = RevisionForm(question, latest_revision)
            form = EditQuestionForm(question, latest_revision)
    if revision_form is None:
        # We're about to redisplay after a POST where we didn't care which
        # revision was selected - make sure the revision the user started from
        # is still selected on redisplay.
        revision_form = RevisionForm(question, latest_revision, request.POST)
    return render_to_response('edit_question.html', {
        'title': u'Edit Question',
        'question': question,
        'revision_form': revision_form,
        'form': form,
        'preview': preview,
    }, context_instance=RequestContext(request))
def _retag_question(request, question):
    """Allows the user to edit a Question's tags."""
    if request.method == 'POST':
        form = RetagQuestionForm(question, request.POST)
        if form.is_valid():
            # Only persist anything when the tags actually changed.
            if form.has_changed():
                latest_revision = question.get_latest_revision()
                retagged_at = datetime.datetime.now()
                # Update the Question itself
                Question.objects.filter(id=question.id).update(
                    tagnames = form.cleaned_data['tags'],
                    last_edited_at = retagged_at,
                    last_edited_by = request.user,
                    last_activity_at = retagged_at,
                    last_activity_by = request.user
                )
                # Update the Question's tag associations
                tags_updated = Question.objects.update_tags(question,
                    form.cleaned_data['tags'], request.user)
                # Create a new revision, keeping the previous title and text.
                QuestionRevision.objects.create(
                    question = question,
                    title = latest_revision.title,
                    author = request.user,
                    revised_at = retagged_at,
                    tagnames = form.cleaned_data['tags'],
                    summary = u'modified tags',
                    text = latest_revision.text
                )
                # TODO Badges related to retagging / Tag usage
                # TODO Badges related to editing Questions
            return HttpResponseRedirect(question.get_absolute_url())
    else:
        form = RetagQuestionForm(question)
    return render_to_response('retag_question.html', {
        'title': u'Edit Tags',
        'question': question,
        'form': form,
    }, context_instance=RequestContext(request))
# %-format template used to render one Question revision (title, body HTML
# and tags) for the revision-history diff view.
QUESTION_REVISION_TEMPLATE = ('<h1>%(title)s</h1>\n'
                              '<div class="text">%(html)s</div>\n'
                              '<div class="tags">%(tags)s</div>')
def question_revisions(request, question_id):
    """Revision history for a Question."""
    question = get_object_or_404(Question, id=question_id)
    revisions = list(question.revisions.all())
    populate_foreign_key_caches(User, ((revisions, ('author',)),),
        fields=('username', 'gravatar', 'reputation', 'gold', 'silver',
                'bronze'))
    for i, revision in enumerate(revisions):
        # Render each revision to HTML so consecutive pairs can be diffed.
        revision.html = QUESTION_REVISION_TEMPLATE % {
            'title': revision.title,
            'html': sanitize_html(markdowner.convert(revision.text)),
            'tags': ' '.join(['<a class="tag">%s</a>' % tag
                              for tag in revision.tagnames.split(' ')]),
        }
        if i > 0:
            # Attach the diff against the current revision to the previous
            # list entry. NOTE(review): assumes ``question.revisions`` is
            # ordered newest-first so this diffs older -> newer — confirm
            # against the model's default ordering.
            revisions[i - 1].diff = htmldiff(revision.html,
                                             revisions[i - 1].html)
    return render_to_response('question_revisions.html', {
        'title': u'Question Revisions',
        'question': question,
        'revisions': revisions,
    }, context_instance=RequestContext(request))
def close_question(request, question_id):
    """Closes or reopens a Question based on its current closed status."""
    question = get_object_or_404(Question, id=question_id)
    if not auth.can_close_question(request.user, question):
        raise Http404
    # Toggle: closed questions get the reopen flow, open ones the close flow.
    if question.closed:
        return _reopen_question(request, question)
    return _close_question(request, question)
def _close_question(request, question):
    """Closes a Question."""
    if request.method == 'POST' and 'close' in request.POST:
        form = CloseQuestionForm(request.POST)
        if form.is_valid():
            Question.objects.filter(id=question.id).update(closed=True,
                closed_by=request.user, closed_at=datetime.datetime.now(),
                close_reason=form.cleaned_data['reason'])
            if request.is_ajax():
                return JsonResponse({'success': True})
            else:
                return HttpResponseRedirect(question.get_absolute_url())
        elif request.is_ajax():
            # Invalid AJAX submission: hand the form errors back as JSON.
            return JsonResponse({'success': False, 'errors': form.errors})
    else:
        # AJAX clients only ever POST the close form; there is no
        # JSON rendering of the confirmation page.
        if request.is_ajax():
            raise Http404
        form = CloseQuestionForm()
    return render_to_response('close_question.html', {
        'title': u'Close Question',
        'question': question,
        'form': form,
    }, context_instance=RequestContext(request))
def _reopen_question(request, question):
    """Reopens a Question, clearing all of its "closed" bookkeeping."""
    if request.method == 'POST' and 'reopen' in request.POST:
        Question.objects.filter(id=question.id).update(closed=False,
            closed_by=None, closed_at=None, close_reason=None)
        if request.is_ajax():
            return JsonResponse({'success': True})
        return HttpResponseRedirect(question.get_absolute_url())
    # There is no JSON rendering of the confirmation page.
    if request.is_ajax():
        raise Http404
    return render_to_response('reopen_question.html', {
        'title': u'Reopen Question',
        'question': question,
    }, context_instance=RequestContext(request))
def delete_question(request, question_id):
    """Deletes or undeletes a Question (not yet implemented)."""
    raise NotImplementedError
def favourite_question(request, question_id):
    """
    Adds or removes a FavouriteQuestion.
    Favouriting will not use a confirmation page, as it's an action which
    is non-destructive and easily reversible.
    """
    if request.method != 'POST':
        raise Http404
    question = get_object_or_404(Question, id=question_id, deleted=False)
    favourite, created = FavouriteQuestion.objects.get_or_create(
        user=request.user, question=question)
    # A pre-existing favourite means this request toggles it off.
    if not created:
        favourite.delete()
    if not request.is_ajax():
        return HttpResponseRedirect(question.get_absolute_url())
    return JsonResponse({'success': True, 'favourited': created})
def add_answer(request, question_id):
    """
    Adds an Answer to a Question.
    Once a certain number of Answers have been added, a Question and all
    its Answers will enter wiki mode and all subsequent Answers will be in
    wiki mode.
    """
    question = get_object_or_404(Question, id=question_id)
    preview = None
    if request.method == 'POST':
        form = AddAnswerForm(request.POST)
        if form.is_valid():
            # Convert the Markdown body to sanitized HTML before either
            # previewing or persisting it.
            html = sanitize_html(markdowner.convert(form.cleaned_data['text']))
            if 'preview' in request.POST:
                # The user submitted the form to preview the formatted answer
                preview = mark_safe(html)
            elif 'submit' in request.POST:
                added_at = datetime.datetime.now()
                # Create the Answer; wiki mode is forced once the question
                # already has AUTO_WIKI_ANSWER_COUNT answers.
                answer = Answer(
                    question = question,
                    author = request.user,
                    added_at = added_at,
                    wiki = (form.cleaned_data['wiki'] or
                            question.answer_count >= AUTO_WIKI_ANSWER_COUNT),
                    html = html
                )
                if answer.wiki:
                    answer.wikified_at = added_at
                    # When in wiki mode, we always display the last edit
                    answer.last_edited_at = added_at
                    answer.last_edited_by = request.user
                answer.save()
                # Create the initial revision
                AnswerRevision.objects.create(
                    answer = answer,
                    revision = 1,
                    author = request.user,
                    revised_at = added_at,
                    summary = u'added answer',
                    text = form.cleaned_data['text']
                )
                # Keep the denormalised answer count in sync.
                Question.objects.update_answer_count(question)
                # TODO Badges related to answering Questions
                # TODO If this is answer 30, put question and all answers into
                #      wiki mode.
                # TODO Redirect needs to handle paging
                return HttpResponseRedirect(question.get_absolute_url())
    else:
        form = AddAnswerForm()
    return render_to_response('add_answer.html', {
        'title': u'Post an Answer',
        'question': question,
        'form': form,
        'preview': preview,
    }, context_instance=RequestContext(request))
def answer_comments(request, answer_id, form=None, answer=None):
    """
    Displays a single Answer and any Comments on it.
    Primarily a fallback for users who can't load Comments dynamically.
    """
    if answer is None:
        answer = get_object_or_404(Answer, id=answer_id)
    populate_foreign_key_caches(User, (
        ((answer,), ('author', 'last_edited_by')),
    ),
        fields=('username', 'gravatar', 'reputation', 'gold', 'silver',
                'bronze'))
    answer_ct = ContentType.objects.get_for_model(Answer)
    answer_comment_list = Comment.objects.filter(content_type=answer_ct,
        object_id=answer.id)
    if form is None:
        form = CommentForm()
    context = {
        'title': u'Answer Comments',
        'answer': answer,
        'comments': answer_comment_list,
        'comment_form': form,
    }
    return render_to_response('answer.html', context,
        context_instance=RequestContext(request))
def edit_answer(request, answer_id):
    """Edits an Answer."""
    answer = get_object_or_404(Answer, id=answer_id)
    if not auth.can_edit_post(request.user, answer):
        raise Http404
    latest_revision = answer.get_latest_revision()
    preview = None
    revision_form = None
    if request.method == 'POST':
        if 'select_revision' in request.POST:
            # The user submitted to change the revision to start editing from
            revision_form = RevisionForm(answer, latest_revision, request.POST)
            if revision_form.is_valid():
                # Replace Answer details with those from the selected revision
                form = EditAnswerForm(answer,
                    AnswerRevision.objects.get(answer=answer,
                        revision=revision_form.cleaned_data['revision']))
            else:
                # Make sure we keep a hold of the user's other input, even
                # though they appear to be messing about.
                form = EditAnswerForm(answer, latest_revision, request.POST)
        else:
            # Always check modifications against the latest revision
            form = EditAnswerForm(answer, latest_revision, request.POST)
            if form.is_valid():
                html = sanitize_html(
                    markdowner.convert(form.cleaned_data['text']))
                if 'preview' in request.POST:
                    # The user submitted to preview the formatted answer
                    preview = mark_safe(html)
                elif 'submit' in request.POST:
                    if form.has_changed():
                        edited_at = datetime.datetime.now()
                        # Update the Answer itself
                        updated_fields = {
                            'last_edited_at': edited_at,
                            'last_edited_by': request.user,
                            'html': html,
                        }
                        if ('wiki' in form.cleaned_data and
                                form.cleaned_data['wiki']):
                            updated_fields['wiki'] = True
                            updated_fields['wikified_at'] = edited_at
                        Answer.objects.filter(
                            id=answer.id).update(**updated_fields)
                        # Create a new revision
                        revision = AnswerRevision(
                            answer = answer,
                            author = request.user,
                            revised_at = edited_at,
                            text = form.cleaned_data['text']
                        )
                        if form.cleaned_data['summary']:
                            revision.summary = form.cleaned_data['summary']
                        else:
                            # Auto-generate an edit summary from the diff.
                            revision.summary = \
                                diff.generate_answer_revision_summary(
                                    latest_revision, revision,
                                    ('wiki' in updated_fields))
                        revision.save()
                        # TODO 5 body edits by the asker = automatic wiki mode
                        # TODO 4 individual editors = automatic wiki mode
                        # TODO Badges related to editing Answers
                    return HttpResponseRedirect(answer.get_absolute_url())
    else:
        revision_form = RevisionForm(answer, latest_revision)
        form = EditAnswerForm(answer, latest_revision)
    if revision_form is None:
        # We're about to redisplay after a POST where we didn't care which
        # revision was selected - make sure the revision the user started from
        # is still selected on redisplay.
        revision_form = RevisionForm(answer, latest_revision, request.POST)
    return render_to_response('edit_answer.html', {
        'title': u'Edit Answer',
        'question': answer.question,
        'answer': answer,
        'revision_form': revision_form,
        'form': form,
        'preview': preview,
    }, context_instance=RequestContext(request))
# %-format template used to render one Answer revision (body HTML only)
# for the revision-history diff view.
ANSWER_REVISION_TEMPLATE = '<div class="text">%(html)s</div>'
def answer_revisions(request, answer_id):
"""Revision history for an Answer."""
answer = get_object_or_404(Answer, id=answer_id)
revisions = list(answer.revisions.all())
populate_foreign_key_caches(User, ((revisions, ('author',)),),
fields=('username', 'gravatar', 'reputation', 'gold', 'silver',
'bronze'))
for i, revision in enumerate(revisions):
revision.html = QUESTION_REVISION_TEMPLATE % {
'html': sanitize_html(markdowner.convert(revision.text)),
}
if i > 0:
revisions[i - 1].diff = htmldiff(revision.html,
revisions[i - 1].html)
return render_to_response('answer_revisions.html', {
'title': u'Answer Revisions',
'answer': answer,
'revisions': revisions,
}, context_instance=RequestContext(request))
def accept_answer(request, answer_id):
    """Marks an Answer as accepted.

    Not yet implemented.
    """
    raise NotImplementedError
def delete_answer(request, answer_id):
    """Deletes or undeletes an Answer.

    Not yet implemented.
    """
    raise NotImplementedError
def vote(request, model, object_id):
    """
    Vote on a Question or Answer.

    Accepts a POST with ``type`` of ``'up'`` or ``'down'``; a repeated
    identical vote withdraws it, an opposite vote flips it.
    """
    # Voting is a state change, so only POST is accepted.
    if request.method != 'POST':
        raise Http404
    requested_type = request.POST.get('type', None)
    if requested_type == 'up' and auth.can_vote_up(request.user):
        vote_type = Vote.VOTE_UP
    elif requested_type == 'down' and auth.can_vote_down(request.user):
        vote_type = Vote.VOTE_DOWN
    else:
        raise Http404
    # TODO Ensure users can't vote on their own posts
    obj = get_object_or_404(model, id=object_id, deleted=False, locked=False)
    content_type = ContentType.objects.get_for_model(model)
    try:
        existing_vote = Vote.objects.get(content_type=content_type,
                                         object_id=object_id,
                                         user=request.user)
    except Vote.DoesNotExist:
        existing_vote = None
    if existing_vote is None:
        # First vote by this user on this object.
        Vote.objects.create(content_type=content_type,
                            object_id=object_id,
                            user=request.user,
                            vote=vote_type)
    elif existing_vote.vote == vote_type:
        # Voting the same way twice withdraws the vote.
        existing_vote.delete()
    else:
        # Flip the direction of the existing vote.
        existing_vote.vote = vote_type
        existing_vote.save()
    # TODO Reputation management
    if not request.is_ajax():
        return HttpResponseRedirect(obj.get_absolute_url())
    score = model._default_manager.filter(
        id=object_id).values_list('score', flat=True)[0]
    return JsonResponse({
        'success': True,
        'score': score,
    })
def flag_item(request, model, object_id):
    """Flag a Question or Answer as containing offensive content.

    Not yet implemented.
    """
    raise NotImplementedError
def add_comment(request, model, object_id):
    """Adds a comment to a Question or Answer."""
    obj = get_object_or_404(model, id=object_id)
    if request.method != "POST":
        form = CommentForm()
    else:
        form = CommentForm(request.POST)
        if form.is_valid():
            Comment.objects.create(
                content_type=ContentType.objects.get_for_model(model),
                object_id=object_id,
                author=request.user,
                added_at=datetime.datetime.now(),
                comment=form.cleaned_data['comment'])
            if request.is_ajax():
                return JsonResponse({'success': True})
            return HttpResponseRedirect(obj.get_absolute_url())
        # Invalid form: AJAX callers get the errors back as JSON.
        if request.is_ajax():
            return JsonResponse({'success': False, 'errors': form.errors})
    # Let the appropriate fallback view take care of display/redisplay
    if model is Question:
        return question_comments(request, obj, form=form)
    elif model is Answer:
        return answer_comments(request, object_id, answer=obj, form=form)
def delete_comment(request, comment_id):
    """Deletes a Comment permanently.

    Not yet implemented.
    """
    raise NotImplementedError
# Maps the ``?sort=`` query parameter to Tag ordering criteria.
TAG_SORT = {
    'popular': ('-use_count', 'name'),
    'name': ('name',),
}
# Ordering applied when the parameter is missing or unrecognised.
DEFAULT_TAG_SORT = 'popular'
def tags(request):
    """Searchable Tag list."""
    # Fall back to the default ordering for unknown sort keys.
    sort_type = request.GET.get('sort', DEFAULT_TAG_SORT)
    if sort_type not in TAG_SORT:
        sort_type = DEFAULT_TAG_SORT
    name_filter = request.GET.get('filter', '')
    tag_qs = Tag.objects.all().order_by(*TAG_SORT[sort_type])
    if name_filter:
        tag_qs = tag_qs.filter(name__icontains=name_filter)
    page = get_page(request, Paginator(tag_qs, 50))
    return render_to_response('tags.html', {
        'title': u'Tags',
        'tags': page.object_list,
        'page': page,
        'sort': sort_type,
        'filter': name_filter,
    }, context_instance=RequestContext(request))
def tag(request, tag_name):
    """Displayed Questions for a Tag.

    Not yet implemented.
    """
    raise NotImplementedError
# Maps the ``?sort=`` query parameter to User ordering criteria.
USER_SORT = {
    'reputation': ('-reputation', '-date_joined'),
    'newest': ('-date_joined',),
    'oldest': ('date_joined',),
    'name': ('username',),
}
# Ordering applied when the parameter is missing or unrecognised.
DEFAULT_USER_SORT = 'reputation'
def users(request):
    """Searchable User list."""
    # Fall back to the default ordering for unknown sort keys.
    sort_type = request.GET.get('sort', DEFAULT_USER_SORT)
    if sort_type not in USER_SORT:
        sort_type = DEFAULT_USER_SORT
    name_filter = request.GET.get('filter', '')
    user_qs = User.objects.all().order_by(*USER_SORT[sort_type])
    if name_filter:
        user_qs = user_qs.filter(username__icontains=name_filter)
    # Fetch only the fields the listing template renders.
    user_qs = user_qs.values('id', 'username', 'gravatar', 'reputation',
                             'gold', 'silver', 'bronze')
    page = get_page(request, Paginator(user_qs, 28))
    return render_to_response('users.html', {
        'title': u'Users',
        'users': page.object_list,
        'page': page,
        'sort': sort_type,
        'filter': name_filter,
    }, context_instance=RequestContext(request))
def user(request, user_id):
    """Displays a User and various information about them.

    Not yet implemented.
    """
    raise NotImplementedError
def badges(request):
    """Badge list."""
    context = {
        'title': u'Badges',
        'badges': Badge.objects.all(),
    }
    return render_to_response('badges.html', context,
                              context_instance=RequestContext(request))
def badge(request, badge_id):
    """Displays a Badge and any Users who have recently been awarded it."""
    badge = get_object_or_404(Badge, id=badge_id)
    # Most recent awards first, capped at 500 rows, fetching only the
    # fields the template renders.
    recipients = badge.awarded_to.all().order_by(
        '-award__awarded_at').values('id', 'username', 'reputation',
                                     'gold', 'silver', 'bronze')[:500]
    return render_to_response('badge.html', {
        'title': '%s Badge' % badge.name,
        'badge': badge,
        'awarded_to': recipients,
    }, context_instance=RequestContext(request))
|
|
#!/bin/env python
# ColorImgSort
# Sorts images based on color
# by Ed Salisbury <ed@edsalisbury.net>
# Written: 2013/12/24
import argparse
import os
import os.path
import math
from PIL import Image
class ImgSort():
    # Map supported palette sizes to the divisor that collapses an 8-bit
    # channel down to the matching bit depth:
    #   4096 colors - 4 bits/channel - divide by 16
    #    512 colors - 3 bits/channel - divide by 32
    #     64 colors - 2 bits/channel - divide by 64
    #      8 colors - 1 bit/channel  - divide by 128
    DIVISORS = {8: 128, 64: 64, 512: 32, 4096: 16}

    def __init__(self, **kwargs):
        """Class constructor - sets up the internal variables.

        Expected keyword arguments:
          path        -- directory containing the images
          prefix      -- prefix used when renaming sorted images
          num_points  -- number of sample points (must be a perfect square)
          blur_radius -- radius of the diamond-shaped averaging area
          num_colors  -- target palette size (8, 64, 512 or 4096)
        """
        self.path = kwargs['path']
        self.prefix = kwargs['prefix']
        self.num_points = kwargs['num_points']
        self.blur_radius = kwargs['blur_radius']
        self.num_colors = kwargs['num_colors']
        # Fail fast on an unsupported color count.  (The original called
        # a non-existent self.usage() deep inside the per-pixel loop,
        # which would have raised AttributeError.)  Computing the divisor
        # once here also hoists it out of the hot loop in sort().
        if self.num_colors not in self.DIVISORS:
            raise ValueError("num_colors must be one of %s"
                             % sorted(self.DIVISORS))
        self.divisor = self.DIVISORS[self.num_colors]

    def points(self, width, height):
        """Returns points based on an image size and the number of points

        Returns points that are distributed evenly across an image. If you
        think about intersecting lines, 9 points would create 3 horizontal
        lines and 3 vertical lines, and would return the 9 intersecting points.
        Due to the way this works, the number of points must be a perfect
        square (4, 9, 16, 25, etc.)
        """
        num_lines = int(math.sqrt(self.num_points))
        if num_lines ** 2 != self.num_points:
            raise RuntimeError("Number of points must be a perfect square")

        # Spacing between grid lines (integer division keeps Python 2
        # and Python 3 behaviour identical).
        chunk_width = width // (num_lines + 1)
        chunk_height = height // (num_lines + 1)

        x_list = [chunk_width * i for i in range(1, num_lines + 1)]
        y_list = [chunk_height * i for i in range(1, num_lines + 1)]

        # Row-major: all x positions for each y.
        return [(x, y) for y in y_list for x in x_list]

    def diamond_points(self, x, y):
        """Returns the list of points that form a diamond around the origin

        Makes concentric diamonds around a point (up to the specified radius),
        and returns all points within that diamond.

        Note: the top point of each ring is emitted twice (once as "Top"
        and once by the NE side); the duplicate is kept deliberately so
        averages match the original algorithm.
        """
        points = []
        orig_x = x
        orig_y = y

        # Origin
        points.append((x, y))

        for i in range(1, self.blur_radius + 1):
            # Top
            points.append((orig_x, orig_y - i))

            # NE side
            for x, y in zip(range(0, i), range(-i, 0)):
                points.append((orig_x + x, orig_y + y))

            # SE side
            for x, y in zip(range(i, 0, -1), range(0, i)):
                points.append((orig_x + x, orig_y + y))

            # SW side
            for x, y in zip(range(0, -i, -1), range(i, 0, -1)):
                points.append((orig_x + x, orig_y + y))

            # NW side
            for x, y in zip(range(-i, 0), range(0, -i, -1)):
                points.append((orig_x + x, orig_y + y))

        return points

    def sort(self):
        """Sorts a directory of images based on color

        Opens a directory, gets the file list, and determines the color at
        specific points in the images (using a simple blur), downsamples to
        a lower bitdepth and stores the filename in a dictionary, based on the
        color signature. When completed, it will rename the files based on the
        dictionary entries, using the specified prefix.
        """
        img_files = os.listdir(self.path)
        img_list = {}
        for img_file in img_files:
            filename = os.path.join(self.path, img_file)
            try:
                img = Image.open(filename)
            except Exception:
                # Not an image (or unreadable) - skip it.  Narrowed from a
                # bare except so KeyboardInterrupt/SystemExit still work.
                continue
            print("Analyzing %s" % img_file)
            points = self.points(img.size[0], img.size[1])
            key = ""
            for point in points:
                # Average the color over a diamond around each point.
                ave_points = self.diamond_points(point[0], point[1])
                red = 0
                green = 0
                blue = 0
                for ave_point in ave_points:
                    try:
                        rgb = img.getpixel(ave_point)
                        red += rgb[0]
                        green += rgb[1]
                        blue += rgb[2]
                    except IndexError:
                        # Sample point fell outside the image - skip it.
                        pass
                red //= len(ave_points)
                green //= len(ave_points)
                blue //= len(ave_points)

                # Lower the bitdepth and append the downsampled color to
                # the signature key (divisor precomputed in __init__).
                key += "%x%x%x" % (red // self.divisor,
                                   green // self.divisor,
                                   blue // self.divisor)

            # Group files sharing the same color signature.
            img_list.setdefault(key, []).append(img_file)

        # Go through and rename the files, based on the img_list dictionary
        # and the prefix
        num = 1
        for img in sorted(img_list):
            for filename in sorted(img_list[img]):
                name, ext = os.path.splitext(filename)
                new_filename = "%s%04d%s" % (self.prefix, num, ext)
                full_filename = os.path.join(self.path, filename)
                full_new_filename = os.path.join(self.path, new_filename)
                if os.path.isfile(full_new_filename):
                    print("File %s exists - aborting!" % full_new_filename)
                    return
                os.rename(full_filename, full_new_filename)
                print("Renamed %s to %s." % (filename, new_filename))
                num += 1
if __name__ == '__main__':
    description = "Sorts a directory of images"
    # Fixed epilog: 5 is not a perfect square (was "4, 5, 9").
    epilog = ("Note: Number of points must be a perfect square "
              "(4, 9, 16, etc.) (default is 9)")
    parser = argparse.ArgumentParser(description=description, epilog=epilog)
    parser.add_argument('path', help='directory of images to be sorted')
    parser.add_argument('prefix', help='prefix to use for new images')
    parser.add_argument('-c', dest='num_colors', type=int,
                        choices=[8, 64, 512, 4096], default=8,
                        help='number of colors to reduce to')
    parser.add_argument('-n', dest='num_points', type=int, default=9,
                        help='number of points to use')
    parser.add_argument('-r', dest='blur_radius', type=int, default=3,
                        help='blur radius')
    args = parser.parse_args()

    # Validate up front.  The original printed a warning but carried on
    # running anyway; parser.error() prints the message plus usage and
    # exits with status 2.
    num_lines = int(math.sqrt(args.num_points))
    if num_lines ** 2 != args.num_points:
        parser.error("Number of points must be a perfect square.")

    img = ImgSort(**vars(args))
    img.sort()
|
|
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['Pool']  # only the Pool class is part of the public API
#
# Imports
#
import threading
import queue
import itertools
import collections
import time
from multiprocessing import Process, cpu_count, TimeoutError
from multiprocessing.util import Finalize, debug
#
# Constants representing the state of a pool
#
RUN = 0        # pool accepts new tasks
CLOSE = 1      # no new tasks accepted; pending ones still run
TERMINATE = 2  # abort outstanding work and shut everything down
#
# Miscellaneous
#
# Global source of unique job ids, shared by all result objects.
job_counter = itertools.count()
def mapstar(args):
    """Unpack *args* as the arguments of map() and return the results as a list."""
    mapped = map(*args)
    return list(mapped)
def starmapstar(args):
    """Apply args[0] to each argument tuple in args[1], returning a list."""
    func = args[0]
    return [func(*argtuple) for argtuple in args[1]]
#
# Code run by worker processes
#
class MaybeEncodingError(Exception):
    """Wraps possible unpickleable errors, so they can be
    safely sent through the socket."""

    def __init__(self, exc, value):
        # Keep only repr() strings -- the originals may not pickle.
        self.value = repr(value)
        self.exc = repr(exc)
        super(MaybeEncodingError, self).__init__(self.exc, self.value)

    def __str__(self):
        template = "Error sending result: '%s'. Reason: '%s'"
        return template % (self.value, self.exc)

    def __repr__(self):
        return "<MaybeEncodingError: %s>" % str(self)
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
    """Main loop run by each pool worker.

    Pulls (job, index, func, args, kwds) tasks from *inqueue*, runs them,
    and puts (job, index, (success, value)) results on *outqueue*.  Stops
    on a None sentinel, a broken queue, or after *maxtasks* tasks.
    """
    assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
    send = outqueue.put
    recv = inqueue.get

    # When the queues are pipe-backed, close the ends this worker never uses.
    if hasattr(inqueue, '_writer'):
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        initializer(*initargs)

    done = 0
    while maxtasks is None or (maxtasks and done < maxtasks):
        try:
            task = recv()
        except (EOFError, IOError):
            debug('worker got EOFError or IOError -- exiting')
            break

        if task is None:
            debug('worker got sentinel -- exiting')
            break

        job, i, func, args, kwds = task
        try:
            outcome = (True, func(*args, **kwds))
        except Exception as exc:
            outcome = (False, exc)
        try:
            send((job, i, outcome))
        except Exception as exc:
            # The result itself may be unpicklable; report a wrapped,
            # picklable description instead.
            wrapped = MaybeEncodingError(exc, outcome[1])
            debug("Possible encoding error while sending result: %s" % (
                wrapped))
            send((job, i, (False, wrapped)))
        done += 1
    debug('worker exiting after %d tasks' % done)
#
# Class representing a process pool
#
class Pool(object):
    '''
    Class which supports an async version of applying functions to arguments.
    '''
    # Worker factory; ThreadPool overrides this with a thread-backed class.
    Process = Process

    def __init__(self, processes=None, initializer=None, initargs=(),
                 maxtasksperchild=None):
        # _inqueue/_outqueue and the _quick_put/_quick_get shortcuts are
        # created here; subclasses override _setup_queues().
        self._setup_queues()
        self._taskqueue = queue.Queue()
        # Maps job id -> result object for every job not yet completed.
        self._cache = {}
        self._state = RUN
        self._maxtasksperchild = maxtasksperchild
        self._initializer = initializer
        self._initargs = initargs

        if processes is None:
            try:
                processes = cpu_count()
            except NotImplementedError:
                # CPU count unknown -- fall back to a single worker.
                processes = 1
        if processes < 1:
            raise ValueError("Number of processes must be at least 1")

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        self._processes = processes
        self._pool = []
        self._repopulate_pool()

        # Thread that replaces workers which exited (crashed or reached
        # their maxtasksperchild limit).
        self._worker_handler = threading.Thread(
            target=Pool._handle_workers,
            args=(self, )
            )
        self._worker_handler.daemon = True
        self._worker_handler._state = RUN
        self._worker_handler.start()

        # Thread that moves tasks from _taskqueue into the workers'
        # shared input queue.
        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue,
                  self._pool, self._cache)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()

        # Thread that routes results from the output queue to the
        # matching result objects in _cache.
        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()

        # Registered cleanup, run at most once -- either via terminate()
        # or at interpreter exit.
        self._terminate = Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._worker_handler, self._task_handler,
                  self._result_handler, self._cache),
            exitpriority=15
            )

    def _join_exited_workers(self):
        """Cleanup after any worker processes which have exited due to reaching
        their specified lifetime. Returns True if any workers were cleaned up.
        """
        cleaned = False
        # Iterate in reverse so deletions don't shift unvisited indices.
        for i in reversed(range(len(self._pool))):
            worker = self._pool[i]
            if worker.exitcode is not None:
                # worker exited
                debug('cleaning up worker %d' % i)
                worker.join()
                cleaned = True
                del self._pool[i]
        return cleaned

    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(self._processes - len(self._pool)):
            w = self.Process(target=worker,
                             args=(self._inqueue, self._outqueue,
                                   self._initializer,
                                   self._initargs, self._maxtasksperchild)
                            )
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            debug('added worker')

    def _maintain_pool(self):
        """Clean up any exited workers and start replacements for them.
        """
        if self._join_exited_workers():
            self._repopulate_pool()

    def _setup_queues(self):
        # Pipe-backed queues shared with the worker processes; the
        # _quick_* shortcuts skip a method lookup per task/result.
        from .queues import SimpleQueue
        self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv

    def apply(self, func, args=(), kwds={}):
        '''
        Equivalent of `func(*args, **kwds)`.
        '''
        assert self._state == RUN
        return self.apply_async(func, args, kwds).get()

    def map(self, func, iterable, chunksize=None):
        '''
        Apply `func` to each element in `iterable`, collecting the results
        in a list that is returned.
        '''
        return self._map_async(func, iterable, mapstar, chunksize).get()

    def starmap(self, func, iterable, chunksize=None):
        '''
        Like `map()` method but the elements of the `iterable` are expected to
        be iterables as well and will be unpacked as arguments. Hence
        `func` and (a, b) becomes func(a, b).
        '''
        return self._map_async(func, iterable, starmapstar, chunksize).get()

    def starmap_async(self, func, iterable, chunksize=None, callback=None,
                      error_callback=None):
        '''
        Asynchronous version of `starmap()` method.
        '''
        return self._map_async(func, iterable, starmapstar, chunksize,
                               callback, error_callback)

    def imap(self, func, iterable, chunksize=1):
        '''
        Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if chunksize == 1:
            result = IMapIterator(self._cache)
            # One task per element; _set_length tells the iterator how
            # many results to expect once the input is exhausted.
            self._taskqueue.put((((result._job, i, func, (x,), {})
                         for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                     for i, x in enumerate(task_batches)), result._set_length))
            # Flatten the per-chunk result lists back into single items.
            return (item for chunk in result for item in chunk)

    def imap_unordered(self, func, iterable, chunksize=1):
        '''
        Like `imap()` method but ordering of results is arbitrary.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if chunksize == 1:
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                         for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                     for i, x in enumerate(task_batches)), result._set_length))
            # Flatten the per-chunk result lists back into single items.
            return (item for chunk in result for item in chunk)

    def apply_async(self, func, args=(), kwds={}, callback=None,
                    error_callback=None):
        '''
        Asynchronous version of `apply()` method.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        result = ApplyResult(self._cache, callback, error_callback)
        self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
        return result

    def map_async(self, func, iterable, chunksize=None, callback=None,
                  error_callback=None):
        '''
        Asynchronous version of `map()` method.
        '''
        return self._map_async(func, iterable, mapstar, chunksize, callback,
                               error_callback)

    def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
                   error_callback=None):
        '''
        Helper function to implement map, starmap and their async counterparts.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if not hasattr(iterable, '__len__'):
            iterable = list(iterable)

        if chunksize is None:
            # Default: roughly four chunks per worker process.
            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
            if extra:
                chunksize += 1
        if len(iterable) == 0:
            chunksize = 0

        task_batches = Pool._get_tasks(func, iterable, chunksize)
        result = MapResult(self._cache, chunksize, len(iterable), callback,
                           error_callback=error_callback)
        self._taskqueue.put((((result._job, i, mapper, (x,), {})
                              for i, x in enumerate(task_batches)), None))
        return result

    @staticmethod
    def _handle_workers(pool):
        """Background thread: keep the worker pool topped up until
        shut down."""
        thread = threading.current_thread()

        # Keep maintaining workers until the cache gets drained, unless the pool
        # is terminated.
        while thread._state == RUN or (pool._cache and thread._state != TERMINATE):
            pool._maintain_pool()
            time.sleep(0.1)
        # send sentinel to stop workers
        pool._taskqueue.put(None)
        debug('worker handler exiting')

    @staticmethod
    def _handle_tasks(taskqueue, put, outqueue, pool, cache):
        """Background thread: feed tasks from `taskqueue` to the workers'
        input queue until the None sentinel arrives."""
        thread = threading.current_thread()

        for taskseq, set_length in iter(taskqueue.get, None):
            i = -1
            for i, task in enumerate(taskseq):
                if thread._state:
                    debug('task handler found thread._state != RUN')
                    break
                try:
                    put(task)
                except Exception as e:
                    # Sending failed (e.g. unpicklable arguments): fail
                    # just this job rather than the whole pool.
                    job, ind = task[:2]
                    try:
                        cache[job]._set(ind, (False, e))
                    except KeyError:
                        pass
            else:
                # Whole sequence consumed without interruption.
                if set_length:
                    debug('doing set_length()')
                    set_length(i+1)
                continue
            break
        else:
            debug('task handler got sentinel')

        try:
            # tell result handler to finish when cache is empty
            debug('task handler sending sentinel to result handler')
            outqueue.put(None)

            # tell workers there is no more work
            debug('task handler sending sentinel to workers')
            for p in pool:
                put(None)
        except IOError:
            debug('task handler got IOError when sending sentinels')

        debug('task handler exiting')

    @staticmethod
    def _handle_results(outqueue, get, cache):
        """Background thread: deliver results from the output queue into
        the matching result objects in `cache`."""
        thread = threading.current_thread()

        while 1:
            try:
                task = get()
            except (IOError, EOFError):
                debug('result handler got EOFError/IOError -- exiting')
                return

            if thread._state:
                assert thread._state == TERMINATE
                debug('result handler found thread._state=TERMINATE')
                break

            if task is None:
                debug('result handler got sentinel')
                break

            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                # Job already removed from the cache (e.g. failed map).
                pass

        # After the sentinel: drain any results still owed to live jobs.
        while cache and thread._state != TERMINATE:
            try:
                task = get()
            except (IOError, EOFError):
                debug('result handler got EOFError/IOError -- exiting')
                return

            if task is None:
                debug('result handler ignoring extra sentinel')
                continue
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass

        if hasattr(outqueue, '_reader'):
            debug('ensuring that outqueue is not full')
            # If we don't make room available in outqueue then
            # attempts to add the sentinel (None) to outqueue may
            # block.  There is guaranteed to be no more than 2 sentinels.
            try:
                for i in range(10):
                    if not outqueue._reader.poll():
                        break
                    get()
            except (IOError, EOFError):
                pass

        debug('result handler exiting: len(cache)=%s, thread._state=%s',
              len(cache), thread._state)

    @staticmethod
    def _get_tasks(func, it, size):
        # Yield (func, batch) pairs, each batch holding up to `size` items.
        it = iter(it)
        while 1:
            x = tuple(itertools.islice(it, size))
            if not x:
                return
            yield (func, x)

    def __reduce__(self):
        raise NotImplementedError(
              'pool objects cannot be passed between processes or pickled'
              )

    def close(self):
        """Stop accepting new tasks; pending tasks still complete."""
        debug('closing pool')
        if self._state == RUN:
            self._state = CLOSE
            self._worker_handler._state = CLOSE

    def terminate(self):
        """Abort outstanding work and shut the pool down immediately."""
        debug('terminating pool')
        self._state = TERMINATE
        self._worker_handler._state = TERMINATE
        self._terminate()

    def join(self):
        """Wait for the handler threads and workers to finish.

        Must be preceded by close() or terminate().
        """
        debug('joining pool')
        assert self._state in (CLOSE, TERMINATE)
        self._worker_handler.join()
        self._task_handler.join()
        self._result_handler.join()
        for p in self._pool:
            p.join()

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # task_handler may be blocked trying to put items on inqueue
        debug('removing tasks from inqueue until task handler finished')
        # NOTE: the lock is never released -- the queue is being
        # discarded as part of pool termination.
        inqueue._rlock.acquire()
        while task_handler.is_alive() and inqueue._reader.poll():
            inqueue._reader.recv()
            time.sleep(0)

    @classmethod
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
                        worker_handler, task_handler, result_handler, cache):
        # this is guaranteed to only be called once
        debug('finalizing pool')

        worker_handler._state = TERMINATE
        task_handler._state = TERMINATE

        debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))

        assert result_handler.is_alive() or len(cache) == 0

        result_handler._state = TERMINATE
        outqueue.put(None)                  # sentinel

        # We must wait for the worker handler to exit before terminating
        # workers because we don't want workers to be restarted behind our back.
        debug('joining worker handler')
        if threading.current_thread() is not worker_handler:
            worker_handler.join()

        # Terminate workers which haven't already finished.
        if pool and hasattr(pool[0], 'terminate'):
            debug('terminating workers')
            for p in pool:
                if p.exitcode is None:
                    p.terminate()

        debug('joining task handler')
        if threading.current_thread() is not task_handler:
            task_handler.join()

        debug('joining result handler')
        if threading.current_thread() is not result_handler:
            result_handler.join()

        if pool and hasattr(pool[0], 'terminate'):
            debug('joining pool workers')
            for p in pool:
                if p.is_alive():
                    # worker has not yet exited
                    debug('cleaning up worker %d' % p.pid)
                    p.join()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Context-manager exit always terminates (mirrors CPython).
        self.terminate()
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
    """Result handle returned by `Pool.apply_async()`.

    The result handler thread delivers the job's outcome through
    `_set()`, which fires any callback, wakes waiters and removes the
    entry from the pool's job cache.
    """

    def __init__(self, cache, callback, error_callback):
        self._event = threading.Event()
        self._job = next(job_counter)
        self._cache = cache
        self._callback = callback
        self._error_callback = error_callback
        # Register ourselves so the result handler can find this job.
        cache[self._job] = self

    def ready(self):
        """True once the job's outcome has been delivered."""
        return self._event.is_set()

    def successful(self):
        """True if the job completed without raising; requires ready()."""
        assert self.ready()
        return self._success

    def wait(self, timeout=None):
        """Block until the outcome arrives or *timeout* elapses."""
        self._event.wait(timeout)

    def get(self, timeout=None):
        """Return the job's value, re-raising any exception it produced."""
        self.wait(timeout)
        if not self.ready():
            raise TimeoutError
        if self._success:
            return self._value
        raise self._value

    def _set(self, i, obj):
        # obj is a (success, value-or-exception) pair from a worker.
        self._success, self._value = obj
        notify = self._callback if self._success else self._error_callback
        if notify:
            notify(self._value)
        self._event.set()
        del self._cache[self._job]

AsyncResult = ApplyResult       # create alias -- see #17805
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
    """Result handle returned by `Pool.map_async()`.

    Collects one chunk of results per `_set()` call; the overall call is
    complete once every chunk has arrived (or any chunk has failed).
    """

    def __init__(self, cache, chunksize, length, callback, error_callback):
        ApplyResult.__init__(self, cache, callback,
                             error_callback=error_callback)
        self._success = True
        self._value = [None] * length
        self._chunksize = chunksize
        if chunksize <= 0:
            # Empty input: nothing to wait for, mark ready immediately.
            self._number_left = 0
            self._event.set()
            del cache[self._job]
        else:
            # Chunks still outstanding (last chunk may be short).
            self._number_left = length//chunksize + bool(length % chunksize)

    def _set(self, i, success_result):
        # success_result is (success, list-of-values-or-exception) for
        # the i'th chunk.
        success, result = success_result
        if success:
            # Splice the chunk's values into their slice of the output.
            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
            self._number_left -= 1
            if self._number_left == 0:
                if self._callback:
                    self._callback(self._value)
                del self._cache[self._job]
                self._event.set()
        else:
            # First failure poisons the whole map; later chunks for this
            # job are dropped by the result handler's KeyError guard.
            self._success = False
            self._value = result
            if self._error_callback:
                self._error_callback(self._value)
            del self._cache[self._job]
            self._event.set()
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
    """Ordered iterator over results, returned by `Pool.imap()`.

    The result handler thread delivers (index, item) pairs through
    `_set()`; items arriving out of order are buffered in `_unsorted`
    until their turn comes.
    """

    def __init__(self, cache):
        self._cond = threading.Condition(threading.Lock())
        self._job = next(job_counter)
        self._cache = cache
        self._items = collections.deque()   # in-order items ready to yield
        self._index = 0                     # index of the next item to queue
        self._length = None                 # total count, set by _set_length()
        self._unsorted = {}                 # out-of-order items keyed by index
        cache[self._job] = self

    def __iter__(self):
        return self

    def next(self, timeout=None):
        """Return the next result, blocking up to *timeout* seconds."""
        self._cond.acquire()
        try:
            try:
                item = self._items.popleft()
            except IndexError:
                # Nothing queued: either finished, or we must wait for
                # the result handler to deliver the next item.
                if self._index == self._length:
                    raise StopIteration
                self._cond.wait(timeout)
                try:
                    item = self._items.popleft()
                except IndexError:
                    # Still nothing after waiting: finished or timed out.
                    if self._index == self._length:
                        raise StopIteration
                    raise TimeoutError
        finally:
            self._cond.release()

        success, value = item
        if success:
            return value
        raise value

    __next__ = next                    # XXX

    def _set(self, i, obj):
        # Called by the result handler thread with the i'th outcome.
        self._cond.acquire()
        try:
            if self._index == i:
                # In-order arrival: queue it, then flush any buffered
                # successors that are now in order.
                self._items.append(obj)
                self._index += 1
                while self._index in self._unsorted:
                    obj = self._unsorted.pop(self._index)
                    self._items.append(obj)
                    self._index += 1
                self._cond.notify()
            else:
                # Early arrival: hold it until its predecessors show up.
                self._unsorted[i] = obj

            if self._index == self._length:
                # All results delivered -- drop the cache entry.
                del self._cache[self._job]
        finally:
            self._cond.release()

    def _set_length(self, length):
        # Called once the task generator is exhausted; tells us how many
        # results to expect in total.
        self._cond.acquire()
        try:
            self._length = length
            if self._index == self._length:
                self._cond.notify()
                del self._cache[self._job]
        finally:
            self._cond.release()
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
    """Iterator returned by `Pool.imap_unordered()`.

    Results are handed out in completion order, so every incoming item
    is queued immediately instead of being held back for ordering.
    """

    def _set(self, i, obj):
        with self._cond:
            self._items.append(obj)
            self._index += 1
            self._cond.notify()
            if self._index == self._length:
                # All results delivered -- drop the cache entry.
                del self._cache[self._job]
#
#
#
class ThreadPool(Pool):
    """A Pool variant whose workers are threads rather than processes."""

    from .dummy import Process

    def __init__(self, processes=None, initializer=None, initargs=()):
        super().__init__(processes, initializer, initargs)

    def _setup_queues(self):
        # Plain thread-safe queues suffice -- no pipes are needed
        # between threads of a single process.
        self._inqueue = queue.Queue()
        self._outqueue = queue.Queue()
        self._quick_put = self._inqueue.put
        self._quick_get = self._outqueue.get

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # put sentinels at head of inqueue to make workers finish
        with inqueue.not_empty:
            inqueue.queue.clear()
            inqueue.queue.extend([None] * size)
            inqueue.not_empty.notify_all()
|
|
"""Exception and error handling.
This contains the core exceptions that the implementations should raise
as well as the IActiveScriptError interface code.
"""
import sys, traceback
from win32com.axscript import axscript
import winerror
import win32com.server.exception
import win32com.server.util
import pythoncom
import re
debugging = 0  # set non-zero for full tracebacks and linecache flushing
def FormatForAX(text):
    """Format a string suitable for an AX Host

    Converts LF line endings to CRLF and expands tabs so the text
    displays correctly in an AX host (ie, ASP).
    """
    with_crlf = AddCR(text)
    return ExpandTabs(with_crlf)

def ExpandTabs(text):
    """Replace every tab character in *text*."""
    return re.sub('\t',' ', text)

def AddCR(text):
    """Convert bare LF line endings in *text* to CRLF."""
    return re.sub('\n','\r\n',text)
class IActiveScriptError:
    """An implementation of IActiveScriptError

    The ActiveX Scripting host calls this client whenever we report
    an exception to it. This interface provides the exception details
    for the host to report to the user.
    """
    _com_interfaces_ = [axscript.IID_IActiveScriptError]
    _public_methods_ = ["GetSourceLineText","GetSourcePosition","GetExceptionInfo"]

    def _query_interface_(self, iid):
        # Only IID_IActiveScriptError is expected; report and refuse
        # anything else.
        print "IActiveScriptError QI - unknown IID", iid
        return 0

    def _SetExceptionInfo(self, exc):
        # exc is expected to expose linetext/sourceContext/lineno/
        # startLineNo/colno (see AXScriptException below).
        self.exception = exc

    def GetSourceLineText(self):
        # Text of the script line that raised, if known.
        return self.exception.linetext

    def GetSourcePosition(self):
        ctx = self.exception.sourceContext
        # Zero based in the debugger (but our columns are too!)
        return ctx, self.exception.lineno + self.exception.startLineNo-1, self.exception.colno

    def GetExceptionInfo(self):
        # The exception object itself acts as the COM EXCEPINFO source.
        return self.exception
class AXScriptException(win32com.server.exception.COMException):
    """A class used as a COM exception.

    Note this has attributes which conform to the standard attributes
    for COM exceptions, plus a few others specific to our IActiveScriptError
    object (sourceContext, startLineNo, linetext, lineno, colno).
    """
    def __init__(self, site, codeBlock, exc_type, exc_value, exc_traceback):
        # set properties base class shares via base ctor...
        win32com.server.exception.COMException.__init__( self, \
            description = "Unknown Exception", \
            scode = winerror.DISP_E_EXCEPTION, \
            source = "Python ActiveX Scripting Engine",
            )
        # And my other values...
        # With no code block we cannot map positions back to host source.
        if codeBlock is None:
            self.sourceContext = 0
            self.startLineNo = 0
        else:
            self.sourceContext = codeBlock.sourceContextCookie
            self.startLineNo = codeBlock.startLineNumber
        self.linetext = ""
        self.__BuildFromException(site, exc_type, exc_value, exc_traceback)
    def __BuildFromException(self, site, type , value, tb):
        """Fill description/lineno/colno/linetext from the Python exception."""
        if debugging:
            import linecache
            linecache.clearcache()
        try:
            # Syntax errors carry their own position info; everything else
            # needs the traceback walked.
            if issubclass(type, SyntaxError):
                self._BuildFromSyntaxError(site, value, tb)
            else:
                self._BuildFromOther(site, type, value, tb)
        except: # Error extracting traceback info!!!
            traceback.print_exc()
            # re-raise.
            raise
    def _BuildFromSyntaxError(self, site, value, tb):
        """Populate position details from a SyntaxError value.

        NOTE(review): indexes the exception value (value[0], value[1]) --
        Python 2 era SyntaxError shape; Python 3 code would use value.args
        and the msg/filename/lineno/offset attributes.
        """
        # All syntax errors should have a message as element 0
        try:
            msg = value[0]
        except:
            msg = "Unknown Error (%s)" % (value,)
        try:
            (filename, lineno, offset, line) = value[1]
            # Some of these may be None, which upsets us!
            if offset is None:
                offset = 0
            if line is None:
                line = ""
        except:
            msg = "Unknown"
            lineno = 0
            offset = 0
            line = "Unknown"
        self.description=FormatForAX(msg)
        self.lineno = lineno
        # Host columns are zero based; SyntaxError offsets are one based.
        self.colno = offset - 1
        self.linetext = ExpandTabs(line.rstrip())
    def _BuildFromOther(self, site, exc_type, value, tb):
        """Populate description/position from a non-syntax exception,
        trimming the traceback to the host's script blocks."""
        self.colno = -1
        self.lineno = 0
        if debugging: # Full traceback if debugging.
            list=traceback.format_exception(exc_type, value, tb)
            self.description = ExpandTabs(''.join(list))
            return
        # Run down the traceback list, looking for the first "<Script..>"
        # Hide traceback above this. In addition, keep going down
        # looking for a "_*_" attribute, and below hide these also.
        hide_names = ["r_import","r_reload","r_open"] # hide from these functions down in the traceback.
        depth = None
        tb_top = tb
        while tb_top:
            filename, lineno, name, line = self.ExtractTracebackInfo(tb_top, site)
            if filename[:7]=="<Script":
                break
            tb_top = tb_top.tb_next
        format_items = []
        if tb_top: # found one.
            depth = 0
            tb_look = tb_top
            # Look down for our bottom
            while tb_look:
                filename, lineno, name, line = self.ExtractTracebackInfo(tb_look, site)
                if name in hide_names:
                    break
                # We can report a line-number, but not a filename.  Therefore,
                # we return the last line-number we find in one of our script
                # blocks.
                if filename.startswith("<Script"):
                    self.lineno = lineno
                    self.linetext = line
                format_items.append((filename, lineno, name, line))
                depth = depth + 1
                tb_look = tb_look.tb_next
        else:
            depth = None
            tb_top = tb
        bits = ['Traceback (most recent call last):\n']
        bits.extend(traceback.format_list(format_items))
        if exc_type==pythoncom.com_error:
            # COM errors: prefer the embedded EXCEPINFO description if set.
            desc = "%s (0x%x)" % (value[1], value[0])
            if value[0]==winerror.DISP_E_EXCEPTION and value[2] and value[2][2]:
                desc = value[2][2]
            bits.append("COM Error: "+desc)
        else:
            bits.extend(traceback.format_exception_only(exc_type, value))
        # XXX - this utf8 encoding seems bogus.  From well before py3k,
        # we had the comment:
        # > all items in the list are utf8 courtesy of Python magically
        # > converting unicode to utf8 before compilation.
        # but that is likely just confusion from early unicode days;
        # Python isn't doing it, pywin32 probably was, so 'mbcs' would
        # be the default encoding.  We should never hit this these days
        # anyway, but on py3k, we *never* will, and str objects there
        # don't have a decode method...
        if sys.version_info < (3,):
            for i in xrange(len(bits)):
                if type(bits[i]) is str:
                    #assert type(bits[i]) is str, type(bits[i])
                    bits[i] = bits[i].decode('utf8')
        self.description = ExpandTabs(u''.join(bits))
        # Clear tracebacks etc.
        tb = tb_top = tb_look = None
    def ExtractTracebackInfo(self, tb, site):
        """Return (filename, lineno, name, line) for one traceback frame.

        Falls back to the site's script code blocks when linecache has no
        source for the frame (code compiled from host-provided script text).
        """
        import linecache
        f = tb.tb_frame
        lineno = tb.tb_lineno
        co = f.f_code
        filename = co.co_filename
        name = co.co_name
        line = linecache.getline(filename, lineno)
        if not line:
            try:
                codeBlock = site.scriptCodeBlocks[filename]
            except KeyError:
                codeBlock = None
            if codeBlock:
                # Note: 'line' will now be unicode.
                line = codeBlock.GetLineNo(lineno)
        if line:
            line = line.strip()
        else:
            line = None
        return filename, lineno, name, line
    def __repr__(self):
        return "AXScriptException Object with description:" + self.description
def ProcessAXScriptException(scriptingSite, debugManager, exceptionInstance):
    """General function to handle any exception in AX code

    This function creates an instance of our IActiveScriptError interface, and
    gives it to the host, along with out exception class.  The host will
    likely call back on the IActiveScriptError interface to get the source text
    and other information not normally in COM exceptions.
    """
    # traceback.print_exc()
    instance = IActiveScriptError()
    instance._SetExceptionInfo(exceptionInstance)
    gateway = win32com.server.util.wrap(instance, axscript.IID_IActiveScriptError)
    if debugManager:
        fCallOnError = debugManager.HandleRuntimeError()
        # The debugger asked that the host not be told about the error.
        if not fCallOnError:
            return None
    try:
        result = scriptingSite.OnScriptError(gateway)
    except pythoncom.com_error, details:
        # The host itself failed while handling the error; fall back to
        # stderr and treat the error as unreported (S_FALSE).
        print "**OnScriptError failed:", details
        print "Exception description:'%s'" % (repr(exceptionInstance.description))
        print "Exception text:'%s'" % (repr(exceptionInstance.linetext))
        result = winerror.S_FALSE
    if result==winerror.S_OK:
        # If the above returns NOERROR, it is assumed the error has been
        # correctly registered and the value SCRIPT_E_REPORTED is returned.
        ret = win32com.server.exception.COMException(scode=axscript.SCRIPT_E_REPORTED)
        return ret
    else:
        # The error is taken to be unreported and is propagated up the call stack
        # via the IDispatch::Invoke's EXCEPINFO parameter (hr returned is DISP_E_EXCEPTION.
        return exceptionInstance
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for scheduling."""
import collections
import functools
import sys
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import exception
from nova.i18n import _, _LE, _LW
from nova import objects
from nova.objects import base as obj_base
from nova.objects import instance as instance_obj
from nova import rpc
# Module-level logger for the scheduler utility helpers.
LOG = logging.getLogger(__name__)
# Options owned by this module; registered on the global CONF object below.
scheduler_opts = [
    cfg.IntOpt('scheduler_max_attempts',
               default=3,
               help='Maximum number of attempts to schedule an instance'),
]
CONF = cfg.CONF
CONF.register_opts(scheduler_opts)
# Declared in nova.scheduler.host_manager; imported here so validate_filter()
# can read CONF.scheduler_default_filters.
CONF.import_opt('scheduler_default_filters', 'nova.scheduler.host_manager')
# Lightweight record pairing a set of group hosts with the group's policies;
# returned by _get_group_details() below.
GroupDetails = collections.namedtuple('GroupDetails', ['hosts', 'policies'])
def build_request_spec(ctxt, image, instances, instance_type=None):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the
    same type, so only the first instance is inspected.

    :param ctxt: request context (unused here, kept for API symmetry)
    :param image: image metadata dict, may be None
    :param instances: non-empty list of instances (objects or legacy dicts)
    :param instance_type: flavor; derived from the first instance when None
    :returns: JSON-primitive request_spec dict
    """
    first = instances[0]
    is_instance_obj = isinstance(first, objects.Instance)
    if instance_type is None:
        # Derive the flavor from the instance itself when not supplied.
        if is_instance_obj:
            instance_type = first.get_flavor()
        else:
            instance_type = flavors.extract_flavor(first)
    # The scheduler interface still speaks primitives, so down-convert
    # versioned objects before building the spec.
    instance = instance_obj.compat_instance(first) if is_instance_obj else first
    if isinstance(instance_type, objects.Flavor):
        instance_type = obj_base.obj_to_primitive(instance_type)
    request_spec = {
        'image': image or {},
        'instance_properties': instance,
        'instance_type': instance_type,
        'num_instances': len(instances)}
    return jsonutils.to_primitive(request_spec)
def set_vm_state_and_notify(context, instance_uuid, service, method, updates,
                            ex, request_spec, db):
    """changes VM state and notifies.

    Persists ``updates`` on the instance, records the fault, and emits an
    error notification with event type ``<service>.<method>``.

    :param db: unused; retained for signature compatibility with callers.
    """
    LOG.warning(_LW("Failed to %(service)s_%(method)s: %(ex)s"),
                {'service': service, 'method': method, 'ex': ex})
    vm_state = updates['vm_state']
    properties = request_spec.get('instance_properties', {})
    # NOTE(vish): We shouldn't get here unless we have a catastrophic
    # failure, so just set the instance to its internal state
    notifier = rpc.get_notifier(service)
    LOG.warning(_LW('Setting instance to %s state.'), vm_state.upper(),
                instance_uuid=instance_uuid)
    instance = objects.Instance(context=context, uuid=instance_uuid,
                                **updates)
    # Keep 'uuid' out of the dirty set so save() only writes the updates.
    instance.obj_reset_changes(['uuid'])
    instance.save()
    compute_utils.add_instance_fault_from_exc(context, instance, ex,
                                              sys.exc_info())
    payload = {'request_spec': request_spec,
               'instance_properties': properties,
               'instance_id': instance_uuid,
               'state': vm_state,
               'method': method,
               'reason': ex}
    notifier.error(context, '%s.%s' % (service, method), payload)
def populate_filter_properties(filter_properties, host_state):
    """Add additional information to the filter properties after a node has
    been selected by the scheduling process.
    """
    # host_state arrives either as a HostState object or its dict form.
    if isinstance(host_state, dict):
        host = host_state['host']
        nodename = host_state['nodename']
        limits = host_state['limits']
    else:
        host, nodename, limits = (host_state.host,
                                  host_state.nodename,
                                  host_state.limits)
    # Adds a retry entry for the selected compute host and node:
    _add_retry_host(filter_properties, host, nodename)
    # Forced hosts bypass the oversubscription limits.
    if not filter_properties.get('force_hosts'):
        filter_properties['limits'] = limits
def populate_retry(filter_properties, instance_uuid):
    """Track scheduling attempts in filter_properties['retry'].

    Increments the attempt counter unless retrying is disabled, and raises
    NoValidHost once the configured maximum is exceeded.
    """
    max_attempts = _max_attempts()
    force_hosts = filter_properties.get('force_hosts', [])
    force_nodes = filter_properties.get('force_nodes', [])
    # In the case of multiple force hosts/nodes, scheduler should not
    # disable retry filter but traverse all force hosts/nodes one by
    # one till scheduler gets a valid target host.
    single_forced = len(force_hosts) == 1 or len(force_nodes) == 1
    if max_attempts == 1 or single_forced:
        # re-scheduling is disabled.
        return
    # retry is enabled, update attempt count:
    retry = filter_properties.setdefault('retry', {
        'num_attempts': 0,
        'hosts': [],  # list of compute hosts tried
    })
    retry['num_attempts'] += 1
    _log_compute_error(instance_uuid, retry)
    exc = retry.pop('exc', None)
    if retry['num_attempts'] > max_attempts:
        msg = (_('Exceeded max scheduling attempts %(max_attempts)d '
                 'for instance %(instance_uuid)s. '
                 'Last exception: %(exc)s')
               % {'max_attempts': max_attempts,
                  'instance_uuid': instance_uuid,
                  'exc': exc})
        raise exception.NoValidHost(reason=msg)
def _log_compute_error(instance_uuid, retry):
"""If the request contained an exception from a previous compute
build/resize operation, log it to aid debugging
"""
exc = retry.get('exc') # string-ified exception from compute
if not exc:
return # no exception info from a previous attempt, skip
hosts = retry.get('hosts', None)
if not hosts:
return # no previously attempted hosts, skip
last_host, last_node = hosts[-1]
LOG.error(_LE('Error from last host: %(last_host)s (node %(last_node)s):'
' %(exc)s'),
{'last_host': last_host,
'last_node': last_node,
'exc': exc},
instance_uuid=instance_uuid)
def _max_attempts():
    """Return CONF.scheduler_max_attempts after validating it is >= 1."""
    attempts = CONF.scheduler_max_attempts
    if attempts < 1:
        raise exception.NovaException(_("Invalid value for "
                                        "'scheduler_max_attempts', must be >= 1"))
    return attempts
def _add_retry_host(filter_properties, host, node):
"""Add a retry entry for the selected compute node. In the event that
the request gets re-scheduled, this entry will signal that the given
node has already been tried.
"""
retry = filter_properties.get('retry', None)
if not retry:
return
hosts = retry['hosts']
hosts.append([host, node])
def parse_options(opts, sep='=', converter=str, name=""):
    """Parse a list of options, each in the format of <key><sep><value>. Also
    use the converter to convert the value into desired type.

    :params opts: list of options, e.g. from oslo_config.cfg.ListOpt
    :params sep: the separator
    :params converter: callable object to convert the value, should raise
                       ValueError for conversion failure
    :params name: name of the option
    :returns: a lists of tuple of values (key, converted_value)
    """
    good = []
    bad = []
    for opt in opts:
        key, seen_sep, raw_value = opt.partition(sep)
        try:
            value = converter(raw_value)
        except ValueError:
            key = None
            value = None
        # A valid entry needs a non-empty key, the separator present, and
        # a successfully converted value.
        if key and seen_sep and value is not None:
            good.append((key, value))
        else:
            bad.append(opt)
    if bad:
        LOG.warning(_LW("Ignoring the invalid elements of the option "
                        "%(name)s: %(options)s"),
                    {'name': name,
                     'options': ", ".join(bad)})
    return good
def validate_filter(filter):
    """Validates that the filter is configured in the default filters."""
    enabled_filters = CONF.scheduler_default_filters
    return filter in enabled_filters
# Lazily-resolved caches: None means "not yet checked against the enabled
# scheduler filters"; _get_group_details() fills them in on first use.
_SUPPORTS_AFFINITY = None
_SUPPORTS_ANTI_AFFINITY = None
def _get_group_details(context, instance_uuid, user_group_hosts=None):
    """Provide group_hosts and group_policies sets related to instances if
    those instances are belonging to a group and if corresponding filters are
    enabled.

    :param instance_uuid: UUID of the instance to check
    :param user_group_hosts: Hosts from the group or empty set
    :returns: None or namedtuple GroupDetails
    :raises: UnsupportedPolicyException if the group requests a policy whose
             filter is not enabled
    """
    global _SUPPORTS_AFFINITY
    global _SUPPORTS_ANTI_AFFINITY
    # Resolve the filter-support flags once and cache them at module level.
    if _SUPPORTS_AFFINITY is None:
        _SUPPORTS_AFFINITY = validate_filter('ServerGroupAffinityFilter')
    if _SUPPORTS_ANTI_AFFINITY is None:
        _SUPPORTS_ANTI_AFFINITY = validate_filter(
            'ServerGroupAntiAffinityFilter')
    supports_groups = _SUPPORTS_AFFINITY or _SUPPORTS_ANTI_AFFINITY
    if not supports_groups or not instance_uuid:
        return
    try:
        group = objects.InstanceGroup.get_by_instance_uuid(context,
                                                           instance_uuid)
    except exception.InstanceGroupNotFound:
        return
    wanted = ('anti-affinity', 'affinity')
    if any(policy in wanted for policy in group.policies):
        # Requesting a policy whose filter is disabled is a hard error.
        if 'affinity' in group.policies and not _SUPPORTS_AFFINITY:
            msg = _("ServerGroupAffinityFilter not configured")
            LOG.error(msg)
            raise exception.UnsupportedPolicyException(reason=msg)
        if 'anti-affinity' in group.policies and not _SUPPORTS_ANTI_AFFINITY:
            msg = _("ServerGroupAntiAffinityFilter not configured")
            LOG.error(msg)
            raise exception.UnsupportedPolicyException(reason=msg)
        member_hosts = set(group.get_hosts())
        user_hosts = set(user_group_hosts) if user_group_hosts else set()
        return GroupDetails(hosts=user_hosts | member_hosts,
                            policies=group.policies)
def setup_instance_group(context, request_spec, filter_properties):
    """Add group_hosts and group_policies fields to filter_properties dict
    based on instance uuids provided in request_spec, if those instances are
    belonging to a group.

    :param request_spec: Request spec
    :param filter_properties: Filter properties
    """
    # NOTE(sbauza) If there are multiple instance UUIDs, it's a boot
    # request and they will all be in the same group, so it's safe to
    # only check the first one.
    instance_uuid = request_spec.get('instance_properties', {}).get('uuid')
    group_info = _get_group_details(context, instance_uuid,
                                    filter_properties.get('group_hosts'))
    if group_info is None:
        return
    filter_properties['group_updated'] = True
    filter_properties['group_hosts'] = group_info.hosts
    filter_properties['group_policies'] = group_info.policies
def retry_on_timeout(retries=1):
    """Retry the call in case a MessagingTimeout is raised.

    A decorator for retrying calls when a service dies mid-request.

    :param retries: Number of retries
    :returns: Decorator
    """
    def outer(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            timeouts = 0
            while True:
                try:
                    return func(*args, **kwargs)
                except messaging.MessagingTimeout:
                    timeouts += 1
                    if timeouts > retries:
                        # Retry budget exhausted; propagate the timeout.
                        raise
                    LOG.warning(_LW(
                        "Retrying %(name)s after a MessagingTimeout, "
                        "attempt %(attempt)s of %(retries)s."),
                        {'attempt': timeouts, 'retries': retries,
                         'name': func.__name__})
        return wrapped
    return outer
# Pre-built decorator for destination selection: first try plus up to
# (scheduler_max_attempts - 1) retries on MessagingTimeout.
retry_select_destinations = retry_on_timeout(_max_attempts() - 1)
|
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import time
from django.conf import settings
from django.core import mail
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.amo.tests import addon_factory, user_factory
from olympia.addons.models import Addon, AddonUser
from olympia.versions.models import (
Version, version_uploaded, ApplicationsVersions)
from olympia.files.models import File
from olympia.applications.models import AppVersion
from olympia.editors.models import (
EditorSubscription, RereviewQueueTheme, ReviewerScore, send_notifications,
ViewFullReviewQueue, ViewPendingQueue,
ViewPreliminaryQueue, ViewUnlistedAllList, ViewUnlistedFullReviewQueue,
ViewUnlistedPendingQueue, ViewUnlistedPreliminaryQueue)
from olympia.users.models import UserProfile
def create_addon_file(name, version_str, addon_status, file_status,
                      platform=amo.PLATFORM_ALL, application=amo.FIREFOX,
                      admin_review=False, addon_type=amo.ADDON_EXTENSION,
                      created=None, file_kw=None, version_kw=None,
                      listed=True, nomination=None):
    """Create (or reuse by name) an add-on with one version and one file.

    Returns a dict {'addon': ..., 'version': ..., 'file': ...}.
    NOTE(review): addon_status is applied last and on purpose may not match
    the attached file's status -- see the comment near the end.
    """
    if file_kw is None:
        file_kw = {}
    if version_kw is None:
        version_kw = {}
    app_vr, created_ = AppVersion.objects.get_or_create(
        application=application.id, version='1.0')
    ad, created_ = Addon.with_unlisted.get_or_create(
        name__localized_string=name,
        defaults={'type': addon_type, 'name': name, 'is_listed': listed})
    if admin_review:
        ad.update(admin_review=True)
    vr, created_ = Version.objects.get_or_create(addon=ad, version=version_str,
                                                 defaults=version_kw)
    if nomination is not None:
        vr.nomination = nomination
        vr.save()
    # get_or_create ignores defaults on reuse, so push version_kw manually.
    if not created_:
        vr.update(**version_kw)
    va, created_ = ApplicationsVersions.objects.get_or_create(
        version=vr, application=application.id, min=app_vr, max=app_vr)
    file_defaults = {
        'version': vr,
        'filename': u"%s.xpi" % name,
        'platform': platform.id,
        'status': file_status,
        'no_restart': True
    }
    file_defaults.update(file_kw)
    file_ = File.objects.create(**file_defaults)
    # Backdate creation timestamps after creation (auto_now_add would
    # otherwise override them).
    if created:
        vr.update(created=created)
        file_.update(created=created)
    # Update status *after* we are done creating/modifying version and files:
    Addon.with_unlisted.get(pk=ad.id).update(status=addon_status)
    return {'addon': ad, 'version': vr, 'file': file_}
def create_search_ext(name, version_str, addon_status, file_status,
                      listed=True):
    """Create (or reuse by name) a search-engine add-on with one version
    and one file; returns the Addon instance.

    Mirrors create_addon_file() but is fixed to type ADDON_SEARCH and
    PLATFORM_ALL.
    """
    addon, _created = Addon.with_unlisted.get_or_create(
        name__localized_string=name,
        defaults={'type': amo.ADDON_SEARCH, 'name': name, 'is_listed': listed})
    version, _created = Version.objects.get_or_create(addon=addon,
                                                      version=version_str)
    File.objects.create(version=version, filename=u"%s.xpi" % name,
                        platform=amo.PLATFORM_ALL.id, status=file_status)
    # Update status *after* there are files: saving a file can recompute the
    # addon status, so the requested status must be written last.
    Addon.with_unlisted.get(pk=addon.id).update(status=addon_status)
    return addon
class TestQueue(TestCase):
    """Tests common attributes and coercions that each view must support."""
    # Abstract base: concrete subclasses supply Queue/new_file/new_search_ext.
    __test__ = False  # this is an abstract test case
    listed = True  # Are we testing listed or unlisted queues?

    def test_latest_version(self):
        # Oldest first; the queue row must surface the newest version.
        self.new_file(version=u'0.1', created=self.days_ago(2))
        self.new_file(version=u'0.2', created=self.days_ago(1))
        self.new_file(version=u'0.3')
        entry = self.Queue.objects.get()
        assert entry.latest_version == '0.3'

    def test_file_platforms(self):
        # Here's a dupe platform in another version:
        self.new_file(version=u'0.1', platform=amo.PLATFORM_MAC,
                      created=self.days_ago(1))
        self.new_file(version=u'0.2', platform=amo.PLATFORM_LINUX)
        self.new_file(version=u'0.2', platform=amo.PLATFORM_MAC)
        entry = self.Queue.objects.get()
        assert sorted(entry.platforms) == [amo.PLATFORM_LINUX.id,
                                           amo.PLATFORM_MAC.id]

    def test_file_applications(self):
        self.new_file(version=u'0.1', application=amo.FIREFOX)
        self.new_file(version=u'0.1', application=amo.THUNDERBIRD)
        # Duplicate:
        self.new_file(version=u'0.1', application=amo.FIREFOX)
        entry = self.Queue.objects.get()
        assert sorted(entry.application_ids) == [amo.FIREFOX.id,
                                                 amo.THUNDERBIRD.id]

    def test_addons_disabled_by_user_are_hidden(self):
        created = self.new_file(version=u'0.1')
        created['addon'].update(disabled_by_user=True)
        assert list(self.Queue.objects.all()) == []

    def test_addons_disabled_by_admin_are_hidden(self):
        created = self.new_file(version=u'0.1')
        created['addon'].update(status=amo.STATUS_DISABLED)
        assert list(self.Queue.objects.all()) == []

    def test_reviewed_files_are_hidden(self):
        self.new_file(name='Unreviewed', version=u'0.1')
        self.new_file('Already Reviewed', '0.1',
                      amo.STATUS_PUBLIC, amo.STATUS_NULL)
        names = sorted(row.addon_name for row in self.Queue.objects.all())
        assert names == ['Unreviewed']

    def test_search_extensions(self):
        self.new_search_ext('Search Tool', '0.1')
        entry = self.Queue.objects.get()
        assert entry.addon_name == u'Search Tool'
        assert entry.application_ids == []
        assert entry.platforms == [amo.PLATFORM_ALL.id]

    def test_count_all(self):
        # Two add-ons, two versions each: the queue counts add-ons, not files.
        for addon_name in ('Addon 1', 'Addon 2'):
            self.new_file(name=addon_name, version=u'0.1')
            self.new_file(name=addon_name, version=u'0.2')
        assert self.Queue.objects.all().count() == 2
class TestPendingQueue(TestQueue):
    """Concrete queue tests for the pending (update) review queue."""
    __test__ = True
    Queue = ViewPendingQueue

    def new_file(self, name=u'Pending', version=u'1.0',
                 addon_status=amo.STATUS_PUBLIC,
                 file_status=amo.STATUS_UNREVIEWED, **kw):
        # Create the addon and everything related. Note that we are cheating,
        # the addon status might not correspond to the files attached. This is
        # important not to re-save() attached versions and files afterwards,
        # because that might alter the addon status.
        return create_addon_file(name, version, addon_status, file_status,
                                 listed=self.listed, **kw)

    def new_search_ext(self, name, version, **kw):
        return create_search_ext(name, version,
                                 amo.STATUS_PUBLIC, amo.STATUS_UNREVIEWED,
                                 listed=self.listed, **kw)

    def test_waiting_time(self):
        self.new_file(name='Addon 1', version=u'0.1')
        Version.objects.update(created=datetime.utcnow())
        entry = self.Queue.objects.all()[0]
        assert entry.waiting_time_days == 0
        # Time zone will be off, hard to test this.
        assert entry.waiting_time_hours is not None

    def test_flags_admin_review(self):
        created = self.new_file(version=u'0.1')
        created['addon'].update(admin_review=True)
        entry = self.Queue.objects.get()
        assert entry.flags == [('admin-review', 'Admin Review')]

    def test_flags_info_request(self):
        self.new_file(version=u'0.1', version_kw={'has_info_request': True})
        entry = self.Queue.objects.get()
        assert entry.flags == [('info', 'More Information Requested')]

    def test_flags_editor_comment(self):
        self.new_file(version=u'0.1', version_kw={'has_editor_comment': True})
        entry = self.Queue.objects.get()
        assert entry.flags == [('editor', 'Contains Editor Comment')]

    def test_flags_jetpack(self):
        self.new_file(version=u'0.1', file_kw={'jetpack_version': '1.8',
                                               'no_restart': True})
        entry = self.Queue.objects.get()
        assert entry.flags == [('jetpack', 'Jetpack Add-on')]

    def test_flags_requires_restart(self):
        self.new_file(version=u'0.1', file_kw={'no_restart': False})
        entry = self.Queue.objects.get()
        assert entry.flags == [('requires_restart', 'Requires Restart')]

    def test_flags_sources_provided(self):
        created = self.new_file(version=u'0.1')
        created['addon'].versions.update(source='/some/source/file')
        entry = self.Queue.objects.get()
        assert entry.flags == [('sources-provided', 'Sources provided')]

    def test_flags_webextension(self):
        self.new_file(version=u'0.1', file_kw={'is_webextension': True})
        entry = self.Queue.objects.get()
        assert entry.flags == [('webextension', 'WebExtension')]

    def test_no_flags(self):
        self.new_file(version=u'0.1')
        entry = self.Queue.objects.get()
        assert entry.flags == []
class TestFullReviewQueue(TestQueue):
    """Concrete queue tests for the full review (nomination) queue."""
    __test__ = True
    Queue = ViewFullReviewQueue

    def new_file(self, name=u'Nominated', version=u'1.0',
                 addon_status=amo.STATUS_NOMINATED,
                 file_status=amo.STATUS_UNREVIEWED, **kw):
        return create_addon_file(name, version, addon_status, file_status,
                                 listed=self.listed, **kw)

    def new_search_ext(self, name, version, **kw):
        return create_search_ext(name, version,
                                 amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED,
                                 listed=self.listed, **kw)

    def test_lite_review_addons_also_shows_up(self):
        # Both nominated and lite-and-nominated add-ons belong in this queue.
        create_addon_file('Full', '0.1',
                          amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED,
                          listed=self.listed)
        create_addon_file('Lite', '0.1',
                          amo.STATUS_LITE_AND_NOMINATED,
                          amo.STATUS_LITE, listed=self.listed)
        names = sorted(row.addon_name for row in self.Queue.objects.all())
        assert names == ['Full', 'Lite']

    def test_waiting_time(self):
        self.new_file(name='Addon 1', version=u'0.1')
        Version.objects.update(nomination=datetime.utcnow())
        entry = self.Queue.objects.all()[0]
        assert entry.waiting_time_days == 0
        # Time zone will be off, hard to test this.
        assert entry.waiting_time_hours is not None
class TestPreliminaryQueue(TestQueue):
    """Concrete queue tests for the preliminary review queue."""
    __test__ = True
    Queue = ViewPreliminaryQueue

    def new_file(self, name=u'Preliminary', version=u'1.0',
                 addon_status=amo.STATUS_LITE,
                 file_status=amo.STATUS_UNREVIEWED, **kw):
        return create_addon_file(name, version, addon_status, file_status,
                                 listed=self.listed, **kw)

    def new_search_ext(self, name, version, **kw):
        return create_search_ext(name, version,
                                 amo.STATUS_LITE, amo.STATUS_UNREVIEWED,
                                 listed=self.listed, **kw)

    def test_unreviewed_addons_are_in_q(self):
        # Both lite and fully unreviewed add-ons show up here.
        create_addon_file('Lite', '0.1',
                          amo.STATUS_LITE, amo.STATUS_UNREVIEWED,
                          listed=self.listed)
        create_addon_file('Unreviewed', '0.1',
                          amo.STATUS_UNREVIEWED, amo.STATUS_UNREVIEWED,
                          listed=self.listed)
        names = sorted(row.addon_name for row in self.Queue.objects.all())
        assert names == ['Lite', 'Unreviewed']

    def test_waiting_time(self):
        self.new_file(name='Addon 1', version=u'0.1')
        Version.objects.update(created=datetime.utcnow())
        entry = self.Queue.objects.all()[0]
        assert entry.waiting_time_days == 0
        # Time zone might be off due to your MySQL install, hard to test this.
        assert entry.waiting_time_min is not None
        assert entry.waiting_time_hours is not None
class TestUnlistedPendingQueue(TestPendingQueue):
    # Re-runs the pending-queue assertions against the unlisted view.
    Queue = ViewUnlistedPendingQueue
    listed = False
class TestUnlistedFullReviewQueue(TestFullReviewQueue):
    # Re-runs the full-review-queue assertions against the unlisted view.
    Queue = ViewUnlistedFullReviewQueue
    listed = False
class TestUnlistedPreliminaryQueue(TestPreliminaryQueue):
    # Re-runs the preliminary-queue assertions against the unlisted view.
    Queue = ViewUnlistedPreliminaryQueue
    listed = False
class TestUnlistedAllList(TestCase):
    """Tests for the listing of *all* unlisted add-ons (every status)."""
    Queue = ViewUnlistedAllList
    listed = False
    fixtures = ['base/users']
    def new_file(self, name=u'Preliminary', version=u'1.0',
                 addon_status=amo.STATUS_LITE,
                 file_status=amo.STATUS_UNREVIEWED, **kw):
        # Helper: one unlisted add-on + version + file per call.
        return create_addon_file(name, version, addon_status, file_status,
                                 listed=self.listed, **kw)
    def test_all_addons_are_in_q(self):
        # Unlike review queues, every status shows up here -- even deleted.
        self.new_file('Lite', addon_status=amo.STATUS_LITE,
                      file_status=amo.STATUS_UNREVIEWED)
        self.new_file('Unreviewed', addon_status=amo.STATUS_UNREVIEWED,
                      file_status=amo.STATUS_UNREVIEWED)
        self.new_file('Public', addon_status=amo.STATUS_PUBLIC,
                      file_status=amo.STATUS_PUBLIC)
        self.new_file('Nominated', addon_status=amo.STATUS_NOMINATED,
                      file_status=amo.STATUS_UNREVIEWED)
        self.new_file('Deleted', addon_status=amo.STATUS_PUBLIC,
                      file_status=amo.STATUS_PUBLIC)['addon'].delete()
        assert sorted(q.addon_name for q in self.Queue.objects.all()) == (
            ['Deleted', 'Lite', 'Nominated', 'Public', 'Unreviewed'])
    def test_authors(self):
        addon = self.new_file()['addon']
        bert = user_factory(username='bert')
        ernie = user_factory(username='ernie')
        AddonUser.objects.create(addon=addon, user=bert)
        AddonUser.objects.create(addon=addon, user=ernie)
        row = self.Queue.objects.all()[0]
        self.assertSetEqual(set(row.authors),
                            {(ernie.id, 'ernie'), (bert.id, 'bert')})
    def test_last_reviewed_version(self):
        today = datetime.today().date()
        self.new_file(name='addon123', version='1.0')
        v2 = self.new_file(name='addon123', version='2.0')['version']
        # pk=999 is a user from the 'base/users' fixture.
        log = amo.log(amo.LOG.PRELIMINARY_VERSION, v2, v2.addon,
                      user=UserProfile.objects.get(pk=999))
        self.new_file(name='addon123', version='3.0')
        row = self.Queue.objects.all()[0]
        assert row.review_date == today
        assert row.review_version_num == '2.0'
        assert row.review_log_id == log.id
    def test_no_developer_actions(self):
        # ADD_VERSION / EDIT_VERSION are developer actions and must not
        # count as reviews; only PRELIMINARY_VERSION does here.
        ver = self.new_file(name='addon456', version='1.0')['version']
        amo.log(amo.LOG.ADD_VERSION, ver, ver.addon,
                user=UserProfile.objects.get(pk=999))
        row = self.Queue.objects.all()[0]
        assert row.review_version_num is None
        ver = self.new_file(name='addon456', version='2.0')['version']
        amo.log(amo.LOG.PRELIMINARY_VERSION, ver, ver.addon,
                user=UserProfile.objects.get(pk=999))
        row = self.Queue.objects.all()[0]
        assert row.review_version_num == '2.0'
        ver = self.new_file(name='addon456', version='3.0')['version']
        amo.log(amo.LOG.EDIT_VERSION, ver, ver.addon,
                user=UserProfile.objects.get(pk=999))
        row = self.Queue.objects.all()[0]
        # v2.0 is still the last reviewed version.
        assert row.review_version_num == '2.0'
    def test_no_automatic_reviews(self):
        # Reviews logged by the task (automation) user must be ignored.
        ver = self.new_file(name='addon789', version='1.0')['version']
        amo.log(amo.LOG.PRELIMINARY_VERSION, ver, ver.addon,
                user=UserProfile.objects.get(pk=settings.TASK_USER_ID))
        row = self.Queue.objects.all()[0]
        assert row.review_version_num is None
    def test_latest_version(self):
        self.new_file(version=u'0.1', created=self.days_ago(2))
        self.new_file(version=u'0.2', created=self.days_ago(1))
        self.new_file(version=u'0.3')
        row = self.Queue.objects.get()
        assert row.latest_version == '0.3'
    def test_addons_disabled_by_user_are_hidden(self):
        f = self.new_file(version=u'0.1')
        f['addon'].update(disabled_by_user=True)
        assert list(self.Queue.objects.all()) == []
    def test_addons_disabled_by_admin_are_hidden(self):
        f = self.new_file(version=u'0.1')
        f['addon'].update(status=amo.STATUS_DISABLED)
        assert list(self.Queue.objects.all()) == []
    def test_count_all(self):
        self.new_file(name='Addon 1', version=u'0.1')
        self.new_file(name='Addon 1', version=u'0.2')
        self.new_file(name='Addon 2', version=u'0.1')
        self.new_file(name='Addon 2', version=u'0.2')
        assert self.Queue.objects.all().count() == 2
class TestEditorSubscription(TestCase):
    """Tests for update notifications sent to subscribed editors."""
    fixtures = ['base/addon_3615', 'base/users']
    def setUp(self):
        super(TestEditorSubscription, self).setUp()
        self.addon = Addon.objects.get(pk=3615)
        self.version = self.addon.current_version
        self.user_one = UserProfile.objects.get(pk=55021)
        self.user_two = UserProfile.objects.get(pk=999)
        # Subscribe both users so fan-out can be asserted below.
        for user in [self.user_one, self.user_two]:
            EditorSubscription.objects.create(addon=self.addon, user=user)
    def test_email(self):
        es = EditorSubscription.objects.get(user=self.user_one)
        es.send_notification(self.version)
        assert len(mail.outbox) == 1
        assert mail.outbox[0].to == [u'del@icio.us']
        assert mail.outbox[0].subject == (
            'Mozilla Add-ons: Delicious Bookmarks Updated')
    def test_notifications(self):
        # One mail per subscribed user.
        send_notifications(sender=self.version)
        assert len(mail.outbox) == 2
        emails = sorted([o.to for o in mail.outbox])
        assert emails == [[u'del@icio.us'], [u'regular@mozilla.com']]
    def test_notifications_clean(self):
        # Sending deletes the subscriptions (asserted via count below),
        # so a second send produces no mail.
        send_notifications(Version, self.version)
        assert EditorSubscription.objects.count() == 0
        mail.outbox = []
        send_notifications(Version, self.version)
        assert len(mail.outbox) == 0
    def test_notifications_beta(self):
        # Beta uploads must not notify subscribers.
        self.version.all_files[0].update(status=amo.STATUS_BETA)
        version_uploaded.send(sender=self.version)
        assert len(mail.outbox) == 0
    def test_signal_edit(self):
        # A plain save (edit) does not fire the upload signal.
        self.version.save()
        assert len(mail.outbox) == 0
    def test_signal_create(self):
        v = Version.objects.create(addon=self.addon)
        version_uploaded.send(sender=v)
        assert len(mail.outbox) == 2
        assert mail.outbox[0].subject == (
            'Mozilla Add-ons: Delicious Bookmarks Updated')
    def test_signal_create_twice(self):
        v = Version.objects.create(addon=self.addon)
        version_uploaded.send(sender=v)
        mail.outbox = []
        # Subscriptions were consumed by the first upload, so no new mail.
        v = Version.objects.create(addon=self.addon)
        version_uploaded.send(sender=v)
        assert len(mail.outbox) == 0
class TestReviewerScore(TestCase):
fixtures = ['base/users']
def setUp(self):
super(TestReviewerScore, self).setUp()
self.addon = amo.tests.addon_factory(status=amo.STATUS_NOMINATED)
self.user = UserProfile.objects.get(email='editor@mozilla.com')
def _give_points(self, user=None, addon=None, status=None):
user = user or self.user
addon = addon or self.addon
ReviewerScore.award_points(user, addon, status or addon.status)
def check_event(self, type, status, event, **kwargs):
self.addon.type = type
assert ReviewerScore.get_event(self.addon, status, **kwargs) == event
def test_events_addons(self):
types = {
amo.ADDON_ANY: None,
amo.ADDON_EXTENSION: 'ADDON',
amo.ADDON_THEME: 'THEME',
amo.ADDON_DICT: 'DICT',
amo.ADDON_SEARCH: 'SEARCH',
amo.ADDON_LPAPP: 'LP',
amo.ADDON_LPADDON: 'LP',
amo.ADDON_PLUGIN: 'ADDON',
amo.ADDON_API: 'ADDON',
amo.ADDON_PERSONA: 'PERSONA',
}
statuses = {
amo.STATUS_NULL: None,
amo.STATUS_UNREVIEWED: 'PRELIM',
amo.STATUS_PENDING: None,
amo.STATUS_NOMINATED: 'FULL',
amo.STATUS_PUBLIC: 'UPDATE',
amo.STATUS_DISABLED: None,
amo.STATUS_BETA: None,
amo.STATUS_LITE: 'PRELIM',
amo.STATUS_LITE_AND_NOMINATED: 'FULL',
amo.STATUS_DELETED: None,
amo.STATUS_REJECTED: None,
amo.STATUS_REVIEW_PENDING: None,
}
for tk, tv in types.items():
for sk, sv in statuses.items():
try:
event = getattr(amo, 'REVIEWED_%s_%s' % (tv, sv))
except AttributeError:
try:
event = getattr(amo, 'REVIEWED_%s' % tv)
except AttributeError:
event = None
self.check_event(tk, sk, event)
def test_award_points(self):
self._give_points()
assert ReviewerScore.objects.all()[0].score == (
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL])
def test_award_points_bonus(self):
user2 = UserProfile.objects.get(email='admin@mozilla.com')
bonus_days = 2
days = amo.REVIEWED_OVERDUE_LIMIT + bonus_days
addon_objects = create_addon_file(
u'AwardBonus',
u'1.0',
amo.STATUS_NOMINATED,
amo.STATUS_UNREVIEWED,
nomination=(datetime.now() - timedelta(days=days))
)
self._give_points(user2, addon_objects['addon'], 1)
score = ReviewerScore.objects.get(user=user2)
expected = (amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_PRELIM] +
(amo.REVIEWED_OVERDUE_BONUS * bonus_days))
assert score.score == expected
def test_award_moderation_points(self):
    """Moderation points are recorded with the review note key."""
    ReviewerScore.award_moderation_points(self.user, self.addon, 1)
    score = ReviewerScore.objects.all()[0]
    expected = amo.REVIEWED_SCORES.get(amo.REVIEWED_ADDON_REVIEW)
    assert score.score == expected
    assert score.note_key == amo.REVIEWED_ADDON_REVIEW
def test_get_total(self):
    """Totals only sum the points of the queried user."""
    other = UserProfile.objects.get(email='admin@mozilla.com')
    self._give_points()
    self._give_points(status=amo.STATUS_LITE)
    self._give_points(user=other, status=amo.STATUS_NOMINATED)
    full = amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL]
    prelim = amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_PRELIM]
    assert ReviewerScore.get_total(self.user) == full + prelim
    assert ReviewerScore.get_total(other) == full
def test_get_recent(self):
    """Recent scores come back newest first, for the queried user only."""
    other = UserProfile.objects.get(email='admin@mozilla.com')
    self._give_points()
    time.sleep(1)  # Wait 1 sec so ordering by created is checked.
    self._give_points(status=amo.STATUS_LITE)
    self._give_points(user=other)
    scores = ReviewerScore.get_recent(self.user)
    assert len(scores) == 2
    # Most recent first: the PRELIM review, then the FULL one.
    expected = [amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_PRELIM],
                amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL]]
    assert [s.score for s in scores] == expected
def test_get_leaderboards(self):
    """Leaderboard ranks, totals and per-type filtering are correct."""
    other = UserProfile.objects.get(email='regular@mozilla.com')
    self._give_points()
    self._give_points(status=amo.STATUS_LITE)
    self._give_points(user=other, status=amo.STATUS_NOMINATED)
    full = amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL]
    prelim = amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_PRELIM]
    leaders = ReviewerScore.get_leaderboards(self.user)
    assert leaders['user_rank'] == 1
    assert leaders['leader_near'] == []
    first = leaders['leader_top'][0]
    second = leaders['leader_top'][1]
    assert first['rank'] == 1
    assert first['user_id'] == self.user.id
    assert first['total'] == full + prelim
    assert second['rank'] == 2
    assert second['user_id'] == other.id
    assert second['total'] == full
    # Filtering by addon type only counts reviews of that type.
    self._give_points(
        user=other, addon=amo.tests.addon_factory(type=amo.ADDON_PERSONA))
    leaders = ReviewerScore.get_leaderboards(
        self.user, addon_type=amo.ADDON_PERSONA)
    assert len(leaders['leader_top']) == 1
    assert leaders['leader_top'][0]['user_id'] == other.id
def test_no_admins_or_staff_in_leaderboards(self):
    """Admin/staff accounts never appear on the leaderboard."""
    admin = UserProfile.objects.get(email='admin@mozilla.com')
    self._give_points()
    self._give_points(status=amo.STATUS_LITE)
    self._give_points(user=admin, status=amo.STATUS_NOMINATED)
    leaders = ReviewerScore.get_leaderboards(self.user)
    assert leaders['user_rank'] == 1
    assert leaders['leader_near'] == []
    top = leaders['leader_top']
    assert top[0]['user_id'] == self.user.id
    assert len(top) == 1  # Only the editor is here.
    assert admin.id not in [entry['user_id'] for entry in top], (
        'Unexpected admin user found in leaderboards.')
def test_get_leaderboards_last(self):
    """A last-place user sees the top three plus the two nearest ranks."""
    users = [UserProfile.objects.create(username='user-%s' % i)
             for i in range(6)]
    last_user = users.pop()
    for reviewer in users:
        self._give_points(user=reviewer)
    # Last user gets lower points by reviewing a persona.
    addon = self.addon
    addon.type = amo.ADDON_PERSONA
    self._give_points(user=last_user, addon=addon)
    leaders = ReviewerScore.get_leaderboards(last_user)
    assert leaders['user_rank'] == 6
    assert len(leaders['leader_top']) == 3
    assert len(leaders['leader_near']) == 2
def test_all_users_by_score(self):
    """Users are listed by descending total with their level names."""
    other = UserProfile.objects.get(email='regular@mozilla.com')
    # NOTE: mutates the module-level constant so the first level
    # boundary falls exactly at 180 points.
    amo.REVIEWED_LEVELS[0]['points'] = 180
    self._give_points()
    self._give_points(status=amo.STATUS_LITE)
    self._give_points(user=other, status=amo.STATUS_NOMINATED)
    users = ReviewerScore.all_users_by_score()
    assert len(users) == 2
    # First user reached the first level.
    assert users[0]['total'] == 180
    assert users[0]['user_id'] == self.user.id
    assert users[0]['level'] == amo.REVIEWED_LEVELS[0]['name']
    # Second user is below the level threshold.
    assert users[1]['total'] == 120
    assert users[1]['user_id'] == other.id
    assert users[1]['level'] == ''
def test_caching(self):
    """Each getter caches its result until new points are awarded."""
    self._give_points()
    getters = (ReviewerScore.get_total, ReviewerScore.get_recent,
               ReviewerScore.get_leaderboards, ReviewerScore.get_breakdown)
    for getter in getters:
        with self.assertNumQueries(1):
            getter(self.user)
        with self.assertNumQueries(0):
            getter(self.user)
    # New points invalidates all caches.
    self._give_points()
    for getter in getters:
        with self.assertNumQueries(1):
            getter(self.user)
class TestRereviewQueueTheme(TestCase):

    def _persona_rqt(self, addon=None):
        # Create a RereviewQueueTheme entry for a (fresh) persona add-on.
        addon = addon or addon_factory(type=amo.ADDON_PERSONA)
        rqt = RereviewQueueTheme.objects.create(
            theme=addon.persona, header='', footer='')
        return addon, rqt

    def test_manager_soft_delete_addons(self):
        """Test manager excludes soft delete add-ons."""
        # Normal RQT object.
        self._persona_rqt()
        # Deleted add-on RQT object.
        addon, _ = self._persona_rqt()
        addon.delete()
        assert RereviewQueueTheme.objects.count() == 1
        assert RereviewQueueTheme.unfiltered.count() == 2

    def test_footer_path_without_footer(self):
        _, rqt = self._persona_rqt()
        assert rqt.footer_path == ''

    def test_footer_url_without_footer(self):
        _, rqt = self._persona_rqt()
        assert rqt.footer_url == ''

    def test_filter_for_many_to_many(self):
        # Check https://bugzilla.mozilla.org/show_bug.cgi?id=1142035.
        addon = addon_factory(type=amo.ADDON_PERSONA)
        rqt = RereviewQueueTheme.objects.create(theme=addon.persona)
        assert addon.persona.rereviewqueuetheme_set.get() == rqt
        # Delete the addon: it shouldn't be listed anymore.
        addon.update(status=amo.STATUS_DELETED)
        assert addon.persona.rereviewqueuetheme_set.all().count() == 0
|
|
#!/usr/bin/env python3
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
# import imp
import glob
from time import sleep
import fileinput
import numpy as np
import pandas as pd
# from small_script.variable_test import variable_test
# from small_script.variable_test2 import variable_test2
import subprocess
# from small_script.myFunctions import compute_theta_for_each_helix
# from small_script.myFunctions import *
from collections import defaultdict
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.Polypeptide import three_to_one
# Useful codes
# os.system("awk '{print $NF}' all_wham.dat > e_total")
# tr " " "\n"
# sed 1d
# sort -u -k 3
# sed -e 's/+T//'
# import re
# numbers = re.compile(r'(\d+)')
# def numericalSort(value):
# parts = numbers.split(value)
# parts[1::2] = map(int, parts[1::2])
# return parts
# mypath = os.environ["PATH"]
# os.environ["PATH"] = "/home/wl45/python/bin:/home/wl45/opt:" + mypath
# my_env = os.environ.copy()
# Command-line interface for this playground script. --day selects which
# top-level branch runs; --mode selects a sub-action within that branch.
parser = argparse.ArgumentParser(description="This is my playground for current project")
parser.add_argument("-r", "--run", help="test mode",
                    action="store_true")
parser.add_argument("-s", "--see", help="test mode",
                    action="store_true")
# parser.add_argument("-d", "--debug", action="store_true", default=False)
parser.add_argument("-m", "--mode", type=int, default=0)
parser.add_argument("-d", "--day", type=str, default="someday")
parser.add_argument("-l", "--label", type=str, default="label")
parser.add_argument("-t", "--test", action="store_true", default=False)
args = parser.parse_args()
# if args.test:
#     do = print
# else:
#     do = os.system
# Append the exact invocation to a local log so runs can be replayed later.
with open('cmd_optimization.txt', 'a') as f:
    f.write(' '.join(sys.argv))
    f.write('\n')
def do(cmd, get=False, show=True):
    """Run *cmd* through the shell.

    With get=True, return the command's decoded stdout (echoed when
    show=True); otherwise return the shell exit status.
    """
    if not get:
        return subprocess.Popen(cmd, shell=True).wait()
    captured = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, shell=True).communicate()[0].decode()
    if show:
        print(captured, end="")
    return captured
# Shorthand so the script can 'cd' like a shell.
cd = os.chdir

# SLURM template for a 12-partition tempering LAMMPS run; '{}' receives
# the 2xov_{} input-file suffix.
base_run_slurm = '''\
#!/bin/bash
#SBATCH --job-name=CTBP_WL
#SBATCH --account=ctbp-common
#SBATCH --partition=ctbp-common
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=12
#SBATCH --mem-per-cpu=1G
#SBATCH --time=1-00:00:00
#SBATCH --mail-user=luwei0917@gmail.com
#SBATCH --mail-type=FAIL
echo "My job ran on:"
echo $SLURM_NODELIST
module load GCC/4.9.3 OpenMPI/1.8.8
srun /home/wl45/build/awsem_lipid_fluctuations/src/lmp_mpi -p 12x1 -in 2xov_{}.in\n'''

# Generic single-task template; '{}' receives the command to srun.
base_slurm = '''\
#!/bin/bash
#SBATCH --job-name=CTBP_WL
#SBATCH --account=ctbp-common
#SBATCH --partition=ctbp-common
#SBATCH --ntasks=1
#SBATCH --threads-per-core=1
#SBATCH --mem-per-cpu=1G
#SBATCH --time=1-00:00:00
#SBATCH --mail-user=luwei0917@gmail.com
#SBATCH --mail-type=FAIL
#SBATCH -o outs/slurm-%j.out
echo "My job ran on:"
echo $SLURM_NODELIST
srun {}\n'''

# Single-task template on the casp14 reservation (GPU lines disabled).
base_casp14_slurm = '''\
#!/bin/bash
#SBATCH --reservation=casp14
#SBATCH --ntasks=1
#SBATCH --threads-per-core=1
#SBATCH --cpus-per-task=1
##SBATCH --gres=gpu:1
##SBATCH --gres=gpu:1
#SBATCH --time=1:00:00
##SBATCH --mem-per-cpu=10G
#SBATCH --export=ALL
#SBATCH -o outs/slurm-%j.out
#SBATCH --mail-user=luwei0917@gmail.com
echo "My job ran on:"
echo $SLURM_NODELIST
srun {}\n'''

# Short (4 h) single-task template on the scavenge partition.
scavenge_slurm = '''\
#!/bin/bash
#SBATCH --job-name=CTBP_WL
#SBATCH --account=commons
#SBATCH --partition=scavenge
#SBATCH --ntasks=1
#SBATCH --threads-per-core=1
#SBATCH --cpus-per-task=1
#SBATCH --mem-per-cpu=1G
#SBATCH --time=04:00:00
#SBATCH --mail-user=luwei0917@gmail.com
#SBATCH --mail-type=FAIL
#SBATCH -o outs/slurm-%j.out
echo "My job ran on:"
echo $SLURM_NODELIST
srun {}\n'''

# GPU (volta) template on scavenge; loads the CUDA toolchain first.
gpu_base_slurm = '''\
#!/bin/bash
#SBATCH --account=commons
#SBATCH --partition=scavenge
#SBATCH --ntasks=1
#SBATCH --threads-per-core=1
#SBATCH --cpus-per-task=1
#SBATCH --gres=gpu:volta:1
#SBATCH --time=04:00:00
#SBATCH --mem-per-cpu=10G
#SBATCH --export=ALL
#SBATCH -o outs/slurm-%j.out
#SBATCH --mail-user=luwei0917@gmail.com
module load GCC/8.3.0 CUDA/10.1.168
srun {}
'''

# GPU (volta) template on the commons partition (23 h limit).
gpu_commons_slurm = '''\
#!/bin/bash
#SBATCH --account=commons
#SBATCH --partition=commons
#SBATCH --ntasks=1
#SBATCH --threads-per-core=1
#SBATCH --cpus-per-task=1
#SBATCH --gres=gpu:volta:1
#SBATCH --time=23:00:00
#SBATCH --mem-per-cpu=10G
#SBATCH --export=ALL
#SBATCH -o outs/slurm-%j.out
#SBATCH --mail-user=luwei0917@gmail.com
module load GCC/8.3.0 CUDA/10.1.168
srun {}
'''
# def replace(TARGET, FROM, TO):
# do("sed -i.bak 's/{}/{}/g' {}".format(FROM,TO,TARGET))
def scancel_jobs_in_folder(folder):
    """Cancel every SLURM job whose slurm-<id>.out file lives in *folder*.

    Job ids are recovered from the 'slurm-*' file names found under the
    folder, then cancelled one by one with scancel. Restores the previous
    working directory level afterwards (cd "..").
    """
    # BUG FIX: the original did cd(bias) with an undefined name 'bias';
    # the intended target is the 'folder' argument.
    cd(folder)
    cmd = "find -name 'slurm-*' | rev | awk -F'[-.]' '{print $2}' | rev"
    lines = getFromTerminal(cmd).splitlines()
    for line in lines:
        # print(line)
        do("scancel " + line)
    cd("..")
# Fixed 30-minute job that runs the feb28/mode-2 localQ computation.
localQ_slurm = '''#!/bin/bash
#SBATCH --job-name=CTBP_WL
#SBATCH --account=ctbp-common
#SBATCH --partition=ctbp-common
#SBATCH --ntasks=1
#SBATCH --mem-per-cpu=1G
#SBATCH --time=00:30:00
#SBATCH --mail-user=luwei0917@gmail.com
#SBATCH --mail-type=FAIL
echo "My job ran on:"
echo $SLURM_NODELIST
srun python3 ~/opt/gg_server.py -d feb28 -m 2
'''
# Fixed 30-minute job that runs the mar03/mode-3 task.
quick_slurm = '''#!/bin/bash
#SBATCH --job-name=CTBP_WL
#SBATCH --account=ctbp-common
#SBATCH --partition=ctbp-common
#SBATCH --ntasks=1
#SBATCH --mem-per-cpu=1G
#SBATCH --time=00:30:00
#SBATCH --mail-user=luwei0917@gmail.com
#SBATCH --mail-type=FAIL
echo "My job ran on:"
echo $SLURM_NODELIST
srun python3 ~/opt/gg_server.py -d mar03 -m 3
'''
# Large-memory (30G) job for the free-energy/PMF computation; '{}'
# receives the pulling_compute-pmf.py arguments.
freeEnergy = """\
#!/bin/bash
#SBATCH --job-name=CTBP_WL
#SBATCH --account=ctbp-common
#SBATCH --partition=ctbp-common
#SBATCH --ntasks=1
#SBATCH --threads-per-core=1
#SBATCH --mem-per-cpu=30G
#SBATCH --time=23:00:00
#SBATCH --mail-user=luwei0917@gmail.com
#SBATCH --mail-type=FAIL
echo "My job ran on:"
echo $SLURM_NODELIST
srun python2 ~/opt/pulling_compute-pmf.py {}
"""
# 90-minute template used by compute_quantity(); '{}' receives the command.
quick_template_slurm = '''\
#!/bin/bash
#SBATCH --job-name=CTBP_WL
#SBATCH --account=ctbp-common
#SBATCH --partition=ctbp-common
#SBATCH --ntasks=1
#SBATCH --mem-per-cpu=1G
#SBATCH --time=01:30:00
#SBATCH --mail-user=luwei0917@gmail.com
#SBATCH --mail-type=FAIL
echo "My job ran on:"
echo $SLURM_NODELIST
srun {}
'''
# As above but with 20G per cpu; '{}' receives gg_server.py arguments.
quick_template_large_mem_slurm = '''\
#!/bin/bash
#SBATCH --job-name=CTBP_WL
#SBATCH --account=ctbp-common
#SBATCH --partition=ctbp-common
#SBATCH --ntasks=1
#SBATCH --mem-per-cpu=20G
#SBATCH --time=01:30:00
#SBATCH --mail-user=luwei0917@gmail.com
#SBATCH --mail-type=FAIL
echo "My job ran on:"
echo $SLURM_NODELIST
srun python3 ~/opt/gg_server.py {}
'''
def getFromTerminal(CMD):
    """Return the decoded stdout produced by shell command *CMD*."""
    proc = subprocess.Popen(CMD, stdout=subprocess.PIPE, shell=True)
    out, _ = proc.communicate()
    return out.decode()
def split_proteins_name_list(pdb_per_txt=1):
    """Split ./protein_list into numbered chunk files.

    Writes proteins_name_list/proteins_name_list_<k>.txt files holding
    up to *pdb_per_txt* lines each, prints and returns the number of
    chunk files written. The proteins_name_list/ directory must exist.
    """
    with open("protein_list") as f:
        entries = f.readlines()
    total = len(entries)
    # total = 100  # for testing
    pos = 0
    chunk_index = 0
    while pos < total:
        chunk = entries[pos:pos + pdb_per_txt]
        out_name = f"proteins_name_list/proteins_name_list_{chunk_index}.txt"
        with open(out_name, "w") as out:
            out.writelines(chunk)
        pos += len(chunk)
        chunk_index += 1
    print(chunk_index)
    return chunk_index
def compute_quantity(cmd="", queue=1, sim_list=["0"], bias=""):
    """Compute some quantity.
    Queue 0 is ctbp-common, 1 is interactive, 2 is commons.
    bias is pre name of simulation folder
    """
    # print(cmd)
    # NOTE(review): mutable default sim_list=["0"] is shared across calls,
    # but it is only read here, so it is left as-is.
    simulation_list = glob.glob(f"{bias}*")
    print(simulation_list)
    # Timestamp makes each generated slurm file name unique.
    dateAndTime = datetime.today().strftime('%d_%h_%H%M%S')
    # print(sim_list)
    for sim in sim_list:
        for folder in simulation_list:
            # Descend into <folder>/<sim>, submit, then climb back out.
            cd(folder)
            cd(sim)
            print(folder)
            quick = quick_template_slurm.format(cmd)
            with open(f"quick_{dateAndTime}.slurm", "w") as f:
                # Rewrite the partition/account directives for the queue.
                if queue == 1:
                    quick = quick.replace("--time=01:30:00", "--time=00:30:00")
                    quick = quick.replace("#SBATCH --account=ctbp-common", "")
                    quick = quick.replace("ctbp-common", "interactive")
                if queue == 2:
                    quick = quick.replace("ctbp-common", "commons")
                f.write(quick)
            do(f"sbatch quick_{dateAndTime}.slurm")
            cd("../..")
def compute_completeZ(temper=False, **kwargs):
    """Queue the complete-Z computation in every simulation folder."""
    print("compute completeZ")
    script = ("python3 ~/opt/gg_server.py -d mar10 -m 7" if temper
              else "python3 ~/opt/gg_server.py -d mar16 -m 3")
    compute_quantity(cmd=script, **kwargs)
def compute_disReal(temper=False, targetMode=0, name="2xov", **kwargs):
    """Queue the real-distance computation for each simulation folder.

    Tempering runs use temper_compute_HQ.py, plain runs use
    find_distance.py; *targetMode* is forwarded via -t.
    """
    print("compute DisReal")
    if temper:
        script = f"python3 ~/opt/small_script/temper_compute_HQ.py {name} -t {targetMode}"
    else:
        script = f"python3 ~/opt/small_script/find_distance.py {name} -t {targetMode}"
    compute_quantity(cmd=script, **kwargs)
# def compute_NsideEnergy(temper=False, targetMode=0, name="2xov", **kwargs):
# print("compute DisReal")
# # if targetMode == 0:
# if temper:
# cmd = f"python3 ~/opt/gg_server.py -d mar17 -m 1"
# else:
# cmd = f"python3 ~/opt/small_script/find_distance.py {name} -t {targetMode}"
# # elif targetMode == 1:
# # if temper:
# # cmd = "python3 ~/opt/gg_server.py -d apr01 -m 3"
# # else:
# # cmd = f"python3 ~/opt/small_script/find_distance.py -t {targetMode}"
# compute_quantity(cmd=cmd, **kwargs)
def let_compute_localQ(temper=False, **kwargs):
    """Queue the local-Q computation (tempering runs only).

    Raises:
        ValueError: if temper is False. The non-tempering command was
            never implemented; the original code fell through to the
            compute_quantity call with 'cmd' unbound and crashed with a
            confusing NameError.
    """
    print("compute LocalQ")
    if temper:
        cmd = "python3 ~/opt/gg_server.py -d mar28 -m 1"
    else:
        # BUG FIX: fail fast with a clear message instead of NameError.
        raise ValueError("let_compute_localQ only supports temper=True")
    compute_quantity(cmd=cmd, **kwargs)
# --- Dispatch: --day common ------------------------------------------------
# Maintenance actions on the dis_* simulation folders, selected by --mode.
# Helpers used here (rerun, process_complete_temper_data_3,
# continueRunConvertion) are defined elsewhere in this file.
if args.day == "common":
    if args.mode == 4:
        # Re-evaluate Go-model (4-helix) energies inside replica "1" of
        # every dis_* folder.
        bias = "dis"
        simulation_list = glob.glob(f"{bias}_*")
        # simulation_list = ['dis_30.0']
        # simulation_list = ['dis_116.0', 'dis_332.0', 'dis_128.0', 'dis_266.0', 'dis_296.0', 'dis_290.0', 'dis_314.0', 'dis_176.0', 'dis_272.0', 'dis_284.0', 'dis_158.0', 'dis_338.0']
        # simulation_list = ['dis_326.0', 'dis_206.0', 'dis_254.0', 'dis_344.0', 'dis_308.0', 'dis_134.0', 'dis_152.0', 'dis_194.0', 'dis_320.0', 'dis_200.0', 'dis_212.0', 'dis_110.0', 'dis_248.0', 'dis_188.0', 'dis_242.0', 'dis_218.0', 'dis_350.0', 'dis_164.0', 'dis_236.0', 'dis_146.0', 'dis_182.0', 'dis_140.0', 'dis_122.0', 'dis_302.0', 'dis_224.0', 'dis_230.0', 'dis_278.0', 'dis_260.0', 'dis_170.0']
        # simulation_list = ['dis_86.0', 'dis_84.0', 'dis_76.0', 'dis_72.0', 'dis_54.0', 'dis_70.0', 'dis_50.0', 'dis_56.0', 'dis_80.0', 'dis_30.0', 'dis_88.0', 'dis_44.0', 'dis_46.0', 'dis_96.0', 'dis_38.0']
        print(simulation_list)
        for dis in simulation_list:
            print(dis)
            cd(dis)
            # cd("0")
            cd("1")
            # cd("2")
            # cd("3")
            # rerun(extra="Go", mode=2)
            # rerun(extra="Go_3helix", mode=3)
            rerun(extra="Go_4helix", mode=4)
            cd("../..")
    if args.mode == 3:
        # Aggregate tempering data; on missing files, back up the logs
        # once and retry.
        goEnergy = False
        goEnergy3H = False
        goEnergy4H = True
        rerun = 3  # NOTE(review): shadows the rerun() helper used by mode 4
        end = 2
        cwd = os.getcwd()
        print(cwd)
        pre = '/'.join(cwd.split("/")[:-2]) + "/"
        print(pre)
        # exit()
        # pre = "/scratch/wl45/apr_2018/sixth/"
        data_folder = "/scratch/wl45/aug_2018/02_week/freeEnergy/all_data_folder/"
        # folder_list = ["rg_0.15_lipid_1.0_mem_1_go_0.8_long"]
        folder_list = [cwd.split("/")[-2]]
        # label = "sixth_long"
        label = args.label
        # with open(label, "w") as f:
        #     f.write("test\n")
        # cd("simulation")
        # exit()
        # folder_list = ["23oct/memb_3_rg_0.1_lipid_1_extended"]
        # folder_list = ["rgWidth_memb_3_rg_0.1_lipid_1_extended",
        #                "rgWidth_memb_3_rg_0.1_lipid_1_topology",
        #                "expand_distance_rgWidth_memb_3_rg_0.1_lipid_1_extended"]
        for attempt in range(2):
            try:
                process_complete_temper_data_3(pre, data_folder, folder_list, rerun=rerun, end=end, average_z=True, disReal=True, dis_h56=True, localQ=False, goEnergy=goEnergy, goEnergy3H=goEnergy3H, goEnergy4H=goEnergy4H, label=label)
                break
            except FileNotFoundError:
                # Missing log: snapshot log.lammps into log<rerun>/ for
                # every dis_* folder, then retry once.
                bias = "dis"
                simulation_list = glob.glob(f"{bias}_*")
                # simulation_list = ['dis_30.0']
                # simulation_list = ['dis_86.0', 'dis_84.0', 'dis_76.0', 'dis_72.0', 'dis_54.0', 'dis_70.0', 'dis_50.0', 'dis_56.0', 'dis_80.0', 'dis_30.0', 'dis_88.0', 'dis_44.0', 'dis_46.0', 'dis_96.0', 'dis_38.0']
                print(simulation_list)
                for dis in simulation_list:
                    print(dis)
                    cd(dis)
                    i = rerun
                    i_plus_one = i +1
                    if os.path.exists(f"log{i}"):
                        do(f"mv log{i} back_log{i}")  # in case of override
                    do(f"mkdir -p log{i}")
                    do(f"cp log.lammps log{i}/")
                    cd("..")
    if args.mode == 2:
        # Extend each dis_* tempering run by one more leg (leg 3),
        # converting restart data and submitting a fresh slurm job.
        bias = "dis"
        simulation_list = glob.glob(f"{bias}_*")
        # simulation_list = ['dis_30.0']
        # simulation_list = ['dis_86.0', 'dis_84.0', 'dis_76.0', 'dis_72.0', 'dis_54.0', 'dis_70.0', 'dis_50.0', 'dis_56.0', 'dis_80.0', 'dis_30.0', 'dis_88.0', 'dis_44.0', 'dis_46.0', 'dis_96.0', 'dis_38.0']
        print(simulation_list)
        for dis in simulation_list:
            print(dis)
            cd(dis)
            i = 2
            i_plus_one = i +1
            # do(f"mv log{i} back_log{i}")  # in case of override
            # do(f"mkdir -p log{i}")
            # do(f"cp log.lammps log{i}/")
            # my_file = sys.Path(f"{i_plus_one}")
            # if my_file.is_dir():
            #     print("Attension")
            #     exit()
            continueRunConvertion(n=12, rerun=i)
            # continueRunConvertion(n=12, rerun=i, convert_read_data=True)
            do(f"mkdir {i_plus_one}")
            # do(f"sed 's/2xov_{i}/2xov_{i_plus_one}/g' run_{i}.slurm > run_{i_plus_one}.slurm")
            # replace(f"run_{i_plus_one}.slurm", "/home/ns24/lmp_mpi", "/home/wl45/build/awsem_lipid_fluctuations/src/lmp_mpi")
            # do(f"sbatch run_{i_plus_one}.slurm")
            run_slurm = base_run_slurm.format(i_plus_one)
            with open(f"run_{i_plus_one}.slurm", "w") as r:
                r.write(run_slurm)
            # replace(f"run_{i_plus_one}.slurm", "ctbp-common", "commons")
            do(f"sbatch run_{i_plus_one}.slurm")
            cd("..")
    if args.mode == 1:
        # Queue distance computations (target modes 2 and 3) for leg "2"
        # on the commons queue.
        queue = 2
        # i = "0"
        # i = "1"
        i = "2"
        # i = "3"
        # let_compute_localQ(temper=True, bias="dis_", sim_list=[i], queue=1)
        # compute_disReal(temper=True, bias="dis_", sim_list=[i], queue=queue)
        # compute_disReal(temper=True, targetMode=1, bias="dis_", sim_list=[i], queue=queue)
        # compute_completeZ(temper=True, bias="dis_", sim_list=[i], queue=queue)
        compute_disReal(temper=True, targetMode=2, bias="dis_", sim_list=[i], queue=queue)
        compute_disReal(temper=True, targetMode=3, bias="dis_", sim_list=[i], queue=queue)
def removeResXfromlist():
    """Filter CATH S20 fasta entries whose sequence contains 'X'.

    Reads database/cath-dataset-nonredundant-S20.atom.fa (alternating
    header/sequence lines). Pairs whose sequence has no unknown residue
    'X' are copied to ...S20Clean.atom.fa, and their 7-character domain
    ids (parsed from the header) are appended to ...S20Clean.list.
    """
    # BUG FIX: 'import re' at module top is commented out, so re was an
    # unresolved name here; import it locally.
    import re
    with open("database/cath-dataset-nonredundant-S20Clean.list", "w") as out2:
        with open("database/cath-dataset-nonredundant-S20Clean.atom.fa", "w") as out:
            with open("database/cath-dataset-nonredundant-S20.atom.fa", "r") as f:
                count = 0
                for l in f:
                    if count % 2 == 0:
                        # Header line: remember it and extract the domain id.
                        assert (l[0] == ">")
                        tmp = l
                        name = re.search(r'>cath\|(.*)\|(\w{7})\/(.*)', l).group(2)
                    else:
                        # Sequence line: keep the pair only when clean.
                        assert (l[0] != ">")
                        if "X" not in l:
                            out.write(tmp)
                            out.write(l)
                            out2.write(name + "\n")
                    count += 1
def removeExtraName():
    """
    Blank out the altloc character at column 17 of every PDB line,
    e.g. change
    ATOM    193  CB BMET A  30     -20.305 -21.245 -45.095  0.50 10.77
    to
    ATOM    193  CB  MET A  30     -20.305 -21.245 -45.095  0.50 10.77
    reading from database/dompdb_origin/ and writing the cleaned copies
    into database/dompdb_cleaned/.
    """
    for source in glob.glob("database/dompdb_origin/*"):
        target = source.replace("dompdb_origin", "dompdb_cleaned")
        with open(source, "r") as f, open(target, "w") as out:
            for line in f:
                chars = list(line)
                chars[16] = " "
                out.write("".join(chars))
def isComplete(a):
    """Return 1 if every residue of structure *a* has its backbone atoms.

    Walks model -> chain -> residue and requires CA, N, C for every
    residue, plus CB unless the residue is glycine. A missing atom makes
    the residue's item lookup raise KeyError, in which case the residue
    is printed and 0 is returned.
    """
    for model in a:
        for chain in model:
            for res in chain:
                try:
                    if res["CA"] is not None:
                        pass
                    if res.get_resname() == "GLY" or res["CB"] is not None:
                        pass
                    if res["N"] is not None:
                        pass
                    if res["C"] is not None:
                        pass
                except KeyError:
                    # BUG FIX: was a bare 'except' that also swallowed
                    # KeyboardInterrupt/SystemExit and hid unrelated bugs.
                    print(res)
                    return 0
    return 1
def generate_multiShuffle(fullName, alignmentLocation="./", location="./", num_decoys=1000, nameMode=0):
    """Write *num_decoys* letter-shuffled decoy sequences for *fullName*.

    Reads alignment sequences (the filtered file for nameMode 0, the raw
    one for nameMode 1), repeats the pool if it is smaller than
    num_decoys, then samples entries and shuffles each one's letters into
    decoys/multiShuffle/<fullName>.decoys under *location*.
    """
    if nameMode == 0:
        seq_file = alignmentLocation + f"alignments/{fullName}_filtered_0.05.seqs"
    elif nameMode == 1:
        seq_file = alignmentLocation + f"alignments/{fullName}.seqs"
    with open(seq_file) as f:
        pool = f.readlines()
    size = len(pool)
    if size < num_decoys:
        pool = pool * (num_decoys // size + 1)
    print(fullName, len(pool), size)
    with open(location + f"decoys/multiShuffle/{fullName}.decoys", "w") as out:
        for picked in random.sample(pool, num_decoys):
            stripped = picked.strip()
            out.write(''.join(random.sample(stripped, len(stripped))) + "\n")
def waitForJobs(jobIdList, sleepInterval=30, userName="wl45"):
    """Block until none of *jobIdList* appears in squeue for *userName*.

    Returns immediately for an empty list; otherwise polls squeue every
    *sleepInterval* seconds.
    """
    from datetime import datetime as dt
    if not jobIdList:
        return
    while True:
        print(f"Waiting for previous jobs {jobIdList}", dt.now())
        queue_listing = getFromTerminal(f"squeue -u {userName}")
        if not any(jobId in queue_listing for jobId in jobIdList):
            break
        time.sleep(sleepInterval)
    print("Continue Next Script")
# Named protein sets used throughout the optimization/analysis runs.
# (Older variants that also carried a step count are kept for reference.)
# dataset = {"old":("1R69, 1UTG, 3ICB, 256BA, 4CPV, 1CCR, 2MHR, 1MBA, 2FHA".split(", "), 40),
#            "new":("1FC2C, 1ENH, 2GB1, 2CRO, 1CTF, 4ICB".split(", "), 80),
#            "test":(["t089", "t120", "t251", "top7", "1ubq", "t0766", "t0778", "t0782", "t0792", "t0803", "t0815", "t0833", "t0842", "t0844"], 40)}
# dataset = {"old":("1R69, 1UTG, 3ICB, 256BA, 4CPV, 1CCR, 2MHR".split(", "), 40),
#            "new":("1FC2C, 1ENH, 2GB1, 2CRO, 1CTF, 4ICB".split(", "), 80),
#            "test":(["t089", "t120", "t251", "top7", "1ubq", "t0766", "t0778", "t0782", "t0792", "t0803", "t0815", "t0833", "t0842", "t0844"], 40),
#           }
dataset = {"old":"1R69, 1UTG, 3ICB, 256BA, 4CPV, 1CCR, 2MHR, 1MBA, 2FHA".split(", "),
           "new":"1FC2C, 1ENH, 2GB1, 2CRO, 1CTF, 4ICB".split(", "),
           "test":["t089", "t120", "t251", "top7", "1ubq", "t0766", "t0778", "t0782", "t0792", "t0803", "t0815", "t0833", "t0842", "t0844"]}
dataset["combined"] = dataset["old"] + dataset["new"]
dataset["may13"] = ['1r69', '3icb', '256b', '4cpv', '2mhr', '1mba', '2fha', '1fc2', '1enh', '2gb1', '2cro', '1ctf', '4icb']
dataset["membrane"] = ["2bg9", "1j4n", "1py6_SD", "2bl2", "1rhz", "1iwg", "2ic8", "1pv6", "1occ", "1kpl", "2bs2", "1py6", "1u19"]
dataset["hybrid"] = ["2xov_complete", "6e67A", "5xpd", "3kp9", "4a2n", "5d91", "4nv6", "4p79", "5dsg", "6g7o", "6a93", "2jo1", "1py6", "1pv6", "1u19"]
dataset["optimization"] = ['1e0m', '1w4e', '1e0g', '2wqg', '1jo8', '1fex', '2l6r', '1c8c', '1g6p', '1mjc', '2jmc', '1hdn', '1st7', '1n88', '1d6o', '1hcd', '2ga5', '1j5u', '3o4d', '1k0s']
dataset["optimization_cath"] = ['1a75A00', '1bekA01', '1bqbA02', '1cpcB00', '1cscA02', '1cy5A00', '1dv5A00', '1e8yA05', '1evyA02', '1in4A03', '1l1fA03', '1vq8P01', '1xmkA00', '1zcaA02', '2grhA00', '2ii2A04', '2q6fB03', '2wh6A00', '3g0vA00', '3geuA00', '3h99A03', '3hrdD02', '3ju5A01', '3p1wA03', '4cxfA01', '4i2aA01', '4i4tB03', '4i6uB00', '5kn9A02']
# '1hcd', '1k0s' have problem in beta sheets. (the crystal structure is more round while prediction is more like a straight sheet)
dataset["optimization_v2"] = ['1e0m', '1w4e', '1e0g', '2wqg', '1jo8', '1fex', '2l6r', '1c8c', '1g6p', '1mjc', '2jmc', '1hdn', '1st7', '1n88', '1d6o', '2ga5', '1j5u', '3o4d']
dataset["optimization_cbd"] = ['1hoe', '1hyp', '1tif', '1vcc', '1by9', '1bdo', '451c', '1cc5', '1bb9', '1pht', '1opd', '1a32', '1ptf', '1cyo', '1tig', '1ctj', '1fna', '1rzl', '1who', '2cbp', '2acy', '1plc', '1bm8', '1opc', '3vub', '1tul', '1kte', '1erv', '1btn', '1a1x', '3cyr', '1bkf', '1ycc', '1sfp', '1kpf', '2mcm', '2pii', '1a6f', '1by2', '1bea', '1rmd', '1poa', '1tmy', '2a0b', '1mai', '1neu', '1dun', '1b6e', '2sak', '1dhn', '1cxc', '1bgf', '7rsa', '1bqk', '3pyp', '1bfg', '1opy', '1rlw', '1rie', '3chy', '1rcb', '1cpq', '1pdo', '3lzt', '1hmt', '1htp', '1c52', '1kuh', '1crb', '1poc', '1aqt', '2end', '5nul', '1pne', '1lcl', '2sns', '1flp', '1tfe', '1ax8', '1pkp', '1rss', '1jon', '1vls', '1lba', '1aly', '1mba', '2hbg', '1akr', '1osa', '1div']
# Provenance of the apr11_2020 selection (kept for reproducibility):
# data = pd.read_csv("~/Research/database/training_set_apr11_2020.csv")
# specific_decoys = data.query("Length < 150 and Length > 70").reset_index(drop=True)
# pdb_list = specific_decoys["Protein"].to_list()
# pdb_list = [a.lower() for a in pdb_list]
# skip_pdb_list = ["1puc", "1skz"]
# skip_pdb_list += ["1msc", "1fmb", "1gvp", "2tgi", "1whi", "1baj", "1rmd", "1div"] #dimer.
# skip_pdb_list += ["1aqe"] # lots of ligand
# skip_pdb_list += ['1by2', '1rcb', '1hyp', '3lzt', '3cyr', '7rsa', '1rzl', '1b6e', '1poc', '1bea', '1poa'] # has at least 6 CYS.
# filtered_pdb_list = [x for x in pdb_list if x not in skip_pdb_list]
dataset["apr11_2020"] = ['1hoe', '1tif', '1vcc', '1by9', '1bdo', '451c', '1cc5', '1bb9', '1pht', '1opd', '1a32', '1ptf', '1cyo', '1tig', '1ctj', '1fna', '1who', '2cbp', '2acy', '1plc', '1bm8', '1opc', '3vub', '1tul', '1kte', '1erv', '1btn', '1a1x', '1bkf', '1ycc', '1sfp', '1kpf', '2mcm', '2pii', '1a6f', '1tmy', '2a0b', '1mai', '1neu', '1dun', '2sak', '1dhn', '1cxc', '1bgf', '1bqk', '3pyp', '1bfg', '1opy', '1rlw', '1rie', '3chy', '1cpq', '1pdo', '1hmt', '1htp', '1c52', '1kuh', '1crb', '1aqt', '2end', '5nul', '1pne', '1lcl', '2sns', '1flp', '1tfe', '1ax8', '1pkp', '1rss', '1jon', '1vls', '1lba', '1aly', '1mba', '2hbg', '1akr', '1osa']
# def returnSteps(p):
# if p in "1FC2C, 1ENH, 2GB1, 2CRO, 1CTF, 4ICB".split(", "):
# steps = 80
# elif p in "1R69, 1UTG, 3ICB, 256BA, 4CPV, 1CCR, 2MHR".split(", "):
# steps = 40
# elif p in ["1MBA", "2FHA"]:
# steps = 30
# return steps
def get_aligned_info(p1, p2):
    """Run TMalign on structures *p1* and *p2* and parse its summary.

    Returns (aligned_length, rmsd, tmscore, seqid) parsed from the line
    containing 'Aligned' and the line after it in TMalign's output.
    """
    output = getFromTerminal(
        f"/home/wl45/build/TMalign/TMalign {p1} {p2} | grep -A 1 'Aligned'")
    out_lines = output.split("\n")
    length_part, rmsd_part, seqid_part = out_lines[0].split(",")
    aligned_length = int(length_part.split("=")[1])
    rmsd = float(rmsd_part.split("=")[1])
    tmscore = float(
        out_lines[1].split(",")[0].split("=")[-1].strip().split(" ")[0])
    seqid = float(seqid_part.split("=")[-1])
    return aligned_length, rmsd, tmscore, seqid
def readList(fileName):
    """Return every line of *fileName*, whitespace-stripped, as a list."""
    with open(fileName) as f:
        return [line.strip() for line in f.readlines()]
def slurmRun(slurmFileName, cmd, template=scavenge_slurm, memory=1, thread=1, runOnServer=True):
    """Write a SLURM script that runs *cmd* and submit it.

    memory/thread override the template's per-cpu memory (GB) and
    cpus-per-task directives. Returns the submitted job id, or the
    sentinel "runOnLocalMachine" when runOnServer is False (the command
    is then executed directly instead).
    """
    os.system("mkdir -p outs")
    if not runOnServer:
        do(cmd)
        return "runOnLocalMachine"
    # BUG FIX: the original called an undefined helper replace() (its
    # sed-based definition is commented out at module top), so this path
    # crashed with NameError. Substitute the resource directives
    # in-memory before writing the script instead.
    script = template.format(cmd)
    script = script.replace("#SBATCH --mem-per-cpu=1G",
                            f"#SBATCH --mem-per-cpu={memory}G")
    script = script.replace("#SBATCH --cpus-per-task=1",
                            f"#SBATCH --cpus-per-task={thread}")
    with open(slurmFileName, "w") as out:
        out.write(script)
    submit_output = getFromTerminal(f"sbatch {slurmFileName}")
    return submit_output.split(" ")[-1].strip()
# --- Dispatch: --day plot --------------------------------------------------
# Render the saved gamma vectors (one per cutoff) for the --label trial
# into saved_gammas/figures/. show_together_v2 is defined elsewhere.
if args.day == "plot":
    import matplotlib.pyplot as plt
    cutoff_list = [100, 200, 300, 400, 500, 600]
    # cutoff_list += [10, 20, 30, 40, 50, 80]
    save_gamma_pre = "saved_gammas"
    # trial_name = "iter1"
    trial_name = args.label
    os.system(f"mkdir -p {save_gamma_pre}/figures")
    for cutoff_i in cutoff_list:
        # cutoff_i = 400
        name = f"{save_gamma_pre}/{trial_name}_cutoff{cutoff_i}_impose_Aprime_constraint"
        filtered_gamma_ = np.loadtxt(name)
        # 840 entries means the 4-panel layout (split direct term);
        # anything else is zero-padded/truncated into the 3-panel 630 form.
        if len(filtered_gamma_) == 840:
            filtered_gamma = filtered_gamma_
            n = 4
            ax_title_list=["Direct_H", "Direct_P", "High density(protein)", "Low density(water)"]
        else:
            n = 3
            filtered_gamma = np.zeros(630)
            for i in range(min(len(filtered_gamma_), 630)):
                filtered_gamma[i] = filtered_gamma_[i]
            ax_title_list=["Direct", "High density(protein)", "Low density(water)"]
        figureName = f"{save_gamma_pre}/figures/{trial_name}_cutoff{cutoff_i}_equal_legend"
        title = f"{trial_name}_cutoff{cutoff_i}"
        print(cutoff_i, name, figureName)
        show_together_v2(filtered_gamma, figureName, title=title, inferBound=2, n=n, ax_title_list=ax_title_list)
def read_simulation_info(folder_list, pdb_list, simulationType, base_path, run_n):
    """Collect per-run info.dat tables into one '{simulationType}.csv'.

    For every (folder, pdb, run index) combination, reads
    {base_path}/{simulationType}/{folder}/{pdb}/{i}/info.dat as a
    whitespace-separated table, tags it with Run/Protein/Folder columns,
    concatenates everything and writes the result to
    '{simulationType}.csv' in the current directory.

    Returns the output CSV file name. Missing or unreadable runs are
    reported and skipped. (The unused 'today' timestamp variable from
    the original was dropped.)
    """
    all_data = []
    for folder in folder_list:
        for pdb in pdb_list:
            for i in range(run_n):
                location = f"{base_path}/{simulationType}/{folder}/{pdb}/{i}/info.dat"
                try:
                    # raw string: '\s+' is a regex, not a string escape
                    tmp = pd.read_csv(location, sep=r"\s+")
                except Exception:
                    # BUG FIX: was a bare 'except: pass' which also
                    # swallowed KeyboardInterrupt; report and skip.
                    print(pdb, i, folder)
                    continue
                all_data.append(tmp.assign(Run=i, Protein=pdb, Folder=folder))
    data = pd.concat(all_data)
    outFile = f"{simulationType}.csv"
    data.reset_index(drop=True).to_csv(outFile)
    print(outFile)
    return outFile
def get_z_scores(trial_name):
    """Validate the cutoff-400 gamma for *trial_name*, dump Z-scores.

    Backs up ./gammas, runs validate_hamiltonian_wei over the complete
    protein list with the saved gamma file, and writes the resulting
    table to optimization_2020.csv.
    """
    from pyCodeLib import validate_hamiltonian_wei
    import warnings
    warnings.filterwarnings('ignore')
    cutoff = 400
    gamma_file_name = (
        f"./saved_gammas/{trial_name}_cutoff{cutoff}_impose_Aprime_constraint")
    do(f"cp -r gammas back_gammas_{trial_name}")
    data = validate_hamiltonian_wei(
        "phi_list.txt", "protein_list_complete", gamma_file_name,
        "openMM", 100, mode=0)
    data.to_csv(f"optimization_2020.csv")
def get_weighted_Q(preDecoyBiasName, decoyBiasName):
    """Reweight per-decoy phi arrays by each protein's Z-score weight.

    Reads optimization_2020.csv, derives a normalized weight from each
    protein's Z-score, multiplies the stored phi arrays found under
    ../phis/*_{preDecoyBiasName}_* and writes the weighted arrays back
    under *decoyBiasName*.
    """
    from pyCodeLib import read_phi_list, read_column_from_file, get_parameters_string
    phi_list_file_name = "phi_list.txt"
    training_set_file = "protein_list_complete"
    decoy_method = "openMM"
    dataFile = f"optimization_2020.csv"
    data = pd.read_csv(dataFile, index_col=0)
    # Weight variants: linear, squared and square-root damping of the
    # (shifted) Z-score; only the linear one is used below.
    data["normalized_Z_weight"] = (10/(data["Z_scores"] - data["Z_scores"].min() + 10))
    data["normalized_Z_weight_sq"] = (10/(data["Z_scores"] - data["Z_scores"].min() + 10))**2
    data["normalized_Z_weight_half"] = (10/(data["Z_scores"] - data["Z_scores"].min() + 10))**0.5
    phi_list = read_phi_list(phi_list_file_name)
    training_set = read_column_from_file(training_set_file, 1)
    protein = training_set[0]
    for i, line in data.iterrows():
        protein = line["Protein"]
        normalized_Z_weight = line["normalized_Z_weight"]
        normalized_Z_weight_sq = line["normalized_Z_weight_sq"]
        normalized_Z_weight_half = line["normalized_Z_weight_half"]
        for i_phi_function, phi_and_parameters in enumerate(phi_list):
            # print(protein, i_phi_function, phi_and_parameters)
            phi = phi_and_parameters[0]
            parameters = phi_and_parameters[1]
            i_phi = phi_list.index(phi_and_parameters)
            parameters_string = get_parameters_string(parameters)
            # try:
            fromFile = f"../phis/{phi}_{protein}_{preDecoyBiasName}_{decoy_method}_{parameters_string}"
            a = np.loadtxt(fromFile)
            # print(a)
            # b = (1-a) * normalized_Z_weight
            # toFile = f"../phis/{phi}_{protein}_{decoyBiasName}_{decoy_method}_{parameters_string}"
            # np.savetxt(toFile, b, fmt='%.4f')
            # NOTE(review): the elif and else branches below are identical,
            # and "deocyQZBias" looks like a typo for "decoyQZBias" --
            # confirm against callers before collapsing or renaming.
            if preDecoyBiasName == "decoysQ":
                b = (1-a) * normalized_Z_weight
            elif preDecoyBiasName == "deocyQZBias":
                b = a * normalized_Z_weight
            else:
                b = a * normalized_Z_weight
            # b = (1-a) * normalized_Z_weight_sq
            # b = (1-a) * normalized_Z_weight_half
            toFile = f"../phis/{phi}_{protein}_{decoyBiasName}_{decoy_method}_{parameters_string}"
            np.savetxt(toFile, b, fmt='%.4f')
def getSeqFromPDB(fileLocation):
    """Extract the one-letter amino-acid sequence from a PDB file.

    Only residues whose hetero-flag is a blank space are kept, so waters,
    ligands and other HETATM records are skipped.
    """
    structure = PDBParser().get_structure("test", fileLocation)
    letters = [
        three_to_one(res.get_resname())
        for res in structure.get_residues()
        if res.get_id()[0] == ' '
    ]
    return "".join(letters)
def optimization_setupDatabase(pdb_list, fromFolder, toFolder):
    """Populate the optimization database.

    Copies every PDB from *fromFolder* into ``database/dompdb`` under
    *toFolder*, then writes the one-letter sequence of each structure into
    ``database/S20_seq/<pdb>.seq``.
    """
    do(f"mkdir -p {toFolder}/database/dompdb")
    do(f"mkdir -p {toFolder}/database/S20_seq")
    # Stage 1: copy all structures first.
    for pdb in pdb_list:
        do(f"cp {fromFolder}/{pdb}.pdb {toFolder}/database/dompdb/")
    # Stage 2: derive and store the sequence of each copied structure.
    for pdb in pdb_list:
        seq = getSeqFromPDB(f"{toFolder}/database/dompdb/{pdb}.pdb")
        with open(f"{toFolder}/database/S20_seq/{pdb}.seq", "w") as out:
            out.write(seq + "\n")
def optimization_setupFolder(phi_list, pdb_list):
    """Create the working-directory layout for one optimization round.

    Copies *phi_list* to ``phi_list.txt``, creates the scratch directories
    used by later stages, and writes ``protein_list`` with one PDB id per
    line.
    """
    do(f"cp {phi_list} phi_list.txt")
    for command in ("mkdir proteins_name_list",
                    "mkdir slurms",
                    'mkdir -p outs',
                    # do("mkdir -p decoys/multiShuffle")
                    "mkdir -p decoys",
                    "mkdir -p gammas",
                    "mkdir -p ../phis"):
        do(command)
    with open("protein_list", "w") as out:
        out.writelines(f"{pdb}\n" for pdb in pdb_list)
def optimization_setupDecoys_shuffle(n_decoys, pdb_per_txt=2, runOnServer=True, template=base_slurm):
    """Generate shuffled decoy sequences for every protein batch via slurm.

    Splits the protein list into batches of *pdb_per_txt* proteins, submits
    one ``generate_decoys.py`` job per batch, appends each processed batch
    to ``iter0_complete.txt``, then blocks until all jobs finish.

    :param n_decoys: number of decoy sequences per protein.
    :param pdb_per_txt: batch size used by split_proteins_name_list.
    :param runOnServer: forwarded to slurmRun (run locally when False).
    :param template: slurm script template (evaluated once at def time).
    """
    n = split_proteins_name_list(pdb_per_txt=pdb_per_txt)
    jobIdList = []
    for i in range(n):
        proteins = f"proteins_name_list/proteins_name_list_{i}.txt"
        # generate_decoy_sequences(proteins, methods=['shuffle'], num_decoys=[n_decoys], databaseLocation="../../../")
        # jobId = slurmRun(f"slurms/run_{i}.slurm", f"python3 -d genDecoy -m 1 {proteins}", template=base_slurm, memory=10)
        jobId = slurmRun(f"slurms/run_{i}.slurm", f"python3 ~/opt/generate_decoys.py -n {n_decoys} -m 1 {proteins}", template=template, memory=10, runOnServer=runOnServer)
        # jobId = slurmRun(f"slurms/run_{i}.slurm", f"python3 ~/opt/compute_phis.py -m 0 {proteins}", template=base_slurm, memory=10, runOnServer=runOnServer)
        # jobId = slurmRun(f"slurms/run_{i}.slurm", f"python3 ~/opt/compute_phis.py -m 0 proteins_name_list/proteins_name_list_{i}.txt")
        jobIdList.append(jobId)
        do(f"cat {proteins} >> iter0_complete.txt")
    waitForJobs(jobIdList, sleepInterval=60)
def optimization_convertPDB(base_path, simulationType, folder_list, pdb_list, run_n):
    """Convert every raw openMM ``movie.pdb`` into the standard movie format.

    Submits one slurm conversion job per (folder, pdb, run) triple and waits
    for all of them.  Relies on cd() mutating the process working directory,
    so the cd("../..")/cd("..") calls must stay exactly where they are.
    """
    cd(f"{base_path}/{simulationType}")
    jobIdList = []
    for folder in folder_list:
        cd(folder)
        for pdb in pdb_list:
            for i in range(run_n):
                cd(f"{pdb}/{i}")
                # do()
                cmd = "python /projects/pw8/wl45/openawsem/helperFunctions/convertOpenmmTrajectoryToStandardMovie.py movie.pdb"
                jobId = slurmRun(f"convert.slurm", cmd, template=scavenge_slurm)
                jobIdList.append(jobId)
                cd("../..")
        cd("..")
    waitForJobs(jobIdList, sleepInterval=30)
def optimization_setupDecoys(optimizationBasePath, optimizationFolder, folder_list, pdb_list, simulationType, base_path, run_n, lastN_frame,
                             phi_list="../phi_list_environment_complete.txt"):
    """Build the optimization folder and harvest simulation frames as decoys.

    Creates the standard directory layout inside
    ``{optimizationBasePath}/{optimizationFolder}``, writes
    ``protein_list`` with one ``{pdb}_{folder}`` entry per combination,
    collects per-run simulation info into ``{simulationType}.csv``, and for
    each (folder, pdb) pair extracts the last *lastN_frame* frames of every
    run's ``movie.pdb``, attaches the parsed structures to the matching
    CSV rows, and pickles the result into ``decoys/openMM/``.

    NOTE(review): the ``tail(50)`` below assumes lastN_frame == 50 so that
    the number of kept CSV rows matches len(complete_models) — confirm when
    calling with a different lastN_frame.
    """
    do(f"mkdir -p {optimizationBasePath}/{optimizationFolder}")
    cd(f"{optimizationBasePath}/{optimizationFolder}")
    do(f"cp {phi_list} phi_list.txt")
    do("mkdir proteins_name_list")
    do("mkdir slurms")
    do('mkdir -p outs')
    # do("mkdir -p decoys/multiShuffle")
    do("mkdir -p decoys")
    do("mkdir -p gammas")
    do("mkdir -p ../phis")
    # with open("protein_list", "w") as out:
    #     for pdb in pdb_list:
    #         out.write(f"{pdb}\n")
    with open("protein_list", "w") as out:
        for folder in folder_list:
            for pdb in pdb_list:
                out.write(f"{pdb}_{folder}\n")
    # if args.mode == 3:
    outFile = read_simulation_info(folder_list, pdb_list, simulationType, base_path, run_n)
    # today = datetime.today().strftime('%m-%d')
    outFile = f"{simulationType}.csv"
    # if args.mode == 13:
    # time.sleep(3600)
    dataFile = f"{simulationType}.csv"
    simulation_folder = simulationType
    data = pd.read_csv(dataFile, index_col=0)
    parser = PDBParser()
    # folder = "first"
    # for folder in ["first", "first_cpu2"]:
    for folder in folder_list:
        pre = f"{base_path}/{simulation_folder}/{folder}"
        to_folder = "."
        os.system(f"mkdir -p {to_folder}/decoys/openMM")
        # pdb_list = ['1j5u']
        for pdb in pdb_list:
            complete_models = []
            print(pdb)
            for i in range(run_n):
                movieFile = f"{pre}/{pdb}/{i}/movie.pdb"
                allFrames, n, size = getAllFrames(movieFile)
                num_of_frames = int(n/size)
                # Keep only the trailing lastN_frame frames of each run.
                first_chosen_frame = num_of_frames - lastN_frame
                # last_chosen_frame = num_of_frames
                oneFrame = allFrames[size*first_chosen_frame:size*(num_of_frames)]
                p = PDBParser()
                f = io.StringIO("".join(oneFrame))
                s = p.get_structure("test", f)
                complete_models += list(s.get_models())
            # Align CSV rows with the harvested structures for this pdb/folder.
            t = data.query(f"Protein == '{pdb}' and Folder == '{folder}' and Steps > 1").reset_index(drop=True)
            t = t.groupby("Run").tail(50).reset_index(drop=True)
            t["structure"] = complete_models
            t = t.rename(columns={"Q":"Qw"})
            # last50 = t.groupby("Run").tail(50).reset_index(drop=True)
            last50 = t
            # print(last50.head())
            # print(last50.tail())
            # print(last50.shape)
            # print(len(complete_models))
            to_folder = "."
            last50.to_pickle(f"{to_folder}/decoys/openMM/{pdb}_{folder}.pkl")
def optimization_compute_phi_and_gamma(complete_folder_list, pdb_list, runOnServer=True, phi_cmd="python3 ~/opt/compute_phis.py -m 7 {}", gamma_cmd="python3 ~/opt/gg_server.py -d apr11 -m 44", template=base_slurm):
    """Compute phi values per protein batch, then one gamma-generation job.

    Phase 1 submits one *phi_cmd* slurm job per protein batch and waits.
    Phase 2 rewrites ``protein_list_complete`` from the cross product of
    *complete_folder_list* x *pdb_list* and runs a single *gamma_cmd* job.

    NOTE(review): some callers pass a plain string (e.g. "protein_list")
    as complete_folder_list; iterating it yields single characters, so the
    generated protein_list_complete entries look like "{pdb}_p", "{pdb}_r",
    ... — confirm whether that is intended before relying on this file.
    """
    n = split_proteins_name_list(pdb_per_txt=2)
    jobIdList = []
    for i in range(n):
        proteins = f"proteins_name_list/proteins_name_list_{i}.txt"
        # generate_decoy_sequences(proteins, methods=['shuffle'], num_decoys=[n_decoys], databaseLocation="../../../")
        jobId = slurmRun(f"slurms/run_{i}.slurm", phi_cmd.format(proteins), template=template, memory=10, runOnServer=runOnServer)
        # jobId = slurmRun(f"slurms/run_{i}.slurm", f"python3 ~/opt/compute_phis.py -m 0 proteins_name_list/proteins_name_list_{i}.txt")
        jobIdList.append(jobId)
        do(f"cat {proteins} >> iter0_complete.txt")
    waitForJobs(jobIdList, sleepInterval=60)
    with open("protein_list_complete", "w") as out:
        for folder in complete_folder_list:
            for pdb in pdb_list:
                out.write(f"{pdb}_{folder}\n")
    jobIdList = []
    jobId = slurmRun(f"slurms/run_on_scavenge.slurm", f"{gamma_cmd}", template=template, memory=60, runOnServer=runOnServer)
    jobIdList.append(jobId)
    waitForJobs(jobIdList, sleepInterval=60)
def optimization_z_bias(iteration, c=-50):
    """Run two rounds of Z-score-biased gamma refinement for *iteration*.

    Each round: analyze the current trial, compute Z-scores, reweight the
    phi files with get_weighted_Q, regenerate gammas under the new bias
    label via a slurm job, then analyze the resulting trial.

    NOTE(review): "deocyQZBias" is a typo kept on purpose — the phi files
    on disk are named with this spelling (see get_weighted_Q).

    :param iteration: trial label of the starting gamma set.
    :param c: cutoff forwarded to optimization_analyze.py via -c.
    """
    # mode = 1 for this environment.
    do(f"optimization_analyze.py {iteration} --proteinList protein_list_complete -c {c} -m 1")
    trial_name = iteration
    get_z_scores(trial_name)
    get_weighted_Q("decoysQ", "deocyQZBias")
    # if args.mode == 7:
    decoyBiasName = "deocyQZBias"
    jobIdList = []
    jobId = slurmRun(f"slurms/get_gamma_{decoyBiasName}.slurm", f"python3 ~/opt/gg_server.py -d apr11 -m 77 -l {decoyBiasName}", template=base_slurm, memory=60)
    # jobId = slurmRun(f"slurms/run_{i}.slurm", f"python3 ~/opt/compute_phis.py -m 0 proteins_name_list/proteins_name_list_{i}.txt")
    jobIdList.append(jobId)
    waitForJobs(jobIdList, sleepInterval=60)
    # if args.mode == 8:
    # i = "iter4_shift_center_z_weighted"
    trial_name = f"{iteration}_zBias_1"
    # i = args.label
    do(f"optimization_analyze.py {trial_name} --proteinList protein_list_complete -c {c} -m 1")
    get_z_scores(trial_name)
    # Second biasing round, seeded from the first round's output.
    get_weighted_Q("deocyQZBias", "decoyQZBias_2")
    decoyBiasName = "decoyQZBias_2"
    jobIdList = []
    jobId = slurmRun(f"slurms/get_gamma_{decoyBiasName}.slurm", f"python3 ~/opt/gg_server.py -d apr11 -m 77 -l {decoyBiasName}", template=base_slurm, memory=60)
    # jobId = slurmRun(f"slurms/run_{i}.slurm", f"python3 ~/opt/compute_phis.py -m 0 proteins_name_list/proteins_name_list_{i}.txt")
    jobIdList.append(jobId)
    waitForJobs(jobIdList, sleepInterval=60)
    trial_name = f"{iteration}_zBias_2"
    print(trial_name)
    # i = args.label
    do(f"optimization_analyze.py {trial_name} --proteinList protein_list_complete -c {c} -m 1")
def gamma_to_plot(name, figureName, title):
    """Load a gamma vector from *name* and render it via show_together_v2.

    A vector of exactly 840 entries is plotted with the 4-panel layout
    (separate hydrophobic/polar direct terms); any other length is copied
    into a zero-padded (or truncated) 630-entry vector and plotted with
    the 3-panel layout.
    """
    raw = np.loadtxt(name)
    if len(raw) == 840:
        gamma = raw
        panels = ["Direct_H", "Direct_P", "High density(protein)", "Low density(water)"]
    else:
        gamma = np.zeros(630)
        count = min(len(raw), 630)
        gamma[:count] = raw[:count]
        panels = ["Direct", "High density(protein)", "Low density(water)"]
    # print(cutoff_i, name, figureName)
    show_together_v2(gamma, figureName, title=title, inferBound=2, n=len(panels), ax_title_list=panels)
# CLI dispatch: "example" day runs the small 5-protein optimization demo.
if args.day == "example":
    phi_list = "/home/wl45/opt/optimization/phi_list_contact.txt"
    # phi_list = "phi_list.txt"
    # pdb_list = dataset["apr11_2020"]
    # data = pd.read_csv("/home/wl45/dataset/has_structures_small_dataset_cleaned.csv")
    # pdb_list = data["Protein"].to_list()
    pdb_list = ['3s5rA00', '1ba5A00', '4fdyA01', '1b43A02', '1zzhA02']
    base_path = "/scratch/wl45/may_week1_2020"
    # simulationType = "mass_iterative_run_traditional"
    optimizationBasePath = f"{base_path}/example_optimization"
    # Mode 1: run everything locally (small decoy count, no slurm).
    if args.mode == 1:
        optimizationFolder = "iter0_example"
        runOnServer = False
        # lastN_frame = 50
        # run_n = 2
        n_decoys = 20
        trial_name = optimizationFolder
        opt_folder = "~/opt"
        phi_cmd=f"python3 {opt_folder}/compute_phis.py -m 0 " + "{}"
        # phi_cmd="python3 ~/opt/compute_phis.py -m 7 {proteins}"
        gamma_cmd=f"python3 {opt_folder}/generate_gamma.py -m 1 -n {n_decoys}"
        complete_folder_list = "protein_list"
        # start from the pdbs in source folder, move them to database folder.
        fromFolder = f"{optimizationBasePath}/source"
        optimization_setupDatabase(pdb_list, fromFolder, optimizationBasePath)
        # setup the optimization folder.
        do(f"mkdir -p {optimizationBasePath}/{optimizationFolder}")
        cd(f"{optimizationBasePath}/{optimizationFolder}")
        optimization_setupFolder(phi_list, pdb_list)
        optimization_setupDecoys_shuffle(n_decoys, runOnServer=runOnServer)
        optimization_compute_phi_and_gamma(complete_folder_list, pdb_list,
                runOnServer=runOnServer, phi_cmd=phi_cmd, gamma_cmd=gamma_cmd)
        do(f"optimization_analyze.py {trial_name}")
        do(f"gg_server.py -d plot -l {trial_name}")
    # Mode 3: same pipeline but submitted to the cluster with more decoys.
    if args.mode == 3:
        optimizationFolder = "iter0_test_server"
        runOnServer = True
        # lastN_frame = 50
        # run_n = 2
        n_decoys = 100
        trial_name = optimizationFolder
        phi_cmd="python3 ~/opt/compute_phis.py -m 0 {}"
        # phi_cmd="python3 ~/opt/compute_phis.py -m 7 {proteins}"
        gamma_cmd=f"python3 ~/opt/generate_gamma.py -m 1 -n {n_decoys}"
        complete_folder_list = "protein_list"
        # start from the pdbs in source folder, move them to database folder.
        fromFolder = f"{optimizationBasePath}/source"
        optimization_setupDatabase(pdb_list, fromFolder, optimizationBasePath)
        # setup the optimization folder.
        do(f"mkdir -p {optimizationBasePath}/{optimizationFolder}")
        cd(f"{optimizationBasePath}/{optimizationFolder}")
        optimization_setupFolder(phi_list, pdb_list)
        optimization_setupDecoys_shuffle(n_decoys, runOnServer=runOnServer, template=base_casp14_slurm)
        optimization_compute_phi_and_gamma(complete_folder_list, pdb_list,
                runOnServer=runOnServer, phi_cmd=phi_cmd, gamma_cmd=gamma_cmd, template=base_casp14_slurm)
        do(f"optimization_analyze.py {trial_name}")
        do(f"gg_server.py -d plot -l {trial_name}")
    # if args.mode == 1:
    #     random.seed(5)
    #     selected = random.sample(pdb_list, 5)
    #     print(selected)
    #     for pdb in selected:
    #         do(f"cp ../cath_dataset_shuffle_optimization/database/dompdb/{pdb}.pdb source/")
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._certificates_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CertificatesOperations:
    """CertificatesOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.appplatform.v2022_01_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client and (de)serialization helpers injected by the
        # auto-generated service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        service_name: str,
        certificate_name: str,
        **kwargs: Any
    ) -> "_models.CertificateResource":
        """Get the certificate resource.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param certificate_name: The name of the certificate resource.
        :type certificate_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CertificateResource, or the result of cls(response)
        :rtype: ~azure.mgmt.appplatform.v2022_01_01_preview.models.CertificateResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CertificateResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build the GET request from the operation's metadata URL template.
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            certificate_name=certificate_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('CertificateResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/certificates/{certificateName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        service_name: str,
        certificate_name: str,
        certificate_resource: "_models.CertificateResource",
        **kwargs: Any
    ) -> "_models.CertificateResource":
        # Initial PUT of the long-running create-or-update operation; the
        # public begin_create_or_update wraps this with a poller.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CertificateResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(certificate_resource, 'CertificateResource')
        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            certificate_name=certificate_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200/201/202 all carry a (possibly still-provisioning) resource body.
        if response.status_code == 200:
            deserialized = self._deserialize('CertificateResource', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('CertificateResource', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('CertificateResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/certificates/{certificateName}'}  # type: ignore
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        service_name: str,
        certificate_name: str,
        certificate_resource: "_models.CertificateResource",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.CertificateResource"]:
        """Create or update certificate resource.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param certificate_name: The name of the certificate resource.
        :type certificate_name: str
        :param certificate_resource: Parameters for the create or update operation.
        :type certificate_resource:
         ~azure.mgmt.appplatform.v2022_01_01_preview.models.CertificateResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either CertificateResource or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2022_01_01_preview.models.CertificateResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CertificateResource"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a token.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                certificate_name=certificate_name,
                certificate_resource=certificate_resource,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('CertificateResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/certificates/{certificateName}'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        service_name: str,
        certificate_name: str,
        **kwargs: Any
    ) -> None:
        # Initial DELETE of the long-running delete operation.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            certificate_name=certificate_name,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/certificates/{certificateName}'}  # type: ignore
    @distributed_trace_async
    async def begin_delete(
        self,
        resource_group_name: str,
        service_name: str,
        certificate_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Delete the certificate resource.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param certificate_name: The name of the certificate resource.
        :type certificate_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                certificate_name=certificate_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Delete has no body to deserialize; only forward to cls if given.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/certificates/{certificateName}'}  # type: ignore
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        service_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.CertificateResourceCollection"]:
        """List all the certificates of one user.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CertificateResourceCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.appplatform.v2022_01_01_preview.models.CertificateResourceCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CertificateResourceCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the metadata URL; subsequent pages follow nextLink.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Unwrap one page: return (next page link, items on this page).
            deserialized = self._deserialize("CertificateResourceCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/certificates'}  # type: ignore
|
|
#!/usr/bin/env python
"""
Wgetter is another command line download utility written completely in python.
It is based on python-wget (https://bitbucket.org/techtonik/python-wget/src) with some improvements.
It works on python >= 2.6 or python >= 3.0. Runs on Windows, Linux, or Mac.
API Usage:
>>> import wgetter
>>> filename = wgetter.download('https://sites.google.com/site/doctormike/pacman-1.2.tar.gz', outdir='/home/user')
100 % [====================================================>] 19.9KiB / 19.9KiB 100.0KiB/s eta 0:00:01
>>> filename
'/home/user/pacman-1.2.tar.gz'
"""
import sys
import os
import shutil
import tempfile
import hashlib
import datetime
from time import time
PY3K = sys.version_info >= (3, 0)
if PY3K:
import urllib.request as ulib
import urllib.parse as urlparse
import http.cookiejar as cjar
else:
import urllib2 as ulib
import urlparse
import cookielib as cjar
SUFFIXES = {1000: ['KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'],
            1024: ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']}
def approximate_size(size, a_kilobyte_is_1024_bytes=True):
    '''
    Convert a file size in bytes to a human-readable string.

    Based on humansize.py from Dive into Python3,
    Mark Pilgrim - http://www.diveintopython3.net/
    Copyright (c) 2009, Mark Pilgrim, All rights reserved.

    Keyword arguments:
    size -- file size in bytes
    a_kilobyte_is_1024_bytes -- if True (default), use multiples of 1024;
                                if False, use multiples of 1000

    Returns: string
    '''
    remaining = float(size)
    if remaining < 0:
        raise ValueError('number must be non-negative')
    base = 1024 if a_kilobyte_is_1024_bytes else 1000
    # Divide down until the value fits the current unit.
    for unit in SUFFIXES[base]:
        remaining /= base
        if remaining < base:
            return '{0:.1f}{1}'.format(remaining, unit)
    raise ValueError('number too large')
def get_console_width():
    """Return width of available window area. Autodetection works for
    Windows and POSIX platforms. Returns 80 for others
    Code from http://bitbucket.org/techtonik/python-pager
    """
    if os.name == 'nt':
        STD_INPUT_HANDLE = -10
        STD_OUTPUT_HANDLE = -11
        STD_ERROR_HANDLE = -12
        # get console handle
        from ctypes import windll, Structure, byref
        try:
            from ctypes.wintypes import SHORT, WORD, DWORD
        except ImportError:
            # workaround for missing types in Python 2.5
            from ctypes import (
                c_short as SHORT, c_ushort as WORD, c_ulong as DWORD)
        console_handle = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
        # CONSOLE_SCREEN_BUFFER_INFO Structure
        class COORD(Structure):
            _fields_ = [("X", SHORT), ("Y", SHORT)]
        class SMALL_RECT(Structure):
            _fields_ = [("Left", SHORT), ("Top", SHORT),
                        ("Right", SHORT), ("Bottom", SHORT)]
        class CONSOLE_SCREEN_BUFFER_INFO(Structure):
            _fields_ = [("dwSize", COORD),
                        ("dwCursorPosition", COORD),
                        ("wAttributes", WORD),
                        ("srWindow", SMALL_RECT),
                        ("dwMaximumWindowSize", DWORD)]
        sbi = CONSOLE_SCREEN_BUFFER_INFO()
        ret = windll.kernel32.GetConsoleScreenBufferInfo(
            console_handle, byref(sbi))
        if ret == 0:
            # GetConsoleScreenBufferInfo failed (e.g. output redirected).
            return 0
        # Rightmost visible column + 1 == window width in characters.
        return sbi.srWindow.Right + 1
    elif os.name == 'posix':
        from fcntl import ioctl
        from termios import TIOCGWINSZ
        from array import array
        winsize = array("H", [0] * 4)
        try:
            ioctl(sys.stdout.fileno(), TIOCGWINSZ, winsize)
        except IOError:
            # stdout is not a tty; winsize stays zeroed.
            pass
        # TIOCGWINSZ fills (rows, cols, xpixel, ypixel); take the columns.
        return (winsize[1], winsize[0])[0]
    return 80
# Width of the terminal, probed once at import time.
CONSOLE_WIDTH = get_console_width()
# Need 2 spaces more to avoid linefeed on Windows
# AVAIL_WIDTH is the number of columns left for the progress bar itself after
# the fixed-width percent/size/speed/eta fields are printed.
AVAIL_WIDTH = CONSOLE_WIDTH - 59 if os.name == 'nt' else CONSOLE_WIDTH - 57
def filename_from_url(url):
    """:return: detected filename or None"""
    candidate = os.path.basename(urlparse.urlparse(url).path)
    # A name made only of whitespace and dots carries no information.
    if not candidate.strip(" \n\t."):
        return None
    return candidate
def filename_from_headers(headers):
    """Detect filename from Content-Disposition headers if present.
    http://greenbytes.de/tech/tc2231/

    :param: headers as dict, list or string
    :return: filename from content-disposition header or None
    """
    # Normalize the three accepted input shapes down to a dict.
    if type(headers) == str:
        headers = headers.splitlines()
    if type(headers) == list:
        headers = dict([x.split(':', 1) for x in headers])
    cdisp = headers.get("Content-Disposition")
    if not cdisp:
        return None
    cdtype = cdisp.split(';')
    if len(cdtype) == 1:
        return None
    if cdtype[0].strip().lower() not in ('inline', 'attachment'):
        return None
    # several filename params is illegal, but just in case
    fnames = [x for x in cdtype[1:] if x.strip().startswith('filename=')]
    # BUG FIX: a valid disposition with no filename= parameter used to raise
    # IndexError on fnames[0]; exactly one filename parameter is required.
    if len(fnames) != 1:
        return None
    name = fnames[0].split('=')[1].strip(' \t"')
    # basename() guards against path traversal in the served filename.
    name = os.path.basename(name)
    if not name:
        return None
    return name
def filename_fix_existing(filename, dirname):
    """Expands name portion of filename with numeric '(x)' suffix to
    return filename that doesn't exist already.

    :param filename: proposed file name (with or without extension).
    :param dirname: directory whose existing entries are examined.
    :return: filename variant like ``name(2).ext`` not yet present there.
    """
    # BUG FIX: an extension-less filename used to crash the 2-value unpack
    # of rsplit; fall back to an empty extension.
    if '.' in filename:
        name, ext = filename.rsplit('.', 1)
    else:
        name, ext = filename, ''
    names = [x for x in os.listdir(dirname) if x.startswith(name)]
    names = [x.rsplit('.', 1)[0] for x in names]
    suffixes = [x.replace(name, '') for x in names]
    # filter suffixes that match '(x)' pattern
    # BUG FIX: generated names use '(x)' with no leading space, but the old
    # filter matched ' (x)', so previously generated 'name(1).ext' files
    # were never counted and the same colliding name was returned again.
    suffixes = [x[1:-1] for x in suffixes
                if x.startswith('(') and x.endswith(')')]
    # Reject empty/non-numeric suffix bodies before int() (old code could
    # crash on int('') for a bare '()' suffix).
    indexes = [int(x) for x in suffixes
               if x and set(x) <= set('0123456789')]
    idx = 1
    if indexes:
        idx += sorted(indexes)[-1]
    if ext:
        return '{0}({1}).{2}'.format(name, idx, ext)
    return '{0}({1})'.format(name, idx)
def report_bar(bytes_so_far, total_size, speed, eta):
    '''
    This callback for the download function is used to print the download bar
    '''
    # Fraction of the download completed, used for both percent and bar.
    done_ratio = float(bytes_so_far) / total_size
    percent = int(bytes_so_far * 100 / total_size)
    shaded = int(done_ratio * AVAIL_WIDTH)
    fields = (
        str(percent).center(4),
        '=' * (shaded - 1),
        '>',
        ' ' * (AVAIL_WIDTH - shaded),
        approximate_size(bytes_so_far).center(9),
        approximate_size(total_size).center(9),
        (approximate_size(speed) + '/s').center(11),
        eta.center(10),
    )
    sys.stdout.write(
        " {0}% [{1}{2}{3}] {4}/{5} {6} eta{7}".format(*fields))
    # Carriage return (no newline) so the next call redraws in place.
    sys.stdout.write("\r")
    sys.stdout.flush()
def report_unknown(bytes_so_far, total_size, speed, eta):
    '''
    This callback for the download function is used
    when the total size is unknown
    '''
    # No total size available, so only bytes-so-far and speed are shown.
    message = "Downloading: {0} / Unknown - {1}/s ".format(
        approximate_size(bytes_so_far), approximate_size(speed))
    sys.stdout.write(message)
    # Redraw in place on the next call.
    sys.stdout.write("\r")
    sys.stdout.flush()
def report_onlysize(bytes_so_far, total_size, speed, eta):
    '''
    This callback for the download function is used when console width
    is not enough to print the bar.
    It prints only the sizes
    '''
    done = int(bytes_so_far * 100 / total_size)
    sofar = approximate_size(bytes_so_far).center(10)
    full = approximate_size(total_size).center(10)
    text = 'D: {0}% -{1}/{2}'.format(done, sofar, full) + "eta {0}".format(eta)
    sys.stdout.write(text)
    # Redraw in place on the next call.
    sys.stdout.write("\r")
    sys.stdout.flush()
def md5sum(filename, blocksize=8192):
    '''
    Returns the MD5 checksum of a file
    '''
    digest = hashlib.md5()
    with open(filename, 'rb') as fh:
        # Read in fixed-size chunks until read() yields the empty
        # sentinel, so arbitrarily large files fit in memory.
        for data in iter(lambda: fh.read(blocksize), b''):
            digest.update(data)
    return digest.hexdigest()
def download(link, outdir='.', chunk_size=4096):
    '''
    This is the Main function, which downloads a given link
    and saves on outdir (default = current directory)

    :param link: URL to fetch
    :param outdir: directory the final file is written to
    :param chunk_size: bytes read from the socket per iteration
    :return: path of the saved file (outdir joined with the chosen name)
    '''
    url = None
    fh = None
    eta = 'unknown '
    bytes_so_far = 0
    # Fall back to "." so mkstemp below still gets a usable prefix when
    # the URL path carries no filename.
    filename = filename_from_url(link) or "."
    cj = cjar.CookieJar()
    # get filename for temp file in current directory
    (fd_tmp, tmpfile) = tempfile.mkstemp(
        ".tmp", prefix=filename + ".", dir=outdir)
    # mkstemp only served to reserve a unique name; the descriptor is
    # closed and the file removed so open() below can recreate it.
    os.close(fd_tmp)
    os.unlink(tmpfile)
    try:
        opener = ulib.build_opener(ulib.HTTPCookieProcessor(cj))
        url = opener.open(link)
        fh = open(tmpfile, mode='wb')
        headers = url.info()
        # 'unknown' (a string) doubles as the sentinel for a missing or
        # unparsable Content-Length.
        try:
            total_size = int(headers['Content-Length'])
        except (ValueError, KeyError, TypeError):
            total_size = 'unknown'
        try:
            md5_header = headers['Content-MD5']
        except (ValueError, KeyError, TypeError):
            md5_header = None
        # Define which callback we're gonna use
        if total_size != 'unknown':
            if CONSOLE_WIDTH > 57:
                reporthook = report_bar
            else:
                reporthook = report_onlysize
        else:
            reporthook = report_unknown
        # Below are the registers to calculate network transfer rate
        time_register = time()
        speed = 0.0
        speed_list = []
        bytes_register = 0.0
        eta = 'unknown '
        # Loop that reads in chunks, calculates speed and does the callback to
        # print the progress
        while True:
            chunk = url.read(chunk_size)
            # Update Download Speed every 1 second
            # NOTE(review): the threshold below is 0.5s, not 1s as the
            # comment above says.
            if time() - time_register > 0.5:
                speed = (bytes_so_far - bytes_register) / \
                    (time() - time_register)
                speed_list.append(speed)
                # Set register properly for future use
                time_register = time()
                bytes_register = bytes_so_far
                # Estimative of remaining download time
                # ETA is refreshed from the mean of every 3 speed samples.
                if total_size != 'unknown' and len(speed_list) == 3:
                    speed_mean = sum(speed_list) / 3
                    eta_sec = int((total_size - bytes_so_far) / speed_mean)
                    eta = str(datetime.timedelta(seconds=eta_sec))
                    speed_list = []
            bytes_so_far += len(chunk)
            # Empty chunk means EOF: finish the progress line and stop.
            if not chunk:
                sys.stdout.write('\n')
                break
            fh.write(chunk)
            reporthook(bytes_so_far, total_size, speed, eta)
    except KeyboardInterrupt:
        print('\n\nCtrl + C: Download aborted by user')
        print('Partial downloaded file:\n{0}'.format(os.path.abspath(tmpfile)))
        sys.exit(1)
    finally:
        # Always release the socket and the temp file handle, even on
        # an exception propagating out of the read loop.
        if url:
            url.close()
        if fh:
            fh.close()
    # Prefer the server-provided name (Content-Disposition) over the one
    # derived from the URL.
    filenamealt = filename_from_headers(headers)
    if filenamealt:
        filename = filenamealt
    # add numeric '(x)' suffix if filename already exists
    if os.path.exists(os.path.join(outdir, filename)):
        filename = filename_fix_existing(filename, outdir)
    filename = os.path.join(outdir, filename)
    shutil.move(tmpfile, filename)
    # Check if sizes matches
    if total_size != 'unknown' and total_size != bytes_so_far:
        print(
            '\n\nWARNING!! Downloaded file size mismatches... Probably corrupted...')
    # Check md5 if it was in html header
    if md5_header:
        print('\nValidating MD5 checksum...')
        if md5_header == md5sum(filename):
            print('MD5 checksum passed!')
        else:
            print('MD5 checksum do NOT passed!!!')
    return filename
if __name__ == '__main__':
    # Print usage when no URL is given or help is requested.
    if len(sys.argv) == 1 or sys.argv[1] in {'-h', '--help'}:
        print('Usage: {0} <URL>'.format(sys.argv[0]))
        # BUG FIX: without exiting here, '-h'/'--help' fell through into
        # the loop below and was treated as a URL to download.
        sys.exit(0)
    args = [str(elem) for elem in sys.argv[1:]]
    for link in args:
        print('Downloading ' + link)
        filename = download(link)
        print('\nSaved under {0}'.format(filename))
|
|
"""Manage config entries in Home Assistant."""
import asyncio
import functools
import logging
from typing import Any, Callable, Dict, List, Optional, Set, Union, cast
import uuid
import weakref
import attr
from homeassistant import data_entry_flow, loader
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady, HomeAssistantError
from homeassistant.helpers import entity_registry
from homeassistant.helpers.event import Event
from homeassistant.setup import async_process_deps_reqs, async_setup_component
from homeassistant.util.decorator import Registry
_LOGGER = logging.getLogger(__name__)
_UNDEF: dict = {}
SOURCE_DISCOVERY = "discovery"
SOURCE_IMPORT = "import"
SOURCE_SSDP = "ssdp"
SOURCE_USER = "user"
SOURCE_ZEROCONF = "zeroconf"
# If a user wants to hide a discovery from the UI they can "Ignore" it. The config_entries/ignore_flow
# websocket command creates a config entry with this source and while it exists normal discoveries
# with the same unique id are ignored.
SOURCE_IGNORE = "ignore"
# This is used when a user uses the "Stop Ignoring" button in the UI (the
# config_entries/ignore_flow websocket command). It's triggered after the "ignore" config entry has
# been removed and unloaded.
SOURCE_UNIGNORE = "unignore"
HANDLERS = Registry()
STORAGE_KEY = "core.config_entries"
STORAGE_VERSION = 1
# Deprecated since 0.73
PATH_CONFIG = ".config_entries.json"
SAVE_DELAY = 1
# The config entry has been set up successfully
ENTRY_STATE_LOADED = "loaded"
# There was an error while trying to set up this config entry
ENTRY_STATE_SETUP_ERROR = "setup_error"
# There was an error while trying to migrate the config entry to a new version
ENTRY_STATE_MIGRATION_ERROR = "migration_error"
# The config entry was not ready to be set up yet, but might be later
ENTRY_STATE_SETUP_RETRY = "setup_retry"
# The config entry has not been loaded
ENTRY_STATE_NOT_LOADED = "not_loaded"
# An error occurred when trying to unload the entry
ENTRY_STATE_FAILED_UNLOAD = "failed_unload"
UNRECOVERABLE_STATES = (ENTRY_STATE_MIGRATION_ERROR, ENTRY_STATE_FAILED_UNLOAD)
DISCOVERY_NOTIFICATION_ID = "config_entry_discovery"
DISCOVERY_SOURCES = (
SOURCE_SSDP,
SOURCE_ZEROCONF,
SOURCE_DISCOVERY,
SOURCE_IMPORT,
SOURCE_UNIGNORE,
)
EVENT_FLOW_DISCOVERED = "config_entry_discovered"
CONN_CLASS_CLOUD_PUSH = "cloud_push"
CONN_CLASS_CLOUD_POLL = "cloud_poll"
CONN_CLASS_LOCAL_PUSH = "local_push"
CONN_CLASS_LOCAL_POLL = "local_poll"
CONN_CLASS_ASSUMED = "assumed"
CONN_CLASS_UNKNOWN = "unknown"
# Exception hierarchy for config entry operations; ConfigError is the
# base every more specific error derives from.
class ConfigError(HomeAssistantError):
    """Error while configuring an account."""
class UnknownEntry(ConfigError):
    """Unknown entry specified."""
class OperationNotAllowed(ConfigError):
    """Raised when a config entry operation is not allowed."""
class ConfigEntry:
    """Hold a configuration entry."""
    # __slots__ keeps per-entry memory small; there can be many entries.
    # NOTE(review): "_setup_lock" is declared here but never assigned in
    # this class — looks like a leftover; confirm before removing.
    __slots__ = (
        "entry_id",
        "version",
        "domain",
        "title",
        "data",
        "options",
        "unique_id",
        "system_options",
        "source",
        "connection_class",
        "state",
        "_setup_lock",
        "update_listeners",
        "_async_cancel_retry_setup",
    )
    def __init__(
        self,
        version: int,
        domain: str,
        title: str,
        data: dict,
        source: str,
        connection_class: str,
        system_options: dict,
        options: Optional[dict] = None,
        unique_id: Optional[str] = None,
        entry_id: Optional[str] = None,
        state: str = ENTRY_STATE_NOT_LOADED,
    ) -> None:
        """Initialize a config entry."""
        # Unique id of the config entry
        self.entry_id = entry_id or uuid.uuid4().hex
        # Version of the configuration.
        self.version = version
        # Domain the configuration belongs to
        self.domain = domain
        # Title of the configuration
        self.title = title
        # Config data
        self.data = data
        # Entry options
        self.options = options or {}
        # Entry system options
        self.system_options = SystemOptions(**system_options)
        # Source of the configuration (user, discovery, cloud)
        self.source = source
        # Connection class
        self.connection_class = connection_class
        # State of the entry (LOADED, NOT_LOADED)
        self.state = state
        # Unique ID of this entry.
        self.unique_id = unique_id
        # Listeners to call on update
        self.update_listeners: List = []
        # Function to cancel a scheduled retry
        self._async_cancel_retry_setup: Optional[Callable[[], Any]] = None
    async def async_setup(
        self,
        hass: HomeAssistant,
        *,
        integration: Optional[loader.Integration] = None,
        tries: int = 0,
    ) -> None:
        """Set up an entry.

        May be called with a different integration than self.domain when
        the setup is forwarded to a platform component; in that case the
        entry's own state is left untouched (see the checks on
        self.domain == integration.domain below).
        """
        # "Ignored" entries only exist to suppress discovery; never load.
        if self.source == SOURCE_IGNORE:
            return
        if integration is None:
            integration = await loader.async_get_integration(hass, self.domain)
        try:
            component = integration.get_component()
        except ImportError as err:
            _LOGGER.error(
                "Error importing integration %s to set up %s config entry: %s",
                integration.domain,
                self.domain,
                err,
            )
            # Only record the error on our own state, not a forwarded one.
            if self.domain == integration.domain:
                self.state = ENTRY_STATE_SETUP_ERROR
            return
        if self.domain == integration.domain:
            # The config_flow platform must import cleanly before setup.
            try:
                integration.get_platform("config_flow")
            except ImportError as err:
                _LOGGER.error(
                    "Error importing platform config_flow from integration %s to set up %s config entry: %s",
                    integration.domain,
                    self.domain,
                    err,
                )
                self.state = ENTRY_STATE_SETUP_ERROR
                return
        # Perform migration
        if not await self.async_migrate(hass):
            self.state = ENTRY_STATE_MIGRATION_ERROR
            return
        try:
            result = await component.async_setup_entry(  # type: ignore
                hass, self
            )
            if not isinstance(result, bool):
                _LOGGER.error(
                    "%s.async_setup_entry did not return boolean", integration.domain
                )
                result = False
        except ConfigEntryNotReady:
            # Exponential backoff capped at 2**4 * 5 = 80 seconds.
            self.state = ENTRY_STATE_SETUP_RETRY
            wait_time = 2 ** min(tries, 4) * 5
            tries += 1
            _LOGGER.warning(
                "Config entry for %s not ready yet. Retrying in %d seconds.",
                self.domain,
                wait_time,
            )
            async def setup_again(now: Any) -> None:
                """Run setup again."""
                self._async_cancel_retry_setup = None
                await self.async_setup(hass, integration=integration, tries=tries)
            # Keep the cancel handle so async_unload can stop the retry.
            self._async_cancel_retry_setup = hass.helpers.event.async_call_later(
                wait_time, setup_again
            )
            return
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception(
                "Error setting up entry %s for %s", self.title, integration.domain
            )
            result = False
        # Only store setup result as state if it was not forwarded.
        if self.domain != integration.domain:
            return
        if result:
            self.state = ENTRY_STATE_LOADED
        else:
            self.state = ENTRY_STATE_SETUP_ERROR
    async def async_unload(
        self, hass: HomeAssistant, *, integration: Optional[loader.Integration] = None
    ) -> bool:
        """Unload an entry.
        Returns if unload is possible and was successful.
        """
        # Ignored entries were never set up; "unloading" them is trivial.
        if self.source == SOURCE_IGNORE:
            self.state = ENTRY_STATE_NOT_LOADED
            return True
        if integration is None:
            integration = await loader.async_get_integration(hass, self.domain)
        component = integration.get_component()
        if integration.domain == self.domain:
            if self.state in UNRECOVERABLE_STATES:
                return False
            if self.state != ENTRY_STATE_LOADED:
                # Nothing was set up; just cancel any pending retry.
                if self._async_cancel_retry_setup is not None:
                    self._async_cancel_retry_setup()
                    self._async_cancel_retry_setup = None
                self.state = ENTRY_STATE_NOT_LOADED
                return True
        supports_unload = hasattr(component, "async_unload_entry")
        if not supports_unload:
            if integration.domain == self.domain:
                self.state = ENTRY_STATE_FAILED_UNLOAD
            return False
        try:
            result = await component.async_unload_entry(  # type: ignore
                hass, self
            )
            assert isinstance(result, bool)
            # Only adjust state if we unloaded the component
            if result and integration.domain == self.domain:
                self.state = ENTRY_STATE_NOT_LOADED
            return result
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception(
                "Error unloading entry %s for %s", self.title, integration.domain
            )
            if integration.domain == self.domain:
                self.state = ENTRY_STATE_FAILED_UNLOAD
            return False
    async def async_remove(self, hass: HomeAssistant) -> None:
        """Invoke remove callback on component.

        Best-effort: the optional async_remove_entry hook is called if
        the component defines it, and any error is logged but swallowed.
        """
        if self.source == SOURCE_IGNORE:
            return
        integration = await loader.async_get_integration(hass, self.domain)
        component = integration.get_component()
        if not hasattr(component, "async_remove_entry"):
            return
        try:
            await component.async_remove_entry(  # type: ignore
                hass, self
            )
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception(
                "Error calling entry remove callback %s for %s",
                self.title,
                integration.domain,
            )
    async def async_migrate(self, hass: HomeAssistant) -> bool:
        """Migrate an entry.
        Returns True if config entry is up-to-date or has been migrated.
        """
        handler = HANDLERS.get(self.domain)
        if handler is None:
            _LOGGER.error(
                "Flow handler not found for entry %s for %s", self.title, self.domain
            )
            return False
        # Handler may be a partial
        while isinstance(handler, functools.partial):
            handler = handler.func
        # Same version as the registered flow handler: nothing to do.
        if self.version == handler.VERSION:
            return True
        integration = await loader.async_get_integration(hass, self.domain)
        component = integration.get_component()
        supports_migrate = hasattr(component, "async_migrate_entry")
        if not supports_migrate:
            _LOGGER.error(
                "Migration handler not found for entry %s for %s",
                self.title,
                self.domain,
            )
            return False
        try:
            result = await component.async_migrate_entry(  # type: ignore
                hass, self
            )
            if not isinstance(result, bool):
                _LOGGER.error(
                    "%s.async_migrate_entry did not return boolean", self.domain
                )
                return False
            if result:
                # Persist the migrated data.
                # pylint: disable=protected-access
                hass.config_entries._async_schedule_save()
            return result
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception(
                "Error migrating entry %s for %s", self.title, self.domain
            )
            return False
    def add_update_listener(self, listener: Callable) -> Callable:
        """Listen for when entry is updated.
        Listener: Callback function(hass, entry)
        Returns function to unlisten.

        Only a weak reference is kept, so the caller must hold its own
        reference to the listener to keep it alive.
        """
        weak_listener = weakref.ref(listener)
        self.update_listeners.append(weak_listener)
        return lambda: self.update_listeners.remove(weak_listener)
    def as_dict(self) -> Dict[str, Any]:
        """Return dictionary version of this entry (used for storage)."""
        return {
            "entry_id": self.entry_id,
            "version": self.version,
            "domain": self.domain,
            "title": self.title,
            "data": self.data,
            "options": self.options,
            "system_options": self.system_options.as_dict(),
            "source": self.source,
            "connection_class": self.connection_class,
            "unique_id": self.unique_id,
        }
class ConfigEntriesFlowManager(data_entry_flow.FlowManager):
    """Manage all the config entry flows that are in progress."""
    def __init__(
        self, hass: HomeAssistant, config_entries: "ConfigEntries", hass_config: dict
    ):
        """Initialize the config entry flow manager."""
        super().__init__(hass)
        self.config_entries = config_entries
        self._hass_config = hass_config
    async def async_finish_flow(
        self, flow: data_entry_flow.FlowHandler, result: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Finish a config flow and add an entry.

        Creates the ConfigEntry from the flow result, replacing any
        existing entry that shares the flow's unique_id.
        """
        flow = cast(ConfigFlow, flow)
        # Remove notification if no other discovery config entries in progress
        if not any(
            ent["context"]["source"] in DISCOVERY_SOURCES
            for ent in self.hass.config_entries.flow.async_progress()
            if ent["flow_id"] != flow.flow_id
        ):
            self.hass.components.persistent_notification.async_dismiss(
                DISCOVERY_NOTIFICATION_ID
            )
        # Aborted/form results need no entry handling.
        if result["type"] != data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
            return result
        # Check if config entry exists with unique ID. Unload it.
        existing_entry = None
        if flow.unique_id is not None:
            # Abort all flows in progress with same unique ID.
            for progress_flow in self.async_progress():
                if (
                    progress_flow["handler"] == flow.handler
                    and progress_flow["flow_id"] != flow.flow_id
                    and progress_flow["context"].get("unique_id") == flow.unique_id
                ):
                    self.async_abort(progress_flow["flow_id"])
            # Find existing entry.
            for check_entry in self.config_entries.async_entries(result["handler"]):
                if check_entry.unique_id == flow.unique_id:
                    existing_entry = check_entry
                    break
        # Unload the entry before setting up the new one.
        # We will remove it only after the other one is set up,
        # so that device customizations are not getting lost.
        if (
            existing_entry is not None
            and existing_entry.state not in UNRECOVERABLE_STATES
        ):
            await self.config_entries.async_unload(existing_entry.entry_id)
        entry = ConfigEntry(
            version=result["version"],
            domain=result["handler"],
            title=result["title"],
            data=result["data"],
            options={},
            system_options={},
            source=flow.context["source"],
            connection_class=flow.CONNECTION_CLASS,
            unique_id=flow.unique_id,
        )
        await self.config_entries.async_add(entry)
        if existing_entry is not None:
            await self.config_entries.async_remove(existing_entry.entry_id)
        result["result"] = entry
        return result
    async def async_create_flow(
        self, handler_key: Any, *, context: Optional[Dict] = None, data: Any = None
    ) -> "ConfigFlow":
        """Create a flow for specified handler.
        Handler key is the domain of the component that we want to set up.

        Raises data_entry_flow.UnknownHandler when the integration or its
        config_flow platform cannot be loaded or is not registered.
        """
        try:
            integration = await loader.async_get_integration(self.hass, handler_key)
        except loader.IntegrationNotFound:
            _LOGGER.error("Cannot find integration %s", handler_key)
            raise data_entry_flow.UnknownHandler
        # Make sure requirements and dependencies of component are resolved
        await async_process_deps_reqs(self.hass, self._hass_config, integration)
        try:
            integration.get_platform("config_flow")
        except ImportError as err:
            _LOGGER.error(
                "Error occurred loading config flow for integration %s: %s",
                handler_key,
                err,
            )
            raise data_entry_flow.UnknownHandler
        handler = HANDLERS.get(handler_key)
        if handler is None:
            raise data_entry_flow.UnknownHandler
        if not context or "source" not in context:
            raise KeyError("Context not set or doesn't have a source set")
        flow = cast(ConfigFlow, handler())
        # The first step of the flow is named after the context source.
        flow.init_step = context["source"]
        return flow
    async def async_post_init(
        self, flow: data_entry_flow.FlowHandler, result: dict
    ) -> None:
        """After a flow is initialised trigger new flow notifications."""
        source = flow.context["source"]
        # Create notification.
        if source in DISCOVERY_SOURCES:
            self.hass.bus.async_fire(EVENT_FLOW_DISCOVERED)
            self.hass.components.persistent_notification.async_create(
                title="New devices discovered",
                message=(
                    "We have discovered new devices on your network. "
                    "[Check it out](/config/integrations)"
                ),
                notification_id=DISCOVERY_NOTIFICATION_ID,
            )
class ConfigEntries:
    """Manage the configuration entries.
    An instance of this object is available via `hass.config_entries`.
    """
    def __init__(self, hass: HomeAssistant, hass_config: dict) -> None:
        """Initialize the entry manager."""
        self.hass = hass
        self.flow = ConfigEntriesFlowManager(hass, self, hass_config)
        self.options = OptionsFlowManager(hass)
        self._hass_config = hass_config
        self._entries: List[ConfigEntry] = []
        self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
        EntityRegistryDisabledHandler(hass).async_setup()
    @callback
    def async_domains(self) -> List[str]:
        """Return domains for which we have entries."""
        # Preserve first-seen order while de-duplicating.
        seen: Set[str] = set()
        result = []
        for entry in self._entries:
            if entry.domain not in seen:
                seen.add(entry.domain)
                result.append(entry.domain)
        return result
    @callback
    def async_get_entry(self, entry_id: str) -> Optional[ConfigEntry]:
        """Return entry with matching entry_id."""
        for entry in self._entries:
            if entry_id == entry.entry_id:
                return entry
        return None
    @callback
    def async_entries(self, domain: Optional[str] = None) -> List[ConfigEntry]:
        """Return all entries or entries for a specific domain."""
        if domain is None:
            return list(self._entries)
        return [entry for entry in self._entries if entry.domain == domain]
    async def async_add(self, entry: ConfigEntry) -> None:
        """Add and setup an entry."""
        self._entries.append(entry)
        await self.async_setup(entry.entry_id)
        self._async_schedule_save()
    async def async_remove(self, entry_id: str) -> Dict[str, Any]:
        """Remove an entry.

        Returns {"require_restart": bool}; a restart is required when
        the entry could not be unloaded cleanly.
        """
        entry = self.async_get_entry(entry_id)
        if entry is None:
            raise UnknownEntry
        if entry.state in UNRECOVERABLE_STATES:
            unload_success = entry.state != ENTRY_STATE_FAILED_UNLOAD
        else:
            unload_success = await self.async_unload(entry_id)
        await entry.async_remove(self.hass)
        self._entries.remove(entry)
        self._async_schedule_save()
        # Purge all devices/entities the removed entry owned.
        dev_reg, ent_reg = await asyncio.gather(
            self.hass.helpers.device_registry.async_get_registry(),
            self.hass.helpers.entity_registry.async_get_registry(),
        )
        dev_reg.async_clear_config_entry(entry_id)
        ent_reg.async_clear_config_entry(entry_id)
        # After we have fully removed an "ignore" config entry we can try and rediscover it so that a
        # user is able to immediately start configuring it. We do this by starting a new flow with
        # the 'unignore' step. If the integration doesn't implement async_step_unignore then
        # this will be a no-op.
        if entry.source == SOURCE_IGNORE:
            self.hass.async_create_task(
                self.hass.config_entries.flow.async_init(
                    entry.domain,
                    context={"source": SOURCE_UNIGNORE},
                    data={"unique_id": entry.unique_id},
                )
            )
        return {"require_restart": not unload_success}
    async def async_initialize(self) -> None:
        """Initialize config entry config."""
        # Migrating for config entries stored before 0.73
        config = await self.hass.helpers.storage.async_migrator(
            self.hass.config.path(PATH_CONFIG),
            self._store,
            old_conf_migrate_func=_old_conf_migrator,
        )
        if config is None:
            self._entries = []
            return
        self._entries = [
            ConfigEntry(
                version=entry["version"],
                domain=entry["domain"],
                entry_id=entry["entry_id"],
                data=entry["data"],
                source=entry["source"],
                title=entry["title"],
                # New in 0.79
                connection_class=entry.get("connection_class", CONN_CLASS_UNKNOWN),
                # New in 0.89
                options=entry.get("options"),
                # New in 0.98
                system_options=entry.get("system_options", {}),
                # New in 0.104
                unique_id=entry.get("unique_id"),
            )
            for entry in config["entries"]
        ]
    async def async_setup(self, entry_id: str) -> bool:
        """Set up a config entry.
        Return True if entry has been successfully loaded.
        """
        entry = self.async_get_entry(entry_id)
        if entry is None:
            raise UnknownEntry
        if entry.state != ENTRY_STATE_NOT_LOADED:
            raise OperationNotAllowed
        # Setup Component if not set up yet
        if entry.domain in self.hass.config.components:
            await entry.async_setup(self.hass)
        else:
            # Setting up the component will set up all its config entries
            result = await async_setup_component(
                self.hass, entry.domain, self._hass_config
            )
            if not result:
                return result
        return entry.state == ENTRY_STATE_LOADED
    async def async_unload(self, entry_id: str) -> bool:
        """Unload a config entry."""
        entry = self.async_get_entry(entry_id)
        if entry is None:
            raise UnknownEntry
        if entry.state in UNRECOVERABLE_STATES:
            raise OperationNotAllowed
        return await entry.async_unload(self.hass)
    async def async_reload(self, entry_id: str) -> bool:
        """Reload an entry.
        If an entry was not loaded, will just load.
        """
        unload_result = await self.async_unload(entry_id)
        if not unload_result:
            return unload_result
        return await self.async_setup(entry_id)
    @callback
    def async_update_entry(
        self,
        entry: ConfigEntry,
        *,
        unique_id: Union[str, dict, None] = _UNDEF,
        data: dict = _UNDEF,
        options: dict = _UNDEF,
        system_options: dict = _UNDEF,
    ) -> None:
        """Update a config entry.

        Only the keyword arguments explicitly supplied (i.e. not left at
        the _UNDEF sentinel) are applied; listeners are then notified and
        the store is scheduled for saving.
        """
        if unique_id is not _UNDEF:
            entry.unique_id = cast(Optional[str], unique_id)
        if data is not _UNDEF:
            entry.data = data
        if options is not _UNDEF:
            entry.options = options
        if system_options is not _UNDEF:
            entry.system_options.update(**system_options)
        for listener_ref in entry.update_listeners:
            listener = listener_ref()
            # BUG FIX: entry.update_listeners holds weak references; a
            # listener that has been garbage collected dereferences to
            # None, and calling it raised TypeError.  Skip dead refs.
            if listener is not None:
                self.hass.async_create_task(listener(self.hass, entry))
        self._async_schedule_save()
    async def async_forward_entry_setup(self, entry: ConfigEntry, domain: str) -> bool:
        """Forward the setup of an entry to a different component.
        By default an entry is setup with the component it belongs to. If that
        component also has related platforms, the component will have to
        forward the entry to be setup by that component.
        You don't want to await this coroutine if it is called as part of the
        setup of a component, because it can cause a deadlock.
        """
        # Setup Component if not set up yet
        if domain not in self.hass.config.components:
            result = await async_setup_component(self.hass, domain, self._hass_config)
            if not result:
                return False
        integration = await loader.async_get_integration(self.hass, domain)
        await entry.async_setup(self.hass, integration=integration)
        return True
    async def async_forward_entry_unload(self, entry: ConfigEntry, domain: str) -> bool:
        """Forward the unloading of an entry to a different component."""
        # It was never loaded.
        if domain not in self.hass.config.components:
            return True
        integration = await loader.async_get_integration(self.hass, domain)
        return await entry.async_unload(self.hass, integration=integration)
    def _async_schedule_save(self) -> None:
        """Schedule saving the config entries to storage (debounced)."""
        self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
    @callback
    def _data_to_save(self) -> Dict[str, List[Dict[str, Any]]]:
        """Return data to save."""
        return {"entries": [entry.as_dict() for entry in self._entries]}
async def _old_conf_migrator(old_config: Dict[str, Any]) -> Dict[str, Any]:
"""Migrate the pre-0.73 config format to the latest version."""
return {"entries": old_config}
class ConfigFlow(data_entry_flow.FlowHandler):
    """Base class for config flows with some helpers."""
    def __init_subclass__(cls, domain: Optional[str] = None, **kwargs: Any) -> None:
        """Initialize a subclass, register if possible.

        Subclasses declared as `class MyFlow(ConfigFlow, domain=DOMAIN)`
        are auto-registered in the global HANDLERS registry.
        """
        super().__init_subclass__(**kwargs)  # type: ignore
        if domain is not None:
            HANDLERS.register(domain)(cls)
    CONNECTION_CLASS = CONN_CLASS_UNKNOWN
    @property
    def unique_id(self) -> Optional[str]:
        """Return unique ID if available."""
        # pylint: disable=no-member
        if not self.context:
            return None
        return cast(Optional[str], self.context.get("unique_id"))
    @staticmethod
    @callback
    def async_get_options_flow(config_entry: ConfigEntry) -> "OptionsFlow":
        """Get the options flow for this handler.

        Default raises UnknownHandler; integrations that support options
        override this.
        """
        raise data_entry_flow.UnknownHandler
    @callback
    def _abort_if_unique_id_configured(self, updates: Dict[Any, Any] = None) -> None:
        """Abort if the unique ID is already configured.

        If `updates` is given and not already a subset of the existing
        entry's data, the entry is updated with it before aborting.
        """
        assert self.hass
        if self.unique_id is None:
            return
        for entry in self._async_current_entries():
            if entry.unique_id == self.unique_id:
                if updates is not None and not updates.items() <= entry.data.items():
                    self.hass.config_entries.async_update_entry(
                        entry, data={**entry.data, **updates}
                    )
                raise data_entry_flow.AbortFlow("already_configured")
    async def async_set_unique_id(
        self, unique_id: str, *, raise_on_progress: bool = True
    ) -> Optional[ConfigEntry]:
        """Set a unique ID for the config flow.
        Returns optionally existing config entry with same ID.
        """
        # Abort this flow if another flow for the same ID is underway.
        if raise_on_progress:
            for progress in self._async_in_progress():
                if progress["context"].get("unique_id") == unique_id:
                    raise data_entry_flow.AbortFlow("already_in_progress")
        # pylint: disable=no-member
        self.context["unique_id"] = unique_id
        for entry in self._async_current_entries():
            if entry.unique_id == unique_id:
                return entry
        return None
    @callback
    def _async_current_entries(self) -> List[ConfigEntry]:
        """Return current entries."""
        assert self.hass is not None
        return self.hass.config_entries.async_entries(self.handler)
    @callback
    def _async_current_ids(self, include_ignore: bool = True) -> Set[Optional[str]]:
        """Return current unique IDs."""
        assert self.hass is not None
        return set(
            entry.unique_id
            for entry in self.hass.config_entries.async_entries(self.handler)
            if include_ignore or entry.source != SOURCE_IGNORE
        )
    @callback
    def _async_in_progress(self) -> List[Dict]:
        """Return other in progress flows for current domain."""
        assert self.hass is not None
        return [
            flw
            for flw in self.hass.config_entries.flow.async_progress()
            if flw["handler"] == self.handler and flw["flow_id"] != self.flow_id
        ]
    async def async_step_ignore(self, user_input: Dict[str, Any]) -> Dict[str, Any]:
        """Ignore this config flow.

        Creates a placeholder entry with source "ignore" so future
        discoveries with the same unique_id are suppressed.
        """
        await self.async_set_unique_id(user_input["unique_id"], raise_on_progress=False)
        return self.async_create_entry(title="Ignored", data={})
    async def async_step_unignore(self, user_input: Dict[str, Any]) -> Dict[str, Any]:
        """Rediscover a config entry by it's unique_id."""
        return self.async_abort(reason="not_implemented")
class OptionsFlowManager(data_entry_flow.FlowManager):
    """Flow to set options for a configuration entry."""
    async def async_create_flow(
        self,
        handler_key: Any,
        *,
        context: Optional[Dict[str, Any]] = None,
        data: Optional[Dict[str, Any]] = None,
    ) -> "OptionsFlow":
        """Create an options flow for a config entry.
        Entry_id and flow.handler is the same thing to map entry with flow.

        Raises UnknownEntry for an unknown entry_id and UnknownHandler
        when the entry's domain has no registered config flow.
        """
        entry = self.hass.config_entries.async_get_entry(handler_key)
        if entry is None:
            raise UnknownEntry(handler_key)
        if entry.domain not in HANDLERS:
            raise data_entry_flow.UnknownHandler
        flow = cast(OptionsFlow, HANDLERS[entry.domain].async_get_options_flow(entry))
        return flow
    async def async_finish_flow(
        self, flow: data_entry_flow.FlowHandler, result: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Finish an options flow and update options for configuration entry.
        Flow.handler and entry_id is the same thing to map flow with entry.
        """
        flow = cast(OptionsFlow, flow)
        entry = self.hass.config_entries.async_get_entry(flow.handler)
        if entry is None:
            raise UnknownEntry(flow.handler)
        # Persist the collected options on the entry.
        self.hass.config_entries.async_update_entry(entry, options=result["data"])
        result["result"] = True
        return result
class OptionsFlow(data_entry_flow.FlowHandler):
    """Base class for config option flows."""
    # Set by the flow machinery; for options flows this holds the
    # entry_id of the config entry whose options are being edited.
    handler: str
@attr.s(slots=True)
class SystemOptions:
    """Config entry system options."""
    # When True, entities newly created for this entry start out disabled.
    disable_new_entities = attr.ib(type=bool, default=False)
    def update(self, *, disable_new_entities: bool) -> None:
        """Update properties."""
        self.disable_new_entities = disable_new_entities
    def as_dict(self) -> Dict[str, Any]:
        """Return dictionary version of this config entry's system options."""
        return {"disable_new_entities": self.disable_new_entities}
class EntityRegistryDisabledHandler:
    """Handler to handle when entities related to config entries updating disabled_by."""
    # Seconds to wait after the last registry change before reloading,
    # so that a burst of enable/disable clicks causes a single reload.
    RELOAD_AFTER_UPDATE_DELAY = 30
    def __init__(self, hass: HomeAssistant) -> None:
        """Initialize the handler."""
        self.hass = hass
        # Lazily resolved entity registry (fetched on first event).
        self.registry: Optional[entity_registry.EntityRegistry] = None
        # Config entry ids that need a reload once the delay expires.
        self.changed: Set[str] = set()
        self._remove_call_later: Optional[Callable[[], None]] = None
    @callback
    def async_setup(self) -> None:
        """Set up the disable handler."""
        self.hass.bus.async_listen(
            entity_registry.EVENT_ENTITY_REGISTRY_UPDATED, self._handle_entry_updated
        )
    async def _handle_entry_updated(self, event: Event) -> None:
        """Handle entity registry entry update."""
        # Only care about updates that changed the disabled_by field.
        if (
            event.data["action"] != "update"
            or "disabled_by" not in event.data["changes"]
        ):
            return
        if self.registry is None:
            self.registry = await entity_registry.async_get_registry(self.hass)
        entity_entry = self.registry.async_get(event.data["entity_id"])
        if (
            # Stop if no entry found
            entity_entry is None
            # Stop if entry not connected to config entry
            or entity_entry.config_entry_id is None
            # Stop if the entry got disabled. In that case the entity handles it
            # themselves.
            or entity_entry.disabled_by
        ):
            return
        config_entry = self.hass.config_entries.async_get_entry(
            entity_entry.config_entry_id
        )
        assert config_entry is not None
        if config_entry.entry_id not in self.changed and await support_entry_unload(
            self.hass, config_entry.domain
        ):
            self.changed.add(config_entry.entry_id)
        if not self.changed:
            return
        # We are going to delay reloading on *every* entity registry change so that
        # if a user is happily clicking along, it will only reload at the end.
        if self._remove_call_later:
            self._remove_call_later()
        self._remove_call_later = self.hass.helpers.event.async_call_later(
            self.RELOAD_AFTER_UPDATE_DELAY, self._handle_reload
        )
    async def _handle_reload(self, _now: Any) -> None:
        """Handle a reload."""
        self._remove_call_later = None
        to_reload = self.changed
        self.changed = set()
        # BUG FIX: log the captured set (to_reload); self.changed was
        # just cleared, so the old code always logged an empty list.
        _LOGGER.info(
            "Reloading config entries because disabled_by changed in entity registry: %s",
            ", ".join(to_reload),
        )
        await asyncio.gather(
            *[self.hass.config_entries.async_reload(entry_id) for entry_id in to_reload]
        )
async def support_entry_unload(hass: HomeAssistant, domain: str) -> bool:
    """Return True if *domain*'s integration implements async_unload_entry."""
    integration = await loader.async_get_integration(hass, domain)
    return hasattr(integration.get_component(), "async_unload_entry")
|
|
#
# Copyright Ericsson AB 2013. All rights reserved
#
# Authors: Ildiko Vancsa <ildiko.vancsa@ericsson.com>
# Balazs Gibizer <balazs.gibizer@ericsson.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common functions for MongoDB and DB2 backends
"""
import time
import weakref
from oslo_config import cfg
from oslo_log import log
from oslo_utils import netutils
import pymongo
import six
from six.moves.urllib import parse
from ceilometer.i18n import _
# Module-level logger for these storage backend helpers.
LOG = log.getLogger(__name__)
# FIXME(dhellmann): Configuration options are not part of the Oslo
# library APIs, and should not be used like this.
cfg.CONF.import_opt('max_retries', 'oslo_db.options', group="database")
cfg.CONF.import_opt('retry_interval', 'oslo_db.options', group="database")
# Integer codes used to store an event trait's type in the database.
EVENT_TRAIT_TYPES = {'none': 0, 'string': 1, 'integer': 2, 'float': 3,
                     'datetime': 4}
# Filter comparison operator names mapped to MongoDB query operators.
OP_SIGN = {'lt': '$lt', 'le': '$lte', 'ne': '$ne', 'gt': '$gt', 'ge': '$gte'}
def make_timestamp_range(start, end,
                         start_timestamp_op=None, end_timestamp_op=None):
    """Build a MongoDB query fragment matching timestamps in a range.

    Defaults to an inclusive lower bound ($gte) and an exclusive upper
    bound ($lt); pass ``start_timestamp_op='gt'`` or
    ``end_timestamp_op='le'`` to flip either bound.

    :param start: lower-bound datetime, or a falsy value for no lower bound
    :param end: upper-bound datetime, or a falsy value for no upper bound
    :return: dict mapping Mongo comparison operators to the bound values
    """
    ts_range = {}
    if start:
        lower_op = '$gt' if start_timestamp_op == 'gt' else '$gte'
        ts_range[lower_op] = start
    if end:
        upper_op = '$lte' if end_timestamp_op == 'le' else '$lt'
        ts_range[upper_op] = end
    return ts_range
def make_events_query_from_filter(event_filter):
    """Build a MongoDB query document from an EventFilter.

    :param event_filter: storage.EventFilter object.
    :return: dict usable as a MongoDB find() filter
    """
    query = {}
    ts_range = make_timestamp_range(event_filter.start_timestamp,
                                    event_filter.end_timestamp)
    if ts_range:
        query['timestamp'] = ts_range
    if event_filter.event_type:
        query['event_type'] = event_filter.event_type
    if event_filter.message_id:
        query['_id'] = event_filter.message_id
    if event_filter.traits_filter:
        # Placeholder (None) so the first trait filter can detect it is first.
        query.setdefault('traits')
        for trait_filter in event_filter.traits_filter:
            op = trait_filter.pop('op', 'eq')
            trait_query = {}
            for key, value in trait_filter.items():
                if value is None:
                    # All parameters in EventFilter['traits'] are optional.
                    continue
                if key == 'key':
                    trait_query.setdefault('trait_name', value)
                elif key in ('string', 'integer', 'datetime', 'float'):
                    trait_query.setdefault('trait_type',
                                           EVENT_TRAIT_TYPES[key])
                    trait_query.setdefault('trait_value',
                                           value if op == 'eq'
                                           else {OP_SIGN[op]: value})
            trait_query = {'$elemMatch': trait_query}
            if query['traits'] is None:
                # First trait filter: match directly on the traits array.
                query['traits'] = trait_query
            elif query.get('$and') is None:
                # Second filter: move both under an $and conjunction.
                query.setdefault('$and', [{'traits': query.pop('traits')},
                                          {'traits': trait_query}])
            else:
                query['$and'].append({'traits': trait_query})
    return query
def make_query_from_filter(sample_filter, require_meter=True):
    """Build a MongoDB query document from a SampleFilter.

    :param sample_filter: SampleFilter instance
    :param require_meter: if True and the filter has no meter, raise
    :raises RuntimeError: when require_meter is set and no meter is given
    """
    query = {}
    if sample_filter.user:
        query['user_id'] = sample_filter.user
    if sample_filter.project:
        query['project_id'] = sample_filter.project
    if sample_filter.meter:
        query['counter_name'] = sample_filter.meter
    elif require_meter:
        raise RuntimeError('Missing required meter specifier')
    ts_range = make_timestamp_range(sample_filter.start_timestamp,
                                    sample_filter.end_timestamp,
                                    sample_filter.start_timestamp_op,
                                    sample_filter.end_timestamp_op)
    if ts_range:
        query['timestamp'] = ts_range
    if sample_filter.resource:
        query['resource_id'] = sample_filter.resource
    if sample_filter.source:
        query['source'] = sample_filter.source
    if sample_filter.message_id:
        query['message_id'] = sample_filter.message_id
    # Samples store their metadata under resource_metadata, so prefix the
    # (improved) metaquery keys accordingly.
    metaquery = improve_keys(sample_filter.metaquery, metaquery=True)
    for key, value in metaquery.items():
        query['resource_%s' % key] = value
    return query
def quote_key(key, reverse=False):
    """Yield the dot-separated parts of *key*, percent-quoting '$' parts.

    :param key: key that should be split and quoted for MongoDB storage
    :param reverse: if True, yield the parts in reverse order
    :return: iterator over the (possibly quoted) key parts
    """
    parts = key.split('.')
    if reverse:
        parts.reverse()
    for part in parts:
        # MongoDB rejects keys starting with '$', so percent-encode them.
        yield parse.quote(part) if part.startswith('$') else part
def improve_keys(data, metaquery=False):
    """Improve dict keys that contain '.' or start with '$' for MongoDB.

    {'a.b': 'v'} -> {'a': {'b': 'v'}}
    {'$ab': 'v'} -> {'%24ab': 'v'}

    :param data: dictionary whose keys need to be checked and improved
    :param metaquery: if True, dots are kept (not expanded into nesting)
    :return: the improved dictionary (data is modified in place)
    """
    if not isinstance(data, dict):
        return data
    if metaquery:
        # Iterate over a snapshot of the keys: the body pops and re-inserts
        # keys, and mutating a dict while iterating its live key view is a
        # RuntimeError hazard on Python 3.
        for key in list(data):
            if '.$' in key:
                new_key = '.'.join(quote_key(key))
                data[new_key] = data.pop(key)
    else:
        # Snapshot the items for the same mutation-during-iteration reason.
        for key, value in list(data.items()):
            if isinstance(value, dict):
                # Recurse first; nested dicts are improved in place.
                improve_keys(value)
            if '.' in key:
                # Expand dotted keys into nested dictionaries, building from
                # the innermost part outwards.
                new_dict = {}
                for k in quote_key(key, reverse=True):
                    new = {}
                    new[k] = new_dict if new_dict else data.pop(key)
                    new_dict = new
                data.update(new_dict)
            elif key.startswith('$'):
                new_key = parse.quote(key)
                data[new_key] = data.pop(key)
    return data
def unquote_keys(data):
    """Restore the original ('unquoted') view of keys in *data*.

    Recursively percent-unquotes keys that were quoted by improve_keys
    (i.e. keys starting with '%24').

    :param data: a dictionary (other types are returned untouched)
    :return: data with restored keys if they were 'quoted'
    """
    if isinstance(data, dict):
        # Iterate over a snapshot: the body pops and re-inserts keys, and
        # mutating a dict while iterating its live items view is a
        # RuntimeError hazard on Python 3.
        for key, value in list(data.items()):
            if isinstance(value, dict):
                unquote_keys(value)
            if key.startswith('%24'):
                data[parse.unquote(key)] = data.pop(key)
    return data
class ConnectionPool(object):
    """Cache of weakly-referenced pymongo clients, one per connection key."""
    def __init__(self):
        # Maps pool key -> weakref.ref to an already-created client.
        self._pool = {}
    def connect(self, url):
        """Return a cached MongoDB client for *url*, creating it if needed."""
        connection_options = pymongo.uri_parser.parse_uri(url)
        # Credentials and namespace parts do not identify the server
        # connection, so they are dropped before building the cache key.
        del connection_options['database']
        del connection_options['username']
        del connection_options['password']
        del connection_options['collection']
        # NOTE(review): tuple(dict) yields only the dict KEYS, so this pool
        # key ignores the option values (e.g. the node list) and may share
        # one cache slot between distinct URLs -- confirm this is intended.
        pool_key = tuple(connection_options)
        if pool_key in self._pool:
            # Dereference the weakref; None means the client was collected.
            client = self._pool.get(pool_key)()
            if client:
                return client
        splitted_url = netutils.urlsplit(url)
        log_data = {'db': splitted_url.scheme,
                    'nodelist': connection_options['nodelist']}
        LOG.info(_('Connecting to %(db)s on %(nodelist)s') % log_data)
        client = self._mongo_connect(url)
        # Only a weak reference is kept so idle clients can be reclaimed.
        self._pool[pool_key] = weakref.ref(client)
        return client
    @staticmethod
    def _mongo_connect(url):
        """Create a new proxied MongoClient, logging and re-raising errors."""
        try:
            client = MongoProxy(
                pymongo.MongoClient(
                    url, replicaSet=cfg.CONF.database.mongodb_replica_set
                )
            )
            return client
        except pymongo.errors.ConnectionFailure as e:
            LOG.warn(_('Unable to connect to the database server: '
                       '%(errmsg)s.') % {'errmsg': e})
            raise
class QueryTransformer(object):
    """Translate the rich-query JSON condition syntax into MongoDB filters."""
    # Simple comparison operators mapped to their MongoDB equivalents.
    operators = {"<": "$lt",
                 ">": "$gt",
                 "<=": "$lte",
                 "=<": "$lte",
                 ">=": "$gte",
                 "=>": "$gte",
                 "!=": "$ne",
                 "in": "$in",
                 "=~": "$regex"}
    # Boolean connectives mapped to their MongoDB equivalents.
    complex_operators = {"or": "$or",
                         "and": "$and"}
    # Sort direction names mapped to pymongo sort constants.
    ordering_functions = {"asc": pymongo.ASCENDING,
                          "desc": pymongo.DESCENDING}
    def transform_orderby(self, orderby):
        """Convert [{field: "asc"|"desc"}, ...] into pymongo sort pairs."""
        orderby_filter = []
        for field in orderby:
            # Each element is a single-entry dict: {field_name: direction}.
            field_name = list(field.keys())[0]
            ordering = self.ordering_functions[list(field.values())[0]]
            orderby_filter.append((field_name, ordering))
        return orderby_filter
    @staticmethod
    def _move_negation_to_leaf(condition):
        """Moves every not operator to the leafs.
        Moving is going by applying the De Morgan rules and annihilating
        double negations. The condition tree is rewritten in place.
        """
        def _apply_de_morgan(tree, negated_subtree, negated_op):
            # not(a and b) == (not a) or (not b), and vice versa.
            if negated_op == "and":
                new_op = "or"
            else:
                new_op = "and"
            tree[new_op] = [{"not": child}
                            for child in negated_subtree[negated_op]]
            del tree["not"]
        def transform(subtree):
            # Every node is a single-key dict: {operator: operand(s)}.
            op = list(subtree.keys())[0]
            if op in ["and", "or"]:
                [transform(child) for child in subtree[op]]
            elif op == "not":
                negated_tree = subtree[op]
                negated_op = list(negated_tree.keys())[0]
                if negated_op == "and":
                    _apply_de_morgan(subtree, negated_tree, negated_op)
                    transform(subtree)
                elif negated_op == "or":
                    _apply_de_morgan(subtree, negated_tree, negated_op)
                    transform(subtree)
                elif negated_op == "not":
                    # two consecutive not annihilates themselves
                    value = list(negated_tree.values())[0]
                    new_op = list(value.keys())[0]
                    subtree[new_op] = negated_tree[negated_op][new_op]
                    del subtree["not"]
                    transform(subtree)
        transform(condition)
    def transform_filter(self, condition):
        """Transform a JSON condition tree into a MongoDB filter dict."""
        # in Mongo not operator can only be applied to
        # simple expressions so we have to move every
        # not operator to the leafs of the expression tree
        self._move_negation_to_leaf(condition)
        return self._process_json_tree(condition)
    def _handle_complex_op(self, complex_op, nodes):
        # Recursively translate each child, then join them under $and/$or.
        element_list = []
        for node in nodes:
            element = self._process_json_tree(node)
            element_list.append(element)
        complex_operator = self.complex_operators[complex_op]
        op = {complex_operator: element_list}
        return op
    def _handle_not_op(self, negated_tree):
        # assumes that not is moved to the leaf already
        # so we are next to a leaf
        negated_op = list(negated_tree.keys())[0]
        negated_field = list(negated_tree[negated_op].keys())[0]
        value = negated_tree[negated_op][negated_field]
        if negated_op == "=":
            return {negated_field: {"$ne": value}}
        elif negated_op == "!=":
            return {negated_field: value}
        else:
            return {negated_field: {"$not":
                                    {self.operators[negated_op]: value}}}
    def _handle_simple_op(self, simple_op, nodes):
        # nodes is a single-entry dict: {field_name: value}.
        field_name = list(nodes.keys())[0]
        field_value = list(nodes.values())[0]
        # no operator for equal in Mongo
        if simple_op == "=":
            op = {field_name: field_value}
            return op
        operator = self.operators[simple_op]
        op = {field_name: {operator: field_value}}
        return op
    def _process_json_tree(self, condition_tree):
        # Dispatch on the (single) operator at the root of the tree.
        operator_node = list(condition_tree.keys())[0]
        nodes = list(condition_tree.values())[0]
        if operator_node in self.complex_operators:
            return self._handle_complex_op(operator_node, nodes)
        if operator_node == "not":
            negated_tree = condition_tree[operator_node]
            return self._handle_not_op(negated_tree)
        return self._handle_simple_op(operator_node, nodes)
def safe_mongo_call(call):
    """Decorator retrying *call* on pymongo AutoReconnect errors.

    Retry behaviour is driven by the database.max_retries and
    database.retry_interval configuration options; a negative
    max_retries means retry forever.
    """
    def closure(*args, **kwargs):
        max_retries = cfg.CONF.database.max_retries
        retry_interval = cfg.CONF.database.retry_interval
        attempts = 0
        while True:
            try:
                return call(*args, **kwargs)
            except pymongo.errors.AutoReconnect as err:
                # A negative max_retries means "retry forever".
                if max_retries >= 0 and attempts >= max_retries:
                    LOG.error(_('Unable to reconnect to the primary mongodb '
                                'after %(retries)d retries. Giving up.') %
                              {'retries': max_retries})
                    raise
                LOG.warn(_('Unable to reconnect to the primary mongodb: '
                           '%(errmsg)s. Trying again in %(retry_interval)d '
                           'seconds.') %
                         {'errmsg': err, 'retry_interval': retry_interval})
                attempts += 1
                time.sleep(retry_interval)
    return closure
class MongoConn(object):
    """Callable wrapper executing a pymongo method via safe_mongo_call."""
    def __init__(self, method):
        # The bound pymongo method to be invoked with retry protection.
        self.method = method
    @safe_mongo_call
    def __call__(self, *args, **kwargs):
        return self.method(*args, **kwargs)
# Public (non-underscore) attribute names of the main pymongo entry points;
# attribute access to any of these through MongoProxy gets retry wrapping.
MONGO_METHODS = {typ for typ in dir(pymongo.collection.Collection)
                 if not typ.startswith('_')}
MONGO_METHODS |= {typ for typ in dir(pymongo.MongoClient)
                  if not typ.startswith('_')}
MONGO_METHODS |= {typ for typ in dir(pymongo)
                  if not typ.startswith('_')}
class MongoProxy(object):
    """Proxy adding AutoReconnect retry behaviour to pymongo objects."""
    def __init__(self, conn):
        # The wrapped pymongo object (client, database or collection).
        self.conn = conn
    def __getitem__(self, item):
        """Create and return proxy around the method in the connection.
        :param item: name of the connection
        """
        return MongoProxy(self.conn[item])
    def find(self, *args, **kwargs):
        # We need this modifying method to return a CursorProxy object so that
        # we can handle the Cursor next function to catch the AutoReconnect
        # exception.
        return CursorProxy(self.conn.find(*args, **kwargs))
    def __getattr__(self, item):
        """Wrap MongoDB connection.
        If item is the name of an executable method, for example find or
        insert, wrap this method in the MongoConn.
        Else wrap getting attribute with MongoProxy.
        """
        if item in ('name', 'database'):
            # Plain data attributes: return them untouched.
            return getattr(self.conn, item)
        if item in MONGO_METHODS:
            # Executable pymongo methods get retry protection.
            return MongoConn(getattr(self.conn, item))
        return MongoProxy(getattr(self.conn, item))
    def __call__(self, *args, **kwargs):
        return self.conn(*args, **kwargs)
class CursorProxy(pymongo.cursor.Cursor):
    """Cursor wrapper that retries next() on AutoReconnect errors."""
    def __init__(self, cursor):
        # Deliberately does not call Cursor.__init__: the proxy only
        # delegates to the wrapped cursor instance.
        self.cursor = cursor
    def __getitem__(self, item):
        return self.cursor[item]
    @safe_mongo_call
    def next(self):
        """Wrap Cursor next method.
        This method will be executed before each Cursor next method call.
        """
        try:
            # Keep a pristine clone so the cursor can be restored and the
            # fetch replayed if the connection drops mid-iteration.
            save_cursor = self.cursor.clone()
            return self.cursor.next()
        except pymongo.errors.AutoReconnect:
            self.cursor = save_cursor
            raise
    def __getattr__(self, item):
        # Delegate everything else to the wrapped cursor.
        return getattr(self.cursor, item)
|
|
import math
import os
import timeit;
program_start_time = timeit.default_timer()
import random
random.seed(int(timeit.default_timer()))
from phoneme_set import phoneme_set_39_list
import formatting, preprocessWavs
import general_tools
import logging
# Console + file logging setup for the preprocessing run.
logger = logging.getLogger('PrepTCDTIMIT')
logger.setLevel(logging.DEBUG)
FORMAT = '[$BOLD%(filename)s$RESET:%(lineno)d][%(levelname)-5s]: %(message)s '
formatter = logging.Formatter(formatting.formatter_message(FORMAT, False))
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
# File logger: see below META VARIABLES
##### SCRIPT META VARIABLES #####
DEBUG = False
debug_size = 50
# TODO: MODIFY THESE PARAMETERS for other nbPhonemes of mfccTypes. Save location is updated automatically.
nbMFCCs = 39 # 13= just mfcc (13 features). 26 = also derivative (26 features). 39 = also 2nd derivative (39 features)
nbPhonemes = 39
phoneme_set_list = phoneme_set_39_list  # import list of phonemes,
# convert to dictionary with number mappings (see phoneme_set.py)
values = [i for i in range(0, len(phoneme_set_list))]
phoneme_classes = dict(zip(phoneme_set_list, values))
############### DATA LOCATIONS ###################
dataPreSplit = True #some datasets have a pre-defined TEST set (eg TIMIT)
FRAC_VAL = 0.1 # fraction of training data to be used for validation
root = os.path.expanduser("~/TCDTIMIT/audioSR/") # ( keep the trailing slash)
if dataPreSplit:
    dataset = "TIMIT" #eg TIMIT. You can also manually split up TCDTIMIT according to train/test split in Harte, N.; Gillen, E., "TCD-TIMIT: An Audio-Visual Corpus of Continuous Speech," doi: 10.1109/TMM.2015.2407694
    ## eg TIMIT ##
    dataRootDir = root+dataset+"/fixed" + str(nbPhonemes) + os.sep + dataset
    train_source_path = os.path.join(dataRootDir, 'TRAIN')
    test_source_path = os.path.join(dataRootDir, 'TEST')
    outputDir = root + dataset + "/binary" + str(nbPhonemes) + os.sep + dataset
else:
    ## just a bunch of wav and phn files, not split up in train and test -> create the split yourself.
    dataset = "TCDTIMIT"
    dataRootDir = root + dataset + "/fixed" + str(nbPhonemes) + "_nonSplit" + os.sep + dataset
    outputDir = root + dataset + "/binary" + str(nbPhonemes) + os.sep + os.path.basename(dataRootDir)
    FRAC_TRAINING = 0.9 # TOTAL = TRAINING + TEST = TRAIN + VALIDATION + TEST
### store path
target = os.path.join(outputDir, os.path.basename(dataRootDir) + '_' + str(nbMFCCs) + '_ch');
target_path = target + '.pkl'
if not os.path.exists(outputDir):
    os.makedirs(outputDir)
# Already exists, ask if overwrite
if (os.path.exists(target_path)):
    if (not general_tools.query_yes_no(target_path + " exists. Overwrite?", "no")):
        raise Exception("Not Overwriting")
# set log file
logFile = outputDir + os.sep + os.path.basename(target) + '.log'
fh = logging.FileHandler(logFile, 'w')  # create new logFile
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
### SETUP ###
if DEBUG:
    logger.info('DEBUG mode: \tACTIVE, only a small dataset will be preprocessed')
    target_path = target + '_DEBUG.pkl'
else:
    logger.info('DEBUG mode: \tDEACTIVE')
    debug_size = None
##### The PREPROCESSING itself #####
logger.info('Preprocessing data ...')
# FIRST, gather the WAV and PHN files, generate MFCCs, extract labels to make inputs and targets for the network
# for a dataset containing no TRAIN/TEST subdivision, just a bunch of wavs -> choose training set yourself
def processDataset(FRAC_TRAINING, data_source_path, logger=None):
    """Preprocess a non-presplit dataset and split it into training/test.

    :param FRAC_TRAINING: fraction of the data kept for training; the
                          remainder becomes the test set
    :param data_source_path: directory containing the wav/phn files
    :param logger: logger used for progress output
    :return: X/y/valid_frames lists for the training and test sets
    """
    logger.info(' Data: %s ', data_source_path)
    X_all, y_all, valid_frames_all = preprocessWavs.preprocess_dataset(source_path=data_source_path, nbMFCCs=nbMFCCs, logger=logger, debug=debug_size)
    assert len(X_all) == len(y_all) == len(valid_frames_all)
    logger.info(' Loading data complete.')
    logger.debug('Type and shape/len of X_all')
    logger.debug('type(X_all): {}'.format(type(X_all)))
    logger.debug('type(X_all[0]): {}'.format(type(X_all[0])))
    logger.debug('type(X_all[0][0]): {}'.format(type(X_all[0][0])))
    logger.debug('type(X_all[0][0][0]): {}'.format(type(X_all[0][0][0])))
    logger.info('Creating Validation index ...')
    total_size = len(X_all)  # TOTAL = TRAINING + TEST = TRAIN + VAL + TEST
    total_training_size = int(math.ceil(FRAC_TRAINING * total_size))  # TRAINING = TRAIN + VAL
    test_size = total_size - total_training_size
    # split off a 'test' dataset
    # Bug fix: sample the test indices from the WHOLE dataset. The previous
    # range(0, total_training_size) (copied from the validation split) could
    # never place the trailing utterances into the test set.
    test_idx = random.sample(range(0, total_size), test_size)
    test_idx = [int(i) for i in test_idx]
    # ensure that the test set isn't empty
    if DEBUG:
        test_idx[0] = 0
        test_idx[1] = 1
    logger.info('Separating test and training set ...')
    test_idx_set = set(test_idx)  # O(1) membership tests in the loop below
    X_training = []
    y_training = []
    valid_frames_training = []
    X_test = []
    y_test = []
    valid_frames_test = []
    for i in range(len(X_all)):
        if i in test_idx_set:
            X_test.append(X_all[i])
            y_test.append(y_all[i])
            valid_frames_test.append(valid_frames_all[i])
        else:
            X_training.append(X_all[i])
            y_training.append(y_all[i])
            valid_frames_training.append(valid_frames_all[i])
    assert len(X_test) == test_size
    assert len(X_training) == total_training_size
    return X_training, y_training, valid_frames_training, X_test, y_test, valid_frames_test
def processDatasetSplit(train_source_path, test_source_path, logger=None):
    """Preprocess a dataset that already ships with a TRAIN/TEST split.

    :param train_source_path: directory with the training wav/phn files
    :param test_source_path: directory with the test wav/phn files
    :param logger: logger used for progress output
    :return: X/y/valid_frames lists for the training and test sets
    """
    logger.info(' Training data: %s ', train_source_path)
    train_data = preprocessWavs.preprocess_dataset(
        source_path=train_source_path, logger=logger, nbMFCCs=nbMFCCs, debug=debug_size)
    X_training, y_training, valid_frames_training = train_data
    logger.info(' Test data: %s', test_source_path)
    test_data = preprocessWavs.preprocess_dataset(
        source_path=test_source_path, logger=logger, nbMFCCs=nbMFCCs, debug=debug_size)
    X_test, y_test, valid_frames_test = test_data
    return X_training, y_training, valid_frames_training, X_test, y_test, valid_frames_test
# Run the preprocessing that matches the dataset layout chosen above.
if dataPreSplit: X_training, y_training, valid_frames_training, X_test, y_test, valid_frames_test = \
    processDatasetSplit(train_source_path, test_source_path, logger)
else: X_training, y_training, valid_frames_training, X_test, y_test, valid_frames_test = \
    processDataset(FRAC_TRAINING, dataRootDir, logger)
# SECOND, split off a 'validation' set from the training set. The remainder is the 'train' set
total_training_size = len(X_training)
val_size = int(math.ceil(total_training_size * FRAC_VAL))
train_size = total_training_size - val_size
val_idx = random.sample(range(0, total_training_size), val_size) # choose random indices to be validation data
val_idx = [int(i) for i in val_idx]
logger.info('Length of training')
logger.info(" train X: %s", len(X_training))
# ensure that the validation set isn't empty
if DEBUG:
    val_idx[0] = 0
    val_idx[1] = 1
logger.info('Separating training set into validation and train ...')
X_train = []
y_train = []
valid_frames_train = []
X_val = []
y_val = []
valid_frames_val = []
for i in range(len(X_training)):
    if i in val_idx:
        X_val.append(X_training[i])
        y_val.append(y_training[i])
        valid_frames_val.append(valid_frames_training[i])
    else:
        X_train.append(X_training[i])
        y_train.append(y_training[i])
        valid_frames_train.append(valid_frames_training[i])
assert len(X_val) == val_size
# Print some information
logger.info('Length of train, val, test')
logger.info(" train X: %s", len(X_train))
logger.info(" train y: %s", len(y_train))
logger.info(" train valid_frames: %s", len(valid_frames_train))
logger.info(" val X: %s", len(X_val))
logger.info(" val y: %s", len(y_val))
logger.info(" val valid_frames: %s", len(valid_frames_val))
logger.info(" test X: %s", len(X_test))
logger.info(" test y: %s", len(y_test))
logger.info(" test valid_frames: %s", len(valid_frames_test))
### NORMALIZE data ###
logger.info('Normalizing data ...')
logger.info(' Each channel mean=0, sd=1 ...')
mean_val, std_val, _ = preprocessWavs.calc_norm_param(X_train)
X_train = preprocessWavs.normalize(X_train, mean_val, std_val)
X_val = preprocessWavs.normalize(X_val, mean_val, std_val)
X_test = preprocessWavs.normalize(X_test, mean_val, std_val)
logger.debug('X train')
logger.debug(' %s %s', type(X_train), len(X_train))
logger.debug(' %s %s', type(X_train[0]), X_train[0].shape)
logger.debug(' %s %s', type(X_train[0][0]), X_train[0][0].shape)
logger.debug(' %s %s', type(X_train[0][0][0]), X_train[0][0].shape)
logger.debug('y train')
logger.debug(' %s %s', type(y_train), len(y_train))
logger.debug(' %s %s', type(y_train[0]), y_train[0].shape)
logger.debug(' %s %s', type(y_train[0][0]), y_train[0][0].shape)
# make sure we're working with float32
X_data_type = 'float32'
X_train = preprocessWavs.set_type(X_train, X_data_type)
X_val = preprocessWavs.set_type(X_val, X_data_type)
X_test = preprocessWavs.set_type(X_test, X_data_type)
y_data_type = 'int32'
y_train = preprocessWavs.set_type(y_train, y_data_type)
y_val = preprocessWavs.set_type(y_val, y_data_type)
y_test = preprocessWavs.set_type(y_test, y_data_type)
valid_frames_data_type = 'int32'
valid_frames_train = preprocessWavs.set_type(valid_frames_train, valid_frames_data_type)
valid_frames_val = preprocessWavs.set_type(valid_frames_val, valid_frames_data_type)
valid_frames_test = preprocessWavs.set_type(valid_frames_test, valid_frames_data_type)
# print some more to check that cast succeeded
logger.debug('X train')
logger.debug(' %s %s', type(X_train), len(X_train))
logger.debug(' %s %s', type(X_train[0]), X_train[0].shape)
logger.debug(' %s %s', type(X_train[0][0]), X_train[0][0].shape)
logger.debug(' %s %s', type(X_train[0][0][0]), X_train[0][0].shape)
logger.debug('y train')
logger.debug(' %s %s', type(y_train), len(y_train))
logger.debug(' %s %s', type(y_train[0]), y_train[0].shape)
logger.debug(' %s %s', type(y_train[0][0]), y_train[0][0].shape)
### STORE DATA ###
logger.info('Saving data to %s', target_path)
dataList = [X_train, y_train, valid_frames_train, X_val, y_val, valid_frames_val, X_test, y_test, valid_frames_test]
general_tools.saveToPkl(target_path, dataList)
# these can be used to evaluate new data, so you don't have to load the whole dataset just to normalize
meanStd_path = os.path.dirname(outputDir) + os.sep + os.path.basename(dataRootDir) + "MeanStd.pkl"
logger.info('Saving Mean and Std_val to %s', meanStd_path)
dataList = [mean_val, std_val]
general_tools.saveToPkl(meanStd_path, dataList)
logger.info('Preprocessing complete!')
logger.info('Total time: {:.3f}'.format(timeit.default_timer() - program_start_time))
|
|
from __future__ import division, print_function,absolute_import
import pylab as plt
import amitgroup.plot as gr
import numpy as np
import amitgroup as ag
import os
import pnet
import matplotlib.pylab as plot
from pnet.cyfuncs import index_map_pooling
from Queue import Queue
"""This tutorial introduces restricted boltzmann machines (RBM) using Theano.
Boltzmann Machines (BMs) are a particular form of energy-based model which
contain hidden variables. Restricted Boltzmann Machines further restrict BMs
to those without visible-visible and hidden-hidden connections.
"""
import cPickle
import gzip
import time
import PIL.Image
import numpy
import theano
import theano.tensor as T
import os
from theano.tensor.shared_randomstreams import RandomStreams
from utils import tile_raster_images
import sklearn.cluster
class RBM(object):
    """Restricted Boltzmann Machine (RBM) """
    def __init__(self, input=None, n_visible=784, n_hidden=200, \
                 W=None, hbias=None, vbias=None, numpy_rng=None,
                 theano_rng=None):
        """
        RBM constructor. Defines the parameters of the model along with
        basic operations for inferring hidden from visible (and vice-versa),
        as well as for performing CD updates.

        :param input: None for standalone RBMs or symbolic variable if RBM is
        part of a larger graph.
        :param n_visible: number of visible units
        :param n_hidden: number of hidden units
        :param W: None for standalone RBMs or symbolic variable pointing to a
        shared weight matrix in case RBM is part of a DBN network; in a DBN,
        the weights are shared between RBMs and layers of a MLP
        :param hbias: None for standalone RBMs or symbolic variable pointing
        to a shared hidden units bias vector in case RBM is part of a
        different network
        :param vbias: None for standalone RBMs or a symbolic variable
        pointing to a shared visible units bias
        """
        self.n_visible = n_visible
        self.n_hidden = n_hidden
        if numpy_rng is None:
            # create a number generator (fixed seed -> reproducible init)
            numpy_rng = numpy.random.RandomState(1234)
        if theano_rng is None:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        if W is None:
            # W is initialized with `initial_W` which is uniformely
            # sampled from -4*sqrt(6./(n_visible+n_hidden)) and
            # 4*sqrt(6./(n_hidden+n_visible)) the output of uniform if
            # converted using asarray to dtype theano.config.floatX so
            # that the code is runable on GPU
            initial_W = numpy.asarray(numpy_rng.uniform(
                low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                size=(n_visible, n_hidden)),
                dtype=theano.config.floatX)
            # theano shared variables for weights and biases
            W = theano.shared(value=initial_W, name='W', borrow=True)
        if hbias is None:
            # create shared variable for hidden units bias
            hbias = theano.shared(value=numpy.zeros(n_hidden,
                                                    dtype=theano.config.floatX),
                                  name='hbias', borrow=True)
        if vbias is None:
            # create shared variable for visible units bias
            vbias = theano.shared(value=numpy.zeros(n_visible,
                                                    dtype=theano.config.floatX),
                                  name='vbias', borrow=True)
        # initialize input layer for standalone RBM or layer0 of DBN
        self.input = input
        if not input:
            self.input = T.matrix('input')
        self.W = W
        self.hbias = hbias
        self.vbias = vbias
        self.theano_rng = theano_rng
        # **** WARNING: It is not a good idea to put things in this list
        # other than shared variables created in this function.
        self.params = [self.W, self.hbias, self.vbias]
def free_energy(self, v_sample):
''' Function to compute the free energy '''
wx_b = T.dot(v_sample, self.W) + self.hbias
vbias_term = T.dot(v_sample, self.vbias)
hidden_term = T.sum(T.log(1 + T.exp(wx_b)), axis=1)
return -hidden_term - vbias_term
def propup(self, vis):
'''This function propagates the visible units activation upwards to
the hidden units
Note that we return also the pre-sigmoid activation of the
layer. As it will turn out later, due to how Theano deals with
optimizations, this symbolic variable will be needed to write
down a more stable computational graph (see details in the
reconstruction cost function)
'''
pre_sigmoid_activation = T.dot(vis, self.W) + self.hbias
return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
def sample_h_given_v(self, v0_sample):
''' This function infers state of hidden units given visible units '''
# compute the activation of the hidden units given a sample of
# the visibles
pre_sigmoid_h1, h1_mean = self.propup(v0_sample)
# get a sample of the hiddens given their activation
# Note that theano_rng.binomial returns a symbolic sample of dtype
# int64 by default. If we want to keep our computations in floatX
# for the GPU we need to specify to return the dtype floatX
h1_sample = self.theano_rng.binomial(size=h1_mean.shape,
n=1, p=h1_mean,
dtype=theano.config.floatX)
return [pre_sigmoid_h1, h1_mean, h1_sample]
def propdown(self, hid):
'''This function propagates the hidden units activation downwards to
the visible units
Note that we return also the pre_sigmoid_activation of the
layer. As it will turn out later, due to how Theano deals with
optimizations, this symbolic variable will be needed to write
down a more stable computational graph (see details in the
reconstruction cost function)
'''
pre_sigmoid_activation = T.dot(hid, self.W.T) + self.vbias
return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
def sample_v_given_h(self, h0_sample):
''' This function infers state of visible units given hidden units '''
# compute the activation of the visible given the hidden sample
pre_sigmoid_v1, v1_mean = self.propdown(h0_sample)
# get a sample of the visible given their activation
# Note that theano_rng.binomial returns a symbolic sample of dtype
# int64 by default. If we want to keep our computations in floatX
# for the GPU we need to specify to return the dtype floatX
v1_sample = self.theano_rng.binomial(size=v1_mean.shape,
n=1, p=v1_mean,
dtype=theano.config.floatX)
return [pre_sigmoid_v1, v1_mean, v1_sample]
def gibbs_hvh(self, h0_sample):
''' This function implements one step of Gibbs sampling,
starting from the hidden state'''
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v1_sample)
return [pre_sigmoid_v1, v1_mean, v1_sample,
pre_sigmoid_h1, h1_mean, h1_sample]
def gibbs_vhv(self, v0_sample):
''' This function implements one step of Gibbs sampling,
starting from the visible state'''
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v0_sample)
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h1_sample)
return [pre_sigmoid_h1, h1_mean, h1_sample,
pre_sigmoid_v1, v1_mean, v1_sample]
    def get_cost_updates(self, lr=0.1, persistent=None, k=1):
        """This functions implements one step of CD-k or PCD-k

        :param lr: learning rate used to train the RBM
        :param persistent: None for CD. For PCD, shared variable
            containing old state of Gibbs chain. This must be a shared
            variable of size (batch size, number of hidden units).
        :param k: number of Gibbs steps to do in CD-k/PCD-k

        Returns a proxy for the cost and the updates dictionary. The
        dictionary contains the update rules for weights and biases but
        also an update of the shared variable used to store the persistent
        chain, if one is used.
        """
        # compute positive phase
        pre_sigmoid_ph, ph_mean, ph_sample = self.sample_h_given_v(self.input)
        # decide how to initialize persistent chain:
        # for CD, we use the newly generate hidden sample
        # for PCD, we initialize from the old state of the chain
        if persistent is None:
            chain_start = ph_sample
        else:
            chain_start = persistent
        # perform actual negative phase
        # in order to implement CD-k/PCD-k we need to scan over the
        # function that implements one gibbs step k times.
        # Read Theano tutorial on scan for more information :
        # http://deeplearning.net/software/theano/library/scan.html
        # the scan will return the entire Gibbs chain
        [pre_sigmoid_nvs, nv_means, nv_samples,
         pre_sigmoid_nhs, nh_means, nh_samples], updates = \
            theano.scan(self.gibbs_hvh,
                        # the None are place holders, saying that
                        # chain_start is the initial state corresponding to the
                        # 6th output
                        outputs_info=[None, None, None, None, None, chain_start],
                        n_steps=k)
        # determine gradients on RBM parameters
        # note that we only need the sample at the end of the chain
        chain_end = nv_samples[-1]
        cost = T.mean(self.free_energy(self.input)) - T.mean(
            self.free_energy(chain_end))
        # We must not compute the gradient through the gibbs sampling
        gparams = T.grad(cost, self.params, consider_constant=[chain_end])
        # constructs the update dictionary
        for gparam, param in zip(gparams, self.params):
            # make sure that the learning rate is of the right dtype
            updates[param] = param - gparam * T.cast(lr,
                                                     dtype=theano.config.floatX)
        if persistent:
            # Note that this works only if persistent is a shared variable
            updates[persistent] = nh_samples[-1]
            # pseudo-likelihood is a better proxy for PCD
            monitoring_cost = self.get_pseudo_likelihood_cost(updates)
        else:
            # reconstruction cross-entropy is a better proxy for CD
            monitoring_cost = self.get_reconstruction_cost(updates,
                                                           pre_sigmoid_nvs[-1])
        return monitoring_cost, updates
def get_pseudo_likelihood_cost(self, updates):
"""Stochastic approximation to the pseudo-likelihood"""
# index of bit i in expression p(x_i | x_{\i})
bit_i_idx = theano.shared(value=0, name='bit_i_idx')
# binarize the input image by rounding to nearest integer
xi = T.round(self.input)
# calculate free energy for the given bit configuration
fe_xi = self.free_energy(xi)
# flip bit x_i of matrix xi and preserve all other bits x_{\i}
# Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], but assigns
# the result to xi_flip, instead of working in place on xi.
xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])
# calculate free energy with bit flipped
fe_xi_flip = self.free_energy(xi_flip)
# equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\i})))
cost = T.mean(self.n_visible * T.log(T.nnet.sigmoid(fe_xi_flip -
fe_xi)))
# increment bit_i_idx % number as part of updates
updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible
return cost
def get_reconstruction_cost(self, updates, pre_sigmoid_nv):
"""Approximation to the reconstruction error
Note that this function requires the pre-sigmoid activation as
input. To understand why this is so you need to understand a
bit about how Theano works. Whenever you compile a Theano
function, the computational graph that you pass as input gets
optimized for speed and stability. This is done by changing
several parts of the subgraphs with others. One such
optimization expresses terms of the form log(sigmoid(x)) in
terms of softplus. We need this optimization for the
cross-entropy since sigmoid of numbers larger than 30. (or
even less then that) turn to 1. and numbers smaller than
-30. turn to 0 which in terms will force theano to compute
log(0) and therefore we will get either -inf or NaN as
cost. If the value is expressed in terms of softplus we do not
get this undesirable behaviour. This optimization usually
works fine, but here we have a special case. The sigmoid is
applied inside the scan op, while the log is
outside. Therefore Theano will only see log(scan(..)) instead
of log(sigmoid(..)) and will not apply the wanted
optimization. We can not go and replace the sigmoid in scan
with something else also, because this only needs to be done
on the last step. Therefore the easiest and more efficient way
is to get also the pre-sigmoid activation as an output of
scan, and apply both the log and sigmoid outside scan such
that Theano can catch and optimize the expression.
"""
cross_entropy = T.mean(
T.sum(self.input * T.log(T.nnet.sigmoid(pre_sigmoid_nv)) +
(1 - self.input) * T.log(1 - T.nnet.sigmoid(pre_sigmoid_nv)),
axis=1))
return cross_entropy
def test_rbm(learning_rate=0.05, training_epochs=30,
             dataset='/Users/jiajunshen/Documents/Research/partsNet/data/mnist.pkl.gz', batch_size=20,
             n_chains=20, n_samples=10, output_folder='rbm_plots',
             n_hidden=20):
    """Train an RBM with PCD-15 on the pooled features and return it.

    :param learning_rate: learning rate used for training the RBM
    :param training_epochs: number of epochs used for training
    :param dataset: path to the pickled dataset.
        NOTE(review): currently unused -- data actually comes from the
        module-level globals ``shuffledExtract`` / ``shuffledLabel``.
    :param batch_size: size of a batch used to train the RBM
    :param n_chains: number of parallel Gibbs chains for sampling.
        NOTE(review): only referenced by the commented-out sampling code.
    :param n_samples: number of samples to plot for each chain.
        NOTE(review): likewise only used by the commented-out code.
    :param output_folder: directory (created if missing) that the process
        chdirs into for plot output.
    :param n_hidden: number of hidden units.
    :returns: the trained ``RBM`` instance.
    """
    # NOTE(review): reads globals rather than the ``dataset`` argument.
    datasets = load_data(shuffledExtract,shuffledLabel)
    train_set_x, train_set_y = datasets[0]
    # test_set_x, test_set_y = datasets[2]
    numVisible = shuffledExtract.shape[1]
    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
    # allocate symbolic variables for the data
    index = T.lscalar()    # index to a [mini]batch
    x = T.matrix('x')      # the data is presented as rasterized images
    rng = numpy.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2 ** 30))
    # initialize storage for the persistent chain (state = hidden
    # layer of chain)
    persistent_chain = theano.shared(numpy.zeros((batch_size, n_hidden),
                                                 dtype=theano.config.floatX),
                                     borrow=True)
    # construct the RBM class
    rbm = RBM(input=x, n_visible= numVisible,
              n_hidden=n_hidden, numpy_rng=rng, theano_rng=theano_rng)
    # get the cost and the gradient corresponding to one step of CD-15
    cost, updates = rbm.get_cost_updates(lr=learning_rate,
                                         persistent=persistent_chain, k=15)
    #################################
    #     Training the RBM          #
    #################################
    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)
    os.chdir(output_folder)
    # it is ok for a theano function to have no output
    # the purpose of train_rbm is solely to update the RBM parameters
    train_rbm = theano.function([index], cost,
                                updates=updates,
                                givens={x: train_set_x[index * batch_size:
                                                       (index + 1) * batch_size]},
                                name='train_rbm')
    plotting_time = 0.
    # NOTE(review): time.clock() and xrange are Python-2-era APIs
    # (time.clock was removed in Python 3.8).
    start_time = time.clock()
    # go through training epochs
    for epoch in xrange(training_epochs):
        # go through the training set
        mean_cost = []
        for batch_index in xrange(n_train_batches):
            mean_cost += [train_rbm(batch_index)]
        print('Training epoch %d, cost is ' % epoch, numpy.mean(mean_cost))
        # Plot filters after each training epoch
        plotting_start = time.clock()
        # Construct image from the weight matrix
        # image = PIL.Image.fromarray(tile_raster_images(
        #     X=rbm.W.get_value(borrow=True).T,
        #     img_shape=(28, 28), tile_shape=(10, 10),
        #     tile_spacing=(1, 1)))
        # image.save('filters_at_epoch_%i.png' % epoch)
        plotting_stop = time.clock()
        plotting_time += (plotting_stop - plotting_start)
    end_time = time.clock()
    pretraining_time = (end_time - start_time) - plotting_time
    print ('Training took %f minutes' % (pretraining_time / 60.))
    #################################
    #     Sampling from the RBM     #
    #################################
    # (Disabled: the original sampling/plotting code initialized a
    # persistent visible chain from random test examples, ran
    # rbm.gibbs_vhv via theano.scan every `plot_every` steps, and tiled
    # `n_samples` x `n_chains` mean-field samples into samples.png.
    # It is the only consumer of the n_chains / n_samples parameters.)
    return rbm
def extract(ims, allLayers):
    """Feed *ims* through each layer's ``extract`` method in sequence.

    :param ims: input batch accepted by the first layer.
    :param allLayers: iterable of layer objects exposing ``extract``.
    :returns: the output of the final layer.
    """
    current = ims
    for layer in allLayers:
        current = layer.extract(current)
    return current
def partsPool(originalPartsRegion, numParts):
    """One-hot pooling of a coded region.

    :param originalPartsRegion: 2-D integer array of part codes; -1 marks
        "no part coded here".
    :param numParts: total number of parts (size of the last axis).
    :returns: float array of shape (1, 1, numParts) with 1 at every part
        index that occurs in the region, 0 elsewhere.
    """
    partsGrid = np.zeros((1, 1, numParts))
    # every non -1 entry is a part index that is present in the region
    presentParts = originalPartsRegion[originalPartsRegion != -1]
    partsGrid[0, 0, presentParts] = 1
    return partsGrid
def test(ims, labels, net):
    """Classify *ims* with *net* and return the elementwise correctness
    mask (predictions compared against *labels*)."""
    predictions = net.classify(ims)
    return predictions == labels
def testInvestigation(ims, labels, net):
    """Classify with a (images, 500) batch-hint tuple and report mistakes.

    :returns: tuple of (np.where result holding misclassified indices,
        raw predictions).
    """
    predicted = net.classify((ims, 500))
    wrong = np.where(predicted != labels)
    return wrong, predicted
def load_data(allDataX, allDataLabel):
    """Wrap an in-memory dataset in Theano shared variables.

    Fixes vs. previous revision: removed the dead ``import urllib`` (a
    leftover of the commented-out MNIST download path) and corrected the
    docstring, which documented a ``dataset`` path parameter this function
    does not take.

    :param allDataX: 2-D array-like, one example per row; only the first
        5000 rows are used.
    :param allDataLabel: 1-D array-like of integer targets with the same
        leading length as ``allDataX``.
    :returns: ``[(train_set_x, train_set_y)]`` where ``train_set_x`` is a
        floatX shared variable and ``train_set_y`` is an int32 cast of a
        floatX shared variable.
    """
    print('... loading data')
    # Only the first 5000 examples are used for training.
    train_set = (allDataX[:5000], allDataLabel[:5000])

    def shared_dataset(data_xy, borrow=True):
        """Load the dataset into shared variables.

        Shared variables let Theano copy the whole dataset into GPU
        memory once; copying each minibatch on demand would be slow.
        """
        data_x, data_y = data_xy
        print(data_x.shape, data_y.shape)
        shared_x = theano.shared(numpy.asarray(data_x,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        # GPU storage must be float, so labels are stored as floatX and
        # cast back to int32 for use as indices.
        return shared_x, T.cast(shared_y, 'int32')

    train_set_x, train_set_y = shared_dataset(train_set)
    return [(train_set_x, train_set_y)]
#def trainPOP():
if pnet.parallel.main(__name__):
    # ------------------------------------------------------------------
    # Stage 1: load a pre-trained parts model and code training images.
    # ------------------------------------------------------------------
    #X = np.load("testMay151.npy")
    #X = np.load("_3_100*6*6_1000*1*1_Jun_16_danny.npy")
    X = np.load("original6*6 2.npy")
    #X = np.load("sequential6*6.npy")
    model = X.item()
    # get num of Parts
    numParts = model['layers'][1]['num_parts']
    net = pnet.PartsNet.load_from_dict(model)
    allLayer = net.layers
    ims,labels = ag.io.load_mnist('training')
    trainingDataNum = 1000
    firstLayerShape = 6
    # code the first trainingDataNum images with the first two layers
    extractedFeature = extract(ims[0:trainingDataNum],allLayer[0:2])[0]
    print(extractedFeature.shape)
    extractedFeature = extractedFeature.reshape(extractedFeature.shape[0:3])
    partsPlot = np.zeros((numParts,firstLayerShape,firstLayerShape))
    partsCodedNumber = np.zeros(numParts)
    imgRegion= [[] for x in range(numParts)]
    partsRegion = [[] for x in range(numParts)]
    # accumulate, per part, every 6x6 image patch that codes to it
    for i in range(trainingDataNum):
        codeParts = extractedFeature[i]
        for m in range(29 - firstLayerShape):
            for n in range(29 - firstLayerShape):
                if(codeParts[m,n]!=-1):
                    partsPlot[codeParts[m,n]]+=ims[i,m:m+firstLayerShape,n:n+firstLayerShape]
                    partsCodedNumber[codeParts[m,n]]+=1
    for j in range(numParts):
        # NOTE(review): a part that never fired yields a 0/0 division here
        partsPlot[j] = partsPlot[j]/partsCodedNumber[j]
    # ------------------------------------------------------------------
    # Stage 2: collect 12x12 image regions and neighbourhood activations.
    # ------------------------------------------------------------------
    secondLayerCodedNumber = 0
    secondLayerShape = 12
    frame = (secondLayerShape - firstLayerShape)/2
    frame = int(frame)
    totalRange = 29 - firstLayerShape
    if 1:
        for i in range(trainingDataNum):
            codeParts = extractedFeature[i]
            for m in range(totalRange)[frame:totalRange - frame]:
                for n in range(totalRange)[frame:totalRange - frame]:
                    if(codeParts[m,n]!=-1):
                        imgRegion[codeParts[m,n]].append(ims[i, m - frame:m + secondLayerShape - frame,n - frame:n + secondLayerShape - frame])
                        secondLayerCodedNumber+=1
                        # one-hot pooling of the (2*frame+1)^2 neighbourhood
                        partsGrid = partsPool(codeParts[m-frame:m+frame + 1,n-frame:n+frame + 1],numParts)
                        partsRegion[codeParts[m,n]].append(partsGrid)
    newPartsRegion = []
    for i in range(numParts):
        newPartsRegion.append(np.asarray(partsRegion[i],dtype = np.uint8))
    np.save('/var/tmp/partsRegionOriginalJun29.npy',newPartsRegion)
    np.save('/var/tmp/imgRegionOriginalJun29.npy',imgRegion)
    ## second-layer ("super") parts: one 10-part layer per first-layer part
    numSecondLayerParts = 10
    allPartsLayer = [[pnet.PartsLayer(numSecondLayerParts,(1,1),
                                      settings=dict(outer_frame = 0,
                                                    threshold = 5,
                                                    sample_per_image = 1,
                                                    max_samples=10000,
                                                    min_prob = 0.005,
                                                    #min_llh = -40
                                                    ))]
                     for i in range(numParts)]
    allPartsLayerImg = np.zeros((numParts,numSecondLayerParts,secondLayerShape,secondLayerShape))
    allPartsLayerImgNumber = np.zeros((numParts,numSecondLayerParts))
    zeroParts = 0
    imgRegionPool = [[] for i in range(numParts * numSecondLayerParts)]
    for i in range(numParts):
        if(not partsRegion[i]):
            continue
        allPartsLayer[i][0].train_from_samples(np.array(partsRegion[i]),None)
        extractedFeaturePart = extract(np.array(partsRegion[i],dtype = np.uint8),allPartsLayer[i])[0]
        print(extractedFeaturePart.shape)
        # average the 12x12 regions assigned to each second-layer part
        for j in range(len(partsRegion[i])):
            if(extractedFeaturePart[j,0,0,0]!=-1):
                partIndex = extractedFeaturePart[j,0,0,0]
                allPartsLayerImg[i,partIndex]+=imgRegion[i][j]
                imgRegionPool[i * numSecondLayerParts + partIndex].append(imgRegion[i][j])
                allPartsLayerImgNumber[i,partIndex]+=1
            else:
                zeroParts+=1
    for i in range(numParts):
        for j in range(numSecondLayerParts):
            if(allPartsLayerImgNumber[i,j]):
                allPartsLayerImg[i,j] = allPartsLayerImg[i,j]/allPartsLayerImgNumber[i,j]
    #np.save("exPartsOriginalJun29.npy",allPartsLayer)
    if 1:
        """
        Visualize the SuperParts
        """
        settings = {'interpolation':'nearest','cmap':plot.cm.gray,}
        settings['vmin'] = 0
        settings['vmax'] = 1
        plotData = np.ones(((2 + secondLayerShape)*100+2,(2+secondLayerShape)*(numSecondLayerParts + 1)+2))*0.8
        visualShiftParts = 0
        if 0:
            allPartsPlot = np.zeros((20,numSecondLayerParts + 1,12,12))
            gr.images(partsPlot.reshape(numParts,6,6),zero_to_one=False,vmin = 0, vmax = 1)
            allPartsPlot[:,0] = 0.5
            allPartsPlot[:,0,3:9,3:9] = partsPlot[20:40]
            allPartsPlot[:,1:,:,:] = allPartsLayerImg[20:40]
            gr.images(allPartsPlot.reshape(20 * (numSecondLayerParts + 1),12,12),zero_to_one=False, vmin = 0, vmax =1)
        elif 1:
            # column 0: the 6x6 first-layer part; columns 1..10: its super parts
            for i in range(numSecondLayerParts + 1):
                for j in range(numParts):
                    if i == 0:
                        plotData[5 + j * (2 + secondLayerShape):5+firstLayerShape + j * (2 + secondLayerShape), 5 + i * (2 + secondLayerShape): 5+firstLayerShape + i * (2 + secondLayerShape)] = partsPlot[j+visualShiftParts]
                    else:
                        plotData[2 + j * (2 + secondLayerShape):2 + secondLayerShape+ j * (2 + secondLayerShape),2 + i * (2 + secondLayerShape): 2+ secondLayerShape + i * (2 + secondLayerShape)] = allPartsLayerImg[j+visualShiftParts,i-1]
            plot.figure(figsize=(10,40))
            plot.axis('off')
            plot.imshow(plotData, **settings)
            plot.savefig('originalExParts_2.pdf',format='pdf',dpi=900)
        else:
            pass
    """
    Train A Class-Model Layer
    """
    digits = range(10)
    sup_ims = []
    sup_labels = []
    classificationTrainingNum = 1000
    for d in digits:
        ims0 = ag.io.load_mnist('training', [d], selection = slice(classificationTrainingNum), return_labels = False)
        sup_ims.append(ims0)
        sup_labels.append(d * np.ones(len(ims0),dtype = np.int64))
    sup_ims = np.concatenate(sup_ims, axis = 0)
    sup_labels = np.concatenate(sup_labels,axis = 0)
    #thirLevelCurx = np.load('./thirdLevelCurx.npy')
    thirLevelCurx = np.load('./thirdLevelCurx_LargeMatch.npy')[:5000]
    # 4x4 spatial pooling of the third-level codes
    poolHelper = pnet.PoolingLayer(shape = (4,4),strides = (4,4))
    thirLevelCurx = np.array(thirLevelCurx, dtype = np.int64)
    pooledExtract = poolHelper.extract((thirLevelCurx[:,:,:,np.newaxis],500))
    testImg_curX = np.load('./thirdLevelCurx_Test.npy')[:5000]
    testImg_curX = np.array(testImg_curX, dtype = np.int64)
    pooledTest = poolHelper.extract((testImg_curX[:,:,:,np.newaxis],500))
    print(pooledExtract.sum(axis = 3))
    print(pooledExtract.shape)
    sup_labels = sup_labels[:5000]
    sup_ims = sup_ims[:5000]
    index = np.arange(5000)
    # NOTE(review): np.random.shuffle works in place and returns None,
    # so randomIndex is always None (it is never used again).
    randomIndex = np.random.shuffle(index)
    pooledExtract = pooledExtract.reshape(5000,-1)
    shuffledExtract = pooledExtract[index]
    shuffledLabel = sup_labels[index]
    testImg = sup_ims[index]
    datasets = load_data(shuffledExtract,shuffledLabel)
    train_set_x, train_set_y = datasets[0]
    #testRbm = test_rbm()
    #weights = testRbm.W.get_value(borrow=True)
    #np.save('weights20Hidden.npy',weights)
    # previously-trained RBM weights: (4, 4, 500 features, 20 hidden)
    weights = np.load('weights20Hidden.npy')
    weights = weights.reshape(4,4,500,20)
    newsup_labels = []
    classificationTrainingNum = 100
    for d in digits:
        newsup_labels.append(d * np.ones(100,dtype = np.int64))
    sup_labels = np.concatenate(newsup_labels,axis = 0)
    trainingImg_curX_all = np.load('./thirdLevelCurx_LargeMatch.npy')
    trainingImg_curX = trainingImg_curX_all[:1000]
    # take the first 100 examples of each digit class
    for d in digits:
        trainingImg_curX[d * 100: (d + 1)*100] = trainingImg_curX_all[d * 1000: d*1000+100]
    trainingImg_curX = np.array(trainingImg_curX, dtype = np.int64)
    pooledTrain = poolHelper.extract((trainingImg_curX[:,:,:,np.newaxis],500))
    trainLabels = sup_labels
    newPooledExtract = np.array(pooledTrain[:1000]).reshape(1000,4,4,500)
    # Expand each active feature to the nearer half of its k-means cluster
    # (clusters computed on the RBM weight vectors per 4x4 cell).
    if 1:
        for p in range(4):
            for q in range(4):
                location1 = newPooledExtract[:,p,q,:]
                data = weights[p,q,:500,:]
                X = np.array(data.reshape(500,20),dtype=np.double)
                kmeans = sklearn.cluster.k_means(np.array(X,dtype = np.double),10)[1]
                # the biggest cluster is treated as background and skipped
                skipIndex = np.argmax(np.bincount(kmeans))
                #Put in all the array of group index here
                groupIndexArray = [[] for m in range(10)]
                for i in range(10):
                    if i == skipIndex:
                        continue
                    testIndex = i
                    indexArray = np.where(kmeans == testIndex)[0]
                    groupIndexArray[testIndex].append(indexArray)
                poolingIndex = [[] for m in range(500)]
                # for every feature active somewhere: the closest half of
                # its cluster (by squared distance in weight space)
                for k in np.where(np.max(location1,axis=0)!=0)[0]:
                    if kmeans[k] == skipIndex:
                        continue
                    else:
                        distanceArray = np.array([np.sum((X[m,:]-X[k,:]) * (X[m,:]-X[k,:])) for m in groupIndexArray[kmeans[k]][0]])
                        #print(distanceArray.shape)
                        numPooling = (distanceArray.shape[0] + 1)//2
                        # print(numPooling)
                        finalPooling = groupIndexArray[kmeans[k]][0][np.argsort(distanceArray)[:numPooling]]
                        #print(k, finalPooling)
                        poolingIndex[k].append(finalPooling)
                # switch on every pooled neighbour of each active feature
                for r in range(1000):
                    print(r)
                    for m in range(500):
                        if newPooledExtract[r,p,q,m] == 1:
                            if len(poolingIndex[m])==0:
                                continue
                            else:
                                # print(poolingIndex[m][0])
                                newPooledExtract[r,p,q,:][poolingIndex[m][0]] = 1
                                #pass
    if 0:
        # NOTE(review): dead branch -- `trainImg` is never defined (only
        # testImg is), so enabling this would raise NameError.
        for p in range(5):
            print(trainLabels[p])
            gr.images(trainImg[p])
            for m in range(4):
                for n in range(4):
                    gr.images(np.array([allPartsLayerImg[(k%500)//10,k - ((k%500)//10) * 10] for k in np.where(newPooledExtract[p,m,n,:]==1)[0]]))
    testImg_curX = np.load('./thirdLevelCurx_Test.npy')
    testImg_curX = np.array(testImg_curX, dtype = np.int64)
    pooledTest = poolHelper.extract((testImg_curX[:,:,:,np.newaxis],500))
    testingNum = 1000
    testImg,testLabels = ag.io.load_mnist('testing')
    print(pooledTest.shape)
    newPooledExtractTest = np.array(pooledTest[:testingNum]).reshape(testingNum,4,4,500)
    # Same cluster-based expansion for the test features.
    if 1:
        for p in range(4):
            for q in range(4):
                location1 = newPooledExtractTest[:,p,q,:]
                data = weights[p,q,:500,:]
                X = np.array(data.reshape(500,20),dtype=np.double)
                # NOTE(review): k-means is re-run here, so the test-time
                # clusters can differ from the training-time ones above.
                kmeans = sklearn.cluster.k_means(np.array(X,dtype = np.double),10)[1]
                skipIndex = np.argmax(np.bincount(kmeans))
                #Put in all the array of group index here
                groupIndexArray = [[] for m in range(10)]
                for i in range(10):
                    if i == skipIndex:
                        continue
                    testIndex = i
                    indexArray = np.where(kmeans == testIndex)[0]
                    groupIndexArray[testIndex].append(indexArray)
                poolingIndex = [[] for m in range(500)]
                for k in np.where(np.max(location1,axis=0)!=0)[0]:
                    if kmeans[k] == skipIndex:
                        continue
                    else:
                        distanceArray = np.array([np.sum((X[m,:]-X[k,:]) * (X[m,:]-X[k,:])) for m in groupIndexArray[kmeans[k]][0]])
                        #print(distanceArray.shape)
                        numPooling = (distanceArray.shape[0] + 1)//2
                        # print(numPooling)
                        finalPooling = groupIndexArray[kmeans[k]][0][np.argsort(distanceArray)[:numPooling]]
                        #print(k, finalPooling)
                        poolingIndex[k].append(finalPooling)
                for r in range(testingNum):
                    print(r)
                    for m in range(500):
                        if newPooledExtractTest[r,p,q,m] == 1:
                            if len(poolingIndex[m])==0:
                                continue
                            else:
                                # print(poolingIndex[m][0])
                                newPooledExtractTest[r,p,q,:][poolingIndex[m][0]] = 1
                                #pass
    newPooledExtract = newPooledExtract.reshape(1000,-1)
    newPooledExtractTest = newPooledExtractTest.reshape(testingNum,-1)
    #Train a class Model#
    testLabels = testLabels[:testingNum]
    svmLayer = pnet.SVMClassificationLayer(C = 1.0)
    svmLayer.train(newPooledExtract[:1000], trainLabels[:1000])
    print("Training Success!")
    testImg_Input = np.array(newPooledExtractTest, dtype = np.int64)
    testImg_batches = np.array_split(newPooledExtractTest[:testingNum], 200)
    print(np.mean(svmLayer.extract(testImg_Input) == testLabels))
    if 0:
        # parallel batched evaluation (disabled)
        testLabels_batches = np.array_split(testLabels, 200)
        args = [tup + (svmLayer,) for tup in zip(testImg_batches, testLabels_batches)]
        corrects = 0
        total = 0
        def format_error_rate(pr):
            # render 1 - accuracy as a percentage string
            return "{:.2f}%".format(100 * (1-pr))
        def clustering(X,L,layer):
            # elementwise correctness mask for one batch
            return L == layer.extract(X)
        print("Testing Starting...")
        for i, res in enumerate(pnet.parallel.starmap_unordered(clustering,args)):
            if i !=0 and i % 20 ==0:
                print("{0:05}/{1:05} Error rate: {2}".format(total, len(ims),format_error_rate(pr)))
            corrects += res.sum()
            print(res.sum())
            total += res.size
            pr = corrects / total
        print("Final error rate:", format_error_rate(pr))
|
|
import abc
import os
import time
import markupsafe
import requests
from django.db import models
from framework.auth import Auth
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError, PermissionsError
from mako.lookup import TemplateLookup
from osf.models.base import BaseModel, ObjectIDMixin
from osf.models.external import ExternalAccount
from osf.models.node import AbstractNode
from osf.models.user import OSFUser
from osf.modm_compat import Q
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from website import settings
from addons.base import logger, serializer
from website.oauth.signals import oauth_complete
from website.util import waterbutler_url_for
# Mako template lookup used to render addon configuration-error fragments
# (see BaseNodeSettings.render_config_error).
lookup = TemplateLookup(
    directories=[
        settings.TEMPLATES_PATH
    ],
    default_filters=[
        'unicode',  # default filter; must set explicitly when overriding
        # FIXME: Temporary workaround for data stored in wrong format in DB. Unescape it before it
        # gets re-escaped by Markupsafe. See [#OSF-4432]
        'temp_ampersand_fixer',
        'h',
    ],
    imports=[
        # FIXME: Temporary workaround for data stored in wrong format in DB. Unescape it before it
        # gets re-escaped by Markupsafe. See [#OSF-4432]
        'from website.util.sanitize import temp_ampersand_fixer',
    ]
)
class BaseAddonSettings(ObjectIDMixin, BaseModel):
    """Abstract base for all addon settings records.

    Provides soft deletion (the ``deleted`` flag) plus ``on_add`` /
    ``on_delete`` lifecycle hooks for subclasses to override.
    """
    # Soft-delete flag: records are marked deleted rather than removed.
    deleted = models.BooleanField(default=False)

    class Meta:
        abstract = True

    @property
    def config(self):
        """The Django AppConfig registered for this addon's app."""
        return self._meta.app_config

    @property
    def short_name(self):
        """The addon's short machine name (taken from its app config)."""
        return self.config.short_name

    def delete(self, save=True):
        """Soft-delete: set the flag, fire ``on_delete``, optionally save."""
        self.deleted = True
        self.on_delete()
        if save:
            self.save()

    def undelete(self, save=True):
        """Undo a soft-delete: clear the flag, fire ``on_add``, optionally save."""
        self.deleted = False
        self.on_add()
        if save:
            self.save()

    def to_json(self, user):
        """Minimal JSON-serializable summary; subclasses extend this dict."""
        return {
            'addon_short_name': self.config.short_name,
            'addon_full_name': self.config.full_name,
        }

    #############
    # Callbacks #
    #############

    def on_add(self):
        """Called when the addon is added (or re-added) to the owner (User or Node)."""
        pass

    def on_delete(self):
        """Called when the addon is deleted from the owner (User or Node)."""
        pass
class BaseUserSettings(BaseAddonSettings):
    """Abstract base for per-user addon settings (one record per user/addon)."""
    # The user these settings belong to.
    owner = models.OneToOneField(OSFUser, blank=True, null=True, related_name='%(app_label)s_user_settings')

    class Meta:
        abstract = True

    @property
    def public_id(self):
        # No public identifier by default; subclasses may override.
        return None

    @property
    def has_auth(self):
        """Whether the user has added credentials for this addon."""
        return False

    # TODO: Test me @asmacdo
    @property
    def nodes_authorized(self):
        """Get authorized, non-deleted nodes. Returns an empty list if the
        attached add-on does not include a node model.
        """
        model = self.config.node_settings
        if not model:
            return []
        return [obj.owner for obj in model.objects.filter(user_settings=self, owner__is_deleted=False).select_related('owner')]

    @property
    def can_be_merged(self):
        # A subclass is mergeable iff it defines a `merge` method.
        return hasattr(self, 'merge')

    def to_json(self, user):
        """Extend the base summary with auth state and authorized nodes."""
        ret = super(BaseUserSettings, self).to_json(user)
        ret['has_auth'] = self.has_auth
        ret.update({
            'nodes': [
                {
                    '_id': node._id,
                    'url': node.url,
                    'title': node.title,
                    'registered': node.is_registration,
                    'api_url': node.api_url
                }
                for node in self.nodes_authorized
            ]
        })
        return ret

    def __repr__(self):
        if self.owner:
            return '<{cls} owned by user {uid}>'.format(cls=self.__class__.__name__, uid=self.owner._id)
        return '<{cls} with no owner>'.format(cls=self.__class__.__name__)
@oauth_complete.connect
def oauth_complete(provider, account, user):
    """Signal handler: enable the provider's addon for *user* once an
    OAuth flow finishes (no-op if either user or account is missing)."""
    if user and account:
        user.add_addon(account.provider)
        user.save()
class BaseOAuthUserSettings(BaseUserSettings):
    """Abstract user-settings base for addons that authenticate via OAuth.

    Tracks, per node, which of the user's ``ExternalAccount`` records the
    node may use (``oauth_grants``), and provides grant/revoke/merge logic.

    Fixed in this revision: ``merge`` iterated ``for k, v in meta:`` --
    iterating a dict yields only its *keys*, so each key was unpacked as
    a (k, v) pair, corrupting or crashing the metadata merge. It now
    iterates ``meta.iteritems()``, matching the rest of this module.
    """
    # Keeps track of what nodes have been given permission to use external
    # accounts belonging to the user.
    oauth_grants = DateTimeAwareJSONField(default=dict, blank=True)
    # example:
    # {
    #     '<Node._id>': {
    #         '<ExternalAccount._id>': {
    #             <metadata>
    #         },
    #     }
    # }
    #
    # metadata here is the specific to each addon.

    # The existence of this property is used to determine whether or not
    # an addon instance is an "OAuth addon" in
    # AddonModelMixin.get_oauth_addons().
    oauth_provider = None

    serializer = serializer.OAuthAddonSerializer

    class Meta:
        abstract = True

    @property
    def has_auth(self):
        """Whether at least one external account is linked for this provider."""
        return self.external_accounts.exists()

    @property
    def external_accounts(self):
        """The user's list of ``ExternalAccount`` instances for this provider"""
        return self.owner.external_accounts.filter(provider=self.oauth_provider.short_name)

    def delete(self, save=True):
        """Revoke every grant for this provider's accounts, then soft-delete."""
        # NOTE(review): external_accounts is already filtered by
        # oauth_provider.short_name; the extra filter on config.short_name
        # assumes the two names agree -- confirm for addons where they differ.
        for account in self.external_accounts.filter(provider=self.config.short_name):
            self.revoke_oauth_access(account, save=False)
        super(BaseOAuthUserSettings, self).delete(save=save)

    def grant_oauth_access(self, node, external_account, metadata=None):
        """Give a node permission to use an ``ExternalAccount`` instance.

        :raises PermissionsError: if the account does not belong to this user.
        """
        # ensure the user owns the external_account
        if not self.owner.external_accounts.filter(id=external_account.id).exists():
            raise PermissionsError()
        metadata = metadata or {}
        # create an entry for the node, if necessary
        if node._id not in self.oauth_grants:
            self.oauth_grants[node._id] = {}
        # create an entry for the external account on the node, if necessary
        if external_account._id not in self.oauth_grants[node._id]:
            self.oauth_grants[node._id][external_account._id] = {}
        # update the metadata with the supplied values
        for key, value in metadata.iteritems():
            self.oauth_grants[node._id][external_account._id][key] = value
        self.save()

    @must_be_logged_in
    def revoke_oauth_access(self, external_account, auth, save=True):
        """Revoke all access to an ``ExternalAccount``.

        Deauthorizes every node addon connected to the account, revokes the
        remote grant if this user is the account's only user, then removes
        the account from every node's grant dict.

        TODO: This should accept node and metadata params in the future, to
        allow fine-grained revocation of grants. That's not yet been needed,
        so it's not yet been implemented.
        """
        for node in self.get_nodes_with_oauth_grants(external_account):
            try:
                node.get_addon(external_account.provider, deleted=True).deauthorize(auth=auth)
            except AttributeError:
                # No associated addon settings despite oauth grant
                pass
        if external_account.osfuser_set.count() == 1 and \
                external_account.osfuser_set.filter(id=auth.user.id).exists():
            # Only this user is using the account, so revoke remote access as well.
            self.revoke_remote_oauth_access(external_account)
        for key in self.oauth_grants:
            self.oauth_grants[key].pop(external_account._id, None)
        if save:
            self.save()

    def revoke_remote_oauth_access(self, external_account):
        """ Makes outgoing request to remove the remote oauth grant
        stored by third-party provider.

        Individual addons must override this method, as it is addon-specific behavior.
        Not all addon providers support this through their API, but those that do
        should also handle the case where this is called with an external_account
        with invalid credentials, to prevent a user from being unable to disconnect
        an account.
        """
        pass

    def verify_oauth_access(self, node, external_account, metadata=None):
        """Verify that access has been previously granted.

        If metadata is not provided, this checks only if the node can access the
        account. This is suitable to check to see if the node's addon settings
        is still connected to an external account (i.e., the user hasn't revoked
        it in their user settings pane).

        If metadata is provided, this checks to see that all key/value pairs
        have been granted. This is suitable for checking access to a particular
        folder or other resource on an external provider.
        """
        metadata = metadata or {}
        # ensure the grant exists
        try:
            grants = self.oauth_grants[node._id][external_account._id]
        except KeyError:
            return False
        # Verify every key/value pair is in the grants dict
        for key, value in metadata.iteritems():
            if key not in grants or grants[key] != value:
                return False
        return True

    def get_nodes_with_oauth_grants(self, external_account):
        """Generator of non-deleted nodes which have grants for this account."""
        for node_id, grants in self.oauth_grants.iteritems():
            node = AbstractNode.load(node_id)
            # guard against stale node ids in oauth_grants: load() may
            # return None, which previously raised on .is_deleted
            if node is not None and external_account._id in grants.keys() and not node.is_deleted:
                yield node

    def get_attached_nodes(self, external_account):
        """Yield nodes whose addon settings are connected to this account."""
        for node in self.get_nodes_with_oauth_grants(external_account):
            if node is None:
                continue
            node_settings = node.get_addon(self.oauth_provider.short_name)
            if node_settings is None:
                continue
            if node_settings.external_account == external_account:
                yield node

    def merge(self, user_settings):
        """Merge `user_settings` into this instance, then reattach any
        connected node settings to this record.

        :raises TypeError: if `user_settings` is a different addon class.
        """
        if user_settings.__class__ is not self.__class__:
            raise TypeError('Cannot merge different addons')
        for node_id, data in user_settings.oauth_grants.iteritems():
            if node_id not in self.oauth_grants:
                self.oauth_grants[node_id] = data
            else:
                node_grants = user_settings.oauth_grants[node_id].iteritems()
                for ext_acct, meta in node_grants:
                    if ext_acct not in self.oauth_grants[node_id]:
                        self.oauth_grants[node_id][ext_acct] = meta
                    else:
                        # BUG FIX: was `for k, v in meta:`, which iterates
                        # the dict's keys and unpacks each key as (k, v).
                        for k, v in meta.iteritems():
                            if k not in self.oauth_grants[node_id][ext_acct]:
                                self.oauth_grants[node_id][ext_acct][k] = v
        user_settings.oauth_grants = {}
        user_settings.save()
        try:
            config = settings.ADDONS_AVAILABLE_DICT[
                self.oauth_provider.short_name
            ]
            Model = config.models['nodesettings']
        except KeyError:
            pass
        else:
            connected = Model.find(Q('user_settings', 'eq', user_settings))
            for node_settings in connected:
                node_settings.user_settings = self
                node_settings.save()
        self.save()

    def to_json(self, user):
        """Extend the base summary with serialized external accounts."""
        ret = super(BaseOAuthUserSettings, self).to_json(user)
        ret['accounts'] = self.serializer(
            user_settings=self
        ).serialized_accounts
        return ret

    #############
    # Callbacks #
    #############

    def on_delete(self):
        """When the user deactivates the addon, clear auth for connected nodes.
        """
        super(BaseOAuthUserSettings, self).on_delete()
        nodes = [AbstractNode.load(node_id) for node_id in self.oauth_grants.keys()]
        for node in nodes:
            node_addon = node.get_addon(self.oauth_provider.short_name)
            if node_addon and node_addon.user_settings == self:
                node_addon.clear_auth()
class BaseNodeSettings(BaseAddonSettings):
    """Per-node (project/component) settings for a single addon.

    Subclasses hold one addon's configuration for one node; the node is
    reachable through ``owner``. Also defines the node lifecycle callback
    hooks (fork, register, privacy change, contributor removal, ...) that
    concrete addons may override.
    """
    # One settings object per node per addon.
    owner = models.OneToOneField(AbstractNode, null=True, blank=True, related_name='%(app_label)s_node_settings')

    class Meta:
        abstract = True

    @property
    def complete(self):
        """Whether or not this addon is properly configured
        :rtype bool:
        """
        raise NotImplementedError()

    @property
    def configured(self):
        """Whether or not this addon has had a folder connected.
        :rtype bool:
        """
        return self.complete

    @property
    def has_auth(self):
        """Whether the node has added credentials for this addon."""
        # Default: no credentials. OAuth-based subclasses override this.
        return False

    def to_json(self, user):
        """Serialize these settings plus node and viewer context for templates.

        :param User user: viewer whose node permissions are reported
        :rtype: dict
        """
        ret = super(BaseNodeSettings, self).to_json(user)
        ret.update({
            'user': {
                'permissions': self.owner.get_permissions(user)
            },
            'node': {
                'id': self.owner._id,
                'api_url': self.owner.api_url,
                'url': self.owner.url,
                'is_registration': self.owner.is_registration,
            },
            # Template file name only, not the full path.
            'node_settings_template': os.path.basename(self.config.node_settings_template),
        })
        return ret

    def render_config_error(self, data):
        """Render the generic addon configuration-error template fragment.

        :param dict data: extra keyword context passed to the template def
        :returns: rendered HTML
        """
        # Note: `config` is added to `self` in `AddonConfig::__init__`.
        template = lookup.get_template('project/addon/config_error.mako')
        return template.get_def('config_error').render(
            title=self.config.full_name,
            name=self.config.short_name,
            **data
        )

    #############
    # Callbacks #
    #############

    def before_page_load(self, node, user):
        """Hook run before a node page loads; no-op by default.

        :param User user:
        :param Node node:
        """
        pass

    def before_remove_contributor(self, node, removed):
        """Hook run before a contributor is removed; no-op by default.

        :param Node node:
        :param User removed:
        """
        pass

    def after_remove_contributor(self, node, removed, auth=None):
        """Hook run after a contributor is removed; no-op by default.

        :param Node node:
        :param User removed:
        """
        pass

    def before_make_public(self, node):
        """Hook run before the node is made public; no-op by default.

        :param Node node:
        :returns: Alert message or None
        """
        pass

    def before_make_private(self, node):
        """Hook run before the node is made private; no-op by default.

        :param Node node:
        :returns: Alert message or None
        """
        pass

    def after_set_privacy(self, node, permissions):
        """Hook run after node privacy changes; no-op by default.

        :param Node node:
        :param str permissions:
        """
        pass

    def before_fork(self, node, user):
        """Return warning text to display if user auth will be copied to a
        fork.

        :param Node node:
        :param User user:
        :returns: Alert message or None
        """
        if hasattr(self, 'user_settings'):
            if self.user_settings is None:
                # Addon never configured: nothing will transfer.
                return (
                    u'Because you have not configured the {addon} add-on, your authentication will not be '
                    u'transferred to the forked {category}. You may authorize and configure the {addon} add-on '
                    u'in the new fork on the settings page.'
                ).format(
                    addon=self.config.full_name,
                    category=node.project_or_component,
                )
            elif self.user_settings and self.user_settings.owner == user:
                # Forking user is the authorizer: their auth transfers.
                return (
                    u'Because you have authorized the {addon} add-on for this '
                    u'{category}, forking it will also transfer your authentication to '
                    u'the forked {category}.'
                ).format(
                    addon=self.config.full_name,
                    category=node.project_or_component,
                )
            else:
                # Authorized by someone else: auth does NOT transfer.
                return (
                    u'Because the {addon} add-on has been authorized by a different '
                    u'user, forking it will not transfer authentication to the forked '
                    u'{category}. You may authorize and configure the {addon} add-on '
                    u'in the new fork on the settings page.'
                ).format(
                    addon=self.config.full_name,
                    category=node.project_or_component,
                )

    def after_fork(self, node, fork, user, save=True):
        """Clone these settings onto a fork.

        Auth is never carried over by the base implementation; OAuth
        subclasses re-attach it when appropriate.

        :param Node node:
        :param Node fork:
        :param User user:
        :param bool save:
        :returns: cloned settings
        """
        clone = self.clone()
        clone.user_settings = None
        clone.owner = fork

        if save:
            clone.save()

        return clone

    def before_register(self, node, user):
        """Hook run before the node is registered; no-op by default.

        :param Node node:
        :param User user:
        :returns: Alert message or None
        """
        pass

    def after_register(self, node, registration, user, save=True):
        """Hook run after the node is registered.

        :param Node node:
        :param Node registration:
        :param User user:
        :param bool save:
        :returns: Tuple of cloned settings and alert message
        """
        # Default: nothing is copied onto the registration.
        return None, None

    def after_delete(self, node, user):
        """Hook run after the node is deleted; no-op by default.

        :param Node node:
        :param User user:
        """
        pass
############
# Archiver #
############
class GenericRootNode(object):
    """Stand-in root for addon file trees: path ``'/'`` with an empty name."""
    name = ''
    path = '/'
class BaseStorageAddon(BaseModel):
    """Mixin for addons that expose a traversable file tree."""

    root_node = GenericRootNode()

    class Meta:
        abstract = True

    @property
    def archive_folder_name(self):
        """Display name for this addon's archive folder, including the
        connected folder name when one is set.
        """
        base = 'Archive of {addon}'.format(addon=self.config.full_name)
        folder = getattr(self, 'folder_name', '').lstrip('/').strip()
        if not folder:
            return base
        return base + ': {folder}'.format(folder=folder)

    def _get_fileobj_child_metadata(self, filenode, user, cookie=None, version=None):
        """Fetch child metadata for *filenode* from the waterbutler API.

        :raises HTTPError: when waterbutler responds with a non-200 status
        :returns: list of child metadata dicts
        """
        query = {
            'provider': self.config.short_name,
            'path': filenode.get('path', ''),
            'node': self.owner,
            'user': user,
            'view_only': True,
        }
        if cookie:
            query['cookie'] = cookie
        if version:
            query['version'] = version
        metadata_url = waterbutler_url_for('metadata', _internal=True, **query)

        response = requests.get(metadata_url)
        if response.status_code != 200:
            raise HTTPError(response.status_code, data={'error': response.json()})

        # TODO: better throttling?
        time.sleep(1.0 / 5.0)

        return response.json().get('data', [])

    def _get_file_tree(self, filenode=None, user=None, cookie=None, version=None):
        """Recursively collect file metadata, starting at *filenode*
        (defaults to the addon's root folder).
        """
        filenode = filenode or {
            'path': '/',
            'kind': 'folder',
            'name': self.root_node.name,
        }

        # Files are leaves: return them as-is.
        if filenode.get('kind') == 'file' or 'size' in filenode:
            return filenode

        children = self._get_fileobj_child_metadata(
            filenode, user, cookie=cookie, version=version,
        )
        filenode['children'] = [
            self._get_file_tree(child, user, cookie=cookie)
            for child in children
        ]
        return filenode
class BaseOAuthNodeSettings(BaseNodeSettings):
    """Node settings for addons that authenticate through OAuth.

    Connects a node to an ``ExternalAccount`` through the authorizing
    user's addon settings (``user_settings``), and implements the
    fork/register/contributor-removal callbacks for OAuth addons.
    """
    # TODO: Validate this field to be sure it matches the provider's short_name
    # NOTE: Do not set this field directly. Use ``set_auth()``
    external_account = models.ForeignKey(ExternalAccount, null=True, blank=True,
                                         related_name='%(app_label)s_node_settings')

    # NOTE: Do not set this field directly. Use ``set_auth()``
    # user_settings = fields.AbstractForeignField()

    # The existence of this property is used to determine whether or not
    # an addon instance is an "OAuth addon" in
    # AddonModelMixin.get_oauth_addons().
    oauth_provider = None

    class Meta:
        abstract = True

    @abc.abstractproperty
    def folder_id(self):
        raise NotImplementedError(
            "BaseOAuthNodeSettings subclasses must expose a 'folder_id' property."
        )

    @abc.abstractproperty
    def folder_name(self):
        raise NotImplementedError(
            "BaseOAuthNodeSettings subclasses must expose a 'folder_name' property."
        )

    @abc.abstractproperty
    def folder_path(self):
        raise NotImplementedError(
            "BaseOAuthNodeSettings subclasses must expose a 'folder_path' property."
        )

    def fetch_folder_name(self):
        """Return a displayable name for the connected folder."""
        return self.folder_name

    @property
    def nodelogger(self):
        """An addon-specific AddonNodeLogger bound to this node and the
        authorizing user, for writing addon log entries.
        """
        auth = None
        if self.user_settings:
            auth = Auth(self.user_settings.owner)
        # Build (and cache on the instance) a logger subclass named after
        # the addon, e.g. 'BoxNodeLogger', on first access.
        self._logger_class = getattr(
            self,
            '_logger_class',
            type(
                '{0}NodeLogger'.format(self.config.short_name.capitalize()),
                (logger.AddonNodeLogger,),
                {'addon_short_name': self.config.short_name}
            )
        )
        return self._logger_class(
            node=self.owner,
            auth=auth
        )

    @property
    def complete(self):
        """Auth is present and the user's grant for this node is still valid."""
        return bool(
            self.has_auth and
            self.external_account and
            self.user_settings.verify_oauth_access(
                node=self.owner,
                external_account=self.external_account,
            )
        )

    @property
    def configured(self):
        """Complete AND a folder has been connected."""
        return bool(
            self.complete and
            (self.folder_id or self.folder_name or self.folder_path)
        )

    @property
    def has_auth(self):
        """Instance has an external account and *active* permission to use it"""
        return bool(
            self.user_settings and self.user_settings.has_auth
        ) and bool(
            self.external_account and self.user_settings.verify_oauth_access(
                node=self.owner,
                external_account=self.external_account
            )
        )

    def clear_settings(self):
        """Reset addon-specific configuration; must be provided by subclasses."""
        raise NotImplementedError(
            "BaseOAuthNodeSettings subclasses must expose a 'clear_settings' method."
        )

    def set_auth(self, external_account, user, metadata=None, log=True):
        """Connect the node addon to a user's external account.

        This method also adds the permission to use the account in the user's
        addon settings.

        :param ExternalAccount external_account: account to connect
        :param User user: authorizing user
        :param dict metadata: optional grant metadata (used when forking)
        :param bool log: whether to write a 'node_authorized' log entry
        """
        # tell the user's addon settings that this node is connected to it
        user_settings = user.get_or_add_addon(self.oauth_provider.short_name)
        user_settings.grant_oauth_access(
            node=self.owner,
            external_account=external_account,
            metadata=metadata  # metadata can be passed in when forking
        )
        user_settings.save()

        # update this instance
        self.user_settings = user_settings
        self.external_account = external_account

        if log:
            self.nodelogger.log(action='node_authorized', save=True)
        self.save()

    def deauthorize(self, auth=None, add_log=False):
        """Remove authorization from this node.

        This method should be overridden for addon-specific behavior,
        such as logging and clearing non-generalizable settings.
        """
        self.clear_auth()

    def clear_auth(self):
        """Disconnect the node settings from the user settings.

        This method does not remove the node's permission in the user's addon
        settings.
        """
        self.external_account = None
        self.user_settings = None
        self.save()

    def before_remove_contributor_message(self, node, removed):
        """If contributor to be removed authorized this addon, warn that removing
        will remove addon authorization.

        :returns: Alert message or None
        """
        if self.has_auth and self.user_settings.owner == removed:
            return (
                u'The {addon} add-on for this {category} is authenticated by {name}. '
                u'Removing this user will also remove write access to {addon} '
                u'unless another contributor re-authenticates the add-on.'
            ).format(
                addon=self.config.full_name,
                category=node.project_or_component,
                name=removed.fullname,
            )

    # backwards compatibility
    before_remove_contributor = before_remove_contributor_message

    def after_remove_contributor(self, node, removed, auth=None):
        """If removed contributor authorized this addon, remove addon authorization
        from owner.

        :returns: Alert message or None
        """
        if self.user_settings and self.user_settings.owner == removed:
            # Delete OAuth tokens
            self.user_settings.oauth_grants[self.owner._id].pop(self.external_account._id)
            self.user_settings.save()
            self.clear_auth()
            message = (
                u'Because the {addon} add-on for {category} "{title}" was authenticated '
                u'by {user}, authentication information has been deleted.'
            ).format(
                addon=self.config.full_name,
                category=markupsafe.escape(node.category_display),
                title=markupsafe.escape(node.title),
                user=markupsafe.escape(removed.fullname)
            )

            # Suggest re-auth only when someone other than the removed user
            # performed the removal (the removed user can no longer do it).
            if not auth or auth.user != removed:
                url = node.web_url_for('node_setting')
                message += (
                    u' You can re-authenticate on the <u><a href="{url}">Settings</a></u> page.'
                ).format(url=url)

            return message

    def after_fork(self, node, fork, user, save=True):
        """After forking, copy user settings if the user is the one who authorized
        the addon.

        :return: the cloned settings
        """
        clone = super(BaseOAuthNodeSettings, self).after_fork(
            node=node,
            fork=fork,
            user=user,
            save=False,
        )
        if self.has_auth and self.user_settings.owner == user:
            metadata = None
            if self.complete:
                try:
                    # Carry the original grant metadata over to the fork.
                    metadata = self.user_settings.oauth_grants[node._id][self.external_account._id]
                except (KeyError, AttributeError):
                    pass
            clone.set_auth(self.external_account, user, metadata=metadata, log=False)
        else:
            # Forking user is not the authorizer: strip configuration.
            clone.clear_settings()
        if save:
            clone.save()
        return clone

    def before_register_message(self, node, user):
        """Return warning text to display if user auth will be copied to a
        registration.

        :returns: Alert message or None
        """
        if self.has_auth:
            return (
                u'The contents of {addon} add-ons cannot be registered at this time; '
                u'the {addon} add-on linked to this {category} will not be included '
                u'as part of this registration.'
            ).format(
                addon=self.config.full_name,
                category=node.project_or_component,
            )

    # backwards compatibility
    before_register = before_register_message

    def serialize_waterbutler_credentials(self):
        # Credentials payload sent to waterbutler; subclasses must implement.
        raise NotImplementedError("BaseOAuthNodeSettings subclasses must implement a \
            'serialize_waterbutler_credentials' method.")

    def serialize_waterbutler_settings(self):
        # Settings payload sent to waterbutler; subclasses must implement.
        raise NotImplementedError("BaseOAuthNodeSettings subclasses must implement a \
            'serialize_waterbutler_settings' method.")
class BaseCitationsNodeSettings(BaseOAuthNodeSettings):
    """Node settings shared by citation-manager addons.

    The connected "folder" is a citation list, tracked via ``list_id``.
    """

    class Meta:
        abstract = True

    def serialize_waterbutler_settings(self, *args, **kwargs):
        # required by superclass, not actually used
        pass

    def serialize_waterbutler_credentials(self, *args, **kwargs):
        # required by superclass, not actually used
        pass

    def create_waterbutler_log(self, *args, **kwargs):
        # required by superclass, not actually used
        pass

    @property
    def api(self):
        """authenticated ExternalProvider instance"""
        # Instantiate lazily and memoize on the instance.
        if self._api is None:
            self._api = self.oauth_provider(account=self.external_account)
        return self._api

    @property
    def complete(self):
        """Boolean indication of addon completeness"""
        if not self.has_auth:
            return False
        return bool(self.user_settings.verify_oauth_access(
            node=self.owner,
            external_account=self.external_account,
            metadata={'folder': self.list_id},
        ))

    @property
    def root_folder(self):
        """Serialized representation of root folder"""
        return self.serializer.serialized_root_folder

    @property
    def folder_id(self):
        return self.list_id

    @property
    def folder_name(self):
        return self.fetch_folder_name

    @property
    def folder_path(self):
        return self.fetch_folder_name

    @property
    def fetch_folder_name(self):
        """Returns a displayable folder name"""
        if self.list_id is None:
            return ''
        if self.list_id == 'ROOT':
            return 'All Documents'
        return self._fetch_folder_name

    def clear_settings(self):
        """Clears selected folder configuration"""
        self.list_id = None

    def set_auth(self, *args, **kwargs):
        """Connect the node addon to a user's external account.

        This method also adds the permission to use the account in the user's
        addon settings.
        """
        # Reset any folder selection before delegating to the OAuth base class.
        self.list_id = None
        self.save()
        return super(BaseCitationsNodeSettings, self).set_auth(*args, **kwargs)

    def deauthorize(self, auth=None, add_log=True):
        """Remove user authorization from this node and log the event."""
        if add_log:
            self.owner.add_log(
                '{0}_node_deauthorized'.format(self.provider_name),
                params={
                    'project': self.owner.parent_id,
                    'node': self.owner._id,
                },
                auth=auth,
            )
        self.clear_settings()
        self.clear_auth()
        self.save()

    def after_delete(self, node=None, user=None):
        self.deauthorize(Auth(user=user), add_log=True)

    def on_delete(self):
        self.deauthorize(add_log=False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.