# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import executor
import host_driver
import linuxfc
import linuxscsi
import os
import socket
import time
from oslo.config import cfg
from cinder.brick import exceptions
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import lockutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.openstack.common import processutils as putils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
synchronized = lockutils.synchronized_with_prefix('brick-')
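# Note: lockutils.synchronized_with_prefix returns a decorator factory, so
# @synchronized('connect_volume') below serializes attach/detach calls in
# this module under a shared 'connect_volume' lock (lock files are prefixed
# with 'brick-').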
def get_connector_properties():
"""Get the connection properties for all protocols."""
iscsi = ISCSIConnector()
fc = linuxfc.LinuxFibreChannel()
props = {}
props['ip'] = CONF.my_ip
props['host'] = socket.gethostname()
initiator = iscsi.get_initiator()
if initiator:
props['initiator'] = initiator
wwpns = fc.get_fc_wwpns()
if wwpns:
props['wwpns'] = wwpns
wwnns = fc.get_fc_wwnns()
if wwnns:
props['wwnns'] = wwnns
return props
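# Illustrative shape of the returned properties (values are made up):
# {'ip': '10.0.0.5', 'host': 'compute-1',
#  'initiator': 'iqn.1993-08.org.debian:01:abc123',
#  'wwpns': ['500143802426baf4'], 'wwnns': ['500143802426baf5']}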
class InitiatorConnector(executor.Executor):
def __init__(self, driver=None, execute=putils.execute,
root_helper="sudo", *args, **kwargs):
super(InitiatorConnector, self).__init__(execute, root_helper,
*args, **kwargs)
if not driver:
driver = host_driver.HostDriver()
self.set_driver(driver)
def set_driver(self, driver):
"""The driver is used to find used LUNs."""
self.driver = driver
@staticmethod
def factory(protocol, execute=putils.execute,
root_helper="sudo", use_multipath=False):
"""Build a Connector object based upon protocol."""
LOG.debug("Factory for %s" % protocol)
protocol = protocol.upper()
if protocol == "ISCSI":
return ISCSIConnector(execute=execute,
root_helper=root_helper,
use_multipath=use_multipath)
elif protocol == "FIBRE_CHANNEL":
return FibreChannelConnector(execute=execute,
root_helper=root_helper,
use_multipath=use_multipath)
else:
msg = (_("Invalid InitiatorConnector protocol "
"specified %(protocol)s") %
dict(protocol=protocol))
raise ValueError(msg)
def check_valid_device(self, path):
cmd = ('dd', 'if=%(path)s' % {"path": path},
'of=/dev/null', 'count=1')
out, info = None, None
try:
out, info = self._execute(*cmd, run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as e:
LOG.error(_("Failed to access the device on the path "
"%(path)s: %(error)s %(info)s.") %
{"path": path, "error": e.stderr,
"info": info})
return False
# If the info is none, the path does not exist.
if info is None:
return False
return True
def connect_volume(self, connection_properties):
"""Connect to a volume. The connection_properties
describes the information needed by the specific
protocol to use to make the connection.
"""
raise NotImplementedError()
def disconnect_volume(self, connection_properties, device_info):
"""Disconnect a volume from the local host.
The connection_properties are the same as from connect_volume.
The device_info is returned from connect_volume.
"""
raise NotImplementedError()
class ISCSIConnector(InitiatorConnector):
""""Connector class to attach/detach iSCSI volumes."""
def __init__(self, driver=None, execute=putils.execute,
root_helper="sudo", use_multipath=False,
*args, **kwargs):
super(ISCSIConnector, self).__init__(driver, execute, root_helper,
*args, **kwargs)
self.use_multipath = use_multipath
self._linuxscsi = linuxscsi.LinuxSCSI(execute, root_helper)
@synchronized('connect_volume')
def connect_volume(self, connection_properties):
"""Attach the volume to instance_name.
connection_properties for iSCSI must include:
target_portal - ip and optional port
target_iqn - iSCSI Qualified Name
target_lun - LUN id of the volume
"""
device_info = {'type': 'block'}
if self.use_multipath:
#multipath installed, discovering other targets if available
target_portal = connection_properties['target_portal']
out = self._run_iscsiadm_bare(['-m',
'discovery',
'-t',
'sendtargets',
'-p',
target_portal],
check_exit_code=[0, 255])[0] \
or ""
for ip in self._get_target_portals_from_iscsiadm_output(out):
props = connection_properties.copy()
props['target_portal'] = ip
self._connect_to_iscsi_portal(props)
self._rescan_iscsi()
else:
self._connect_to_iscsi_portal(connection_properties)
host_device = self._get_device_path(connection_properties)
# The /dev/disk/by-path/... node is not always present immediately
# TODO(justinsb): This retry-with-delay is a pattern, move to utils?
tries = 0
while not os.path.exists(host_device):
if tries >= CONF.num_iscsi_scan_tries:
raise exceptions.VolumeDeviceNotFound(host_device)
LOG.warn(_("ISCSI volume not yet found at: %(host_device)s. "
"Will rescan & retry. Try number: %(tries)s"),
{'host_device': host_device,
'tries': tries})
# The rescan isn't documented as being necessary(?), but it helps
self._run_iscsiadm(connection_properties, ("--rescan",))
tries = tries + 1
if not os.path.exists(host_device):
time.sleep(tries ** 2)
if tries != 0:
LOG.debug(_("Found iSCSI node %(host_device)s "
"(after %(tries)s rescans)"),
{'host_device': host_device, 'tries': tries})
if self.use_multipath:
#we use the multipath device instead of the single path device
self._rescan_multipath()
multipath_device = self._get_multipath_device_name(host_device)
if multipath_device is not None:
host_device = multipath_device
device_info['path'] = host_device
return device_info
@synchronized('connect_volume')
def disconnect_volume(self, connection_properties, device_info):
"""Detach the volume from instance_name.
connection_properties for iSCSI must include:
target_portal - ip and optional port
target_iqn - iSCSI Qualified Name
target_lun - LUN id of the volume
"""
host_device = self._get_device_path(connection_properties)
multipath_device = None
if self.use_multipath:
multipath_device = self._get_multipath_device_name(host_device)
if multipath_device:
self._linuxscsi.remove_multipath_device(multipath_device)
return self._disconnect_volume_multipath_iscsi(
connection_properties, multipath_device)
# remove the device from the scsi subsystem
# this eliminates any stale entries until logout
dev_name = self._linuxscsi.get_name_from_path(host_device)
if dev_name:
self._linuxscsi.remove_scsi_device(dev_name)
# NOTE(vish): Only disconnect from the target if no luns from the
# target are in use.
device_prefix = ("/dev/disk/by-path/ip-%(portal)s-iscsi-%(iqn)s-lun-" %
{'portal': connection_properties['target_portal'],
'iqn': connection_properties['target_iqn']})
devices = self.driver.get_all_block_devices()
devices = [dev for dev in devices if dev.startswith(device_prefix)]
if not devices:
self._disconnect_from_iscsi_portal(connection_properties)
def _get_device_path(self, connection_properties):
path = ("/dev/disk/by-path/ip-%(portal)s-iscsi-%(iqn)s-lun-%(lun)s" %
{'portal': connection_properties['target_portal'],
'iqn': connection_properties['target_iqn'],
'lun': connection_properties.get('target_lun', 0)})
return path
def get_initiator(self):
"""Secure helper to read file as root."""
file_path = '/etc/iscsi/initiatorname.iscsi'
try:
lines, _err = self._execute('cat', file_path, run_as_root=True,
root_helper=self._root_helper)
for l in lines.split('\n'):
if l.startswith('InitiatorName='):
return l[l.index('=') + 1:].strip()
except putils.ProcessExecutionError:
msg = (_("Could not find the iSCSI Initiator File %s")
% file_path)
LOG.warn(msg)
return None
def _run_iscsiadm(self, connection_properties, iscsi_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = self._execute('iscsiadm', '-m', 'node', '-T',
connection_properties['target_iqn'],
'-p',
connection_properties['target_portal'],
*iscsi_command, run_as_root=True,
root_helper=self._root_helper,
check_exit_code=check_exit_code)
LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
(iscsi_command, out, err))
return (out, err)
def _iscsiadm_update(self, connection_properties, property_key,
property_value, **kwargs):
iscsi_command = ('--op', 'update', '-n', property_key,
'-v', property_value)
return self._run_iscsiadm(connection_properties, iscsi_command,
**kwargs)
def _get_target_portals_from_iscsiadm_output(self, output):
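        # sendtargets discovery output has one target per line, roughly
        # "10.0.0.2:3260,1 iqn.2010-10.org.openstack:volume-<id>"; only the
        # first whitespace-separated token (the portal) is kept here.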
return [line.split()[0] for line in output.splitlines()]
def _disconnect_volume_multipath_iscsi(self, connection_properties,
multipath_name):
"""This removes a multipath device and it's LUNs."""
LOG.debug("Disconnect multipath device %s" % multipath_name)
self._rescan_iscsi()
self._rescan_multipath()
block_devices = self.driver.get_all_block_devices()
devices = []
for dev in block_devices:
if "/mapper/" in dev:
devices.append(dev)
else:
mpdev = self._get_multipath_device_name(dev)
if mpdev:
devices.append(mpdev)
if not devices:
# disconnect if no other multipath devices
self._disconnect_mpath(connection_properties)
return
other_iqns = [self._get_multipath_iqn(device)
for device in devices]
if connection_properties['target_iqn'] not in other_iqns:
# disconnect if no other multipath devices with same iqn
self._disconnect_mpath(connection_properties)
return
# else do not disconnect iscsi portals,
# as they are used for other luns
return
def _connect_to_iscsi_portal(self, connection_properties):
# NOTE(vish): If we are on the same host as nova volume, the
# discovery makes the target so we don't need to
# run --op new. Therefore, we check to see if the
# target exists, and if we get 255 (Not Found), then
# we run --op new. This will also happen if another
# volume is using the same target.
try:
self._run_iscsiadm(connection_properties, ())
except putils.ProcessExecutionError as exc:
# iscsiadm returns 21 for "No records found" after version 2.0-871
if exc.exit_code in [21, 255]:
self._run_iscsiadm(connection_properties, ('--op', 'new'))
else:
raise
if connection_properties.get('auth_method'):
self._iscsiadm_update(connection_properties,
"node.session.auth.authmethod",
connection_properties['auth_method'])
self._iscsiadm_update(connection_properties,
"node.session.auth.username",
connection_properties['auth_username'])
self._iscsiadm_update(connection_properties,
"node.session.auth.password",
connection_properties['auth_password'])
#duplicate logins crash iscsiadm after load,
#so we scan active sessions to see if the node is logged in.
out = self._run_iscsiadm_bare(["-m", "session"],
run_as_root=True,
check_exit_code=[0, 1, 21])[0] or ""
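        # 'iscsiadm -m session' lines look roughly like
        # "tcp: [1] 10.0.0.2:3260,1 iqn.2010-10.org.openstack:volume-<id>",
        # so fields 2 and 3 are the portal and the target IQN respectively.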
portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]}
for p in out.splitlines() if p.startswith("tcp:")]
stripped_portal = connection_properties['target_portal'].split(",")[0]
if len(portals) == 0 or len([s for s in portals
if stripped_portal ==
s['portal'].split(",")[0]
and
s['iqn'] ==
connection_properties['target_iqn']]
) == 0:
try:
self._run_iscsiadm(connection_properties,
("--login",),
check_exit_code=[0, 255])
except putils.ProcessExecutionError as err:
#as this might be one of many paths,
                #only set successful logins to startup automatically
if err.exit_code in [15]:
self._iscsiadm_update(connection_properties,
"node.startup",
"automatic")
return
self._iscsiadm_update(connection_properties,
"node.startup",
"automatic")
def _disconnect_from_iscsi_portal(self, connection_properties):
self._iscsiadm_update(connection_properties, "node.startup", "manual",
check_exit_code=[0, 21, 255])
self._run_iscsiadm(connection_properties, ("--logout",),
check_exit_code=[0, 21, 255])
self._run_iscsiadm(connection_properties, ('--op', 'delete'),
check_exit_code=[0, 21, 255])
def _get_multipath_device_name(self, single_path_device):
device = os.path.realpath(single_path_device)
out = self._run_multipath(['-ll',
device],
check_exit_code=[0, 1])[0]
mpath_line = [line for line in out.splitlines()
if "scsi_id" not in line] # ignore udev errors
if len(mpath_line) > 0 and len(mpath_line[0]) > 0:
return "/dev/mapper/%s" % mpath_line[0].split(" ")[0]
return None
def _get_iscsi_devices(self):
try:
devices = list(os.walk('/dev/disk/by-path'))[0][-1]
except IndexError:
return []
return [entry for entry in devices if entry.startswith("ip-")]
def _disconnect_mpath(self, connection_properties):
entries = self._get_iscsi_devices()
ips = [ip.split("-")[1] for ip in entries
if connection_properties['target_iqn'] in ip]
for ip in ips:
props = connection_properties.copy()
props['target_portal'] = ip
self._disconnect_from_iscsi_portal(props)
self._rescan_multipath()
def _get_multipath_iqn(self, multipath_device):
entries = self._get_iscsi_devices()
for entry in entries:
entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
entry_multipath = self._get_multipath_device_name(entry_real_path)
if entry_multipath == multipath_device:
return entry.split("iscsi-")[1].split("-lun")[0]
return None
def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = self._execute('iscsiadm',
*iscsi_command,
run_as_root=True,
root_helper=self._root_helper,
check_exit_code=check_exit_code)
LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
(iscsi_command, out, err))
return (out, err)
def _run_multipath(self, multipath_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = self._execute('multipath',
*multipath_command,
run_as_root=True,
root_helper=self._root_helper,
check_exit_code=check_exit_code)
LOG.debug("multipath %s: stdout=%s stderr=%s" %
(multipath_command, out, err))
return (out, err)
def _rescan_iscsi(self):
self._run_iscsiadm_bare(('-m', 'node', '--rescan'),
check_exit_code=[0, 1, 21, 255])
self._run_iscsiadm_bare(('-m', 'session', '--rescan'),
check_exit_code=[0, 1, 21, 255])
def _rescan_multipath(self):
self._run_multipath('-r', check_exit_code=[0, 1, 21])
class FibreChannelConnector(InitiatorConnector):
""""Connector class to attach/detach Fibre Channel volumes."""
def __init__(self, driver=None, execute=putils.execute,
root_helper="sudo", use_multipath=False,
*args, **kwargs):
super(FibreChannelConnector, self).__init__(driver, execute,
root_helper,
*args, **kwargs)
self.use_multipath = use_multipath
self._linuxscsi = linuxscsi.LinuxSCSI(execute, root_helper)
self._linuxfc = linuxfc.LinuxFibreChannel(execute, root_helper)
@synchronized('connect_volume')
def connect_volume(self, connection_properties):
"""Attach the volume to instance_name.
connection_properties for Fibre Channel must include:
        target_wwn - World Wide Name of the target port (or a list of WWNs)
        target_lun - LUN id of the volume
"""
LOG.debug("execute = %s" % self._execute)
device_info = {'type': 'block'}
ports = connection_properties['target_wwn']
wwns = []
# we support a list of wwns or a single wwn
if isinstance(ports, list):
for wwn in ports:
wwns.append(wwn)
elif isinstance(ports, str):
wwns.append(ports)
# We need to look for wwns on every hba
# because we don't know ahead of time
# where they will show up.
hbas = self._linuxfc.get_fc_hbas_info()
host_devices = []
for hba in hbas:
pci_num = self._get_pci_num(hba)
if pci_num is not None:
for wwn in wwns:
target_wwn = "0x%s" % wwn.lower()
host_device = ("/dev/disk/by-path/pci-%s-fc-%s-lun-%s" %
(pci_num,
target_wwn,
connection_properties.get('target_lun', 0)))
host_devices.append(host_device)
if len(host_devices) == 0:
# this is empty because we don't have any FC HBAs
msg = _("We are unable to locate any Fibre Channel devices")
LOG.warn(msg)
raise exceptions.NoFibreChannelHostsFound()
# The /dev/disk/by-path/... node is not always present immediately
# We only need to find the first device. Once we see the first device
# multipath will have any others.
def _wait_for_device_discovery(host_devices):
tries = self.tries
for device in host_devices:
LOG.debug(_("Looking for Fibre Channel dev %(device)s"),
{'device': device})
if os.path.exists(device):
self.host_device = device
# get the /dev/sdX device. This is used
# to find the multipath device.
self.device_name = os.path.realpath(device)
raise loopingcall.LoopingCallDone()
if self.tries >= CONF.num_iscsi_scan_tries:
msg = _("Fibre Channel volume device not found.")
LOG.error(msg)
raise exceptions.NoFibreChannelVolumeDeviceFound()
LOG.warn(_("Fibre volume not yet found. "
"Will rescan & retry. Try number: %(tries)s"),
{'tries': tries})
self._linuxfc.rescan_hosts(hbas)
self.tries = self.tries + 1
self.host_device = None
self.device_name = None
self.tries = 0
timer = loopingcall.FixedIntervalLoopingCall(
_wait_for_device_discovery, host_devices)
timer.start(interval=2).wait()
tries = self.tries
if self.host_device is not None and self.device_name is not None:
LOG.debug(_("Found Fibre Channel volume %(name)s "
"(after %(tries)s rescans)"),
{'name': self.device_name, 'tries': tries})
# see if the new drive is part of a multipath
# device. If so, we'll use the multipath device.
if self.use_multipath:
mdev_info = self._linuxscsi.find_multipath_device(self.device_name)
if mdev_info is not None:
LOG.debug(_("Multipath device discovered %(device)s")
% {'device': mdev_info['device']})
device_path = mdev_info['device']
devices = mdev_info['devices']
device_info['multipath_id'] = mdev_info['id']
else:
# we didn't find a multipath device.
# so we assume the kernel only sees 1 device
device_path = self.host_device
dev_info = self._linuxscsi.get_device_info(self.device_name)
devices = [dev_info]
else:
device_path = self.host_device
dev_info = self._linuxscsi.get_device_info(self.device_name)
devices = [dev_info]
device_info['path'] = device_path
device_info['devices'] = devices
return device_info
@synchronized('connect_volume')
def disconnect_volume(self, connection_properties, device_info):
"""Detach the volume from instance_name.
connection_properties for Fibre Channel must include:
        target_wwn - World Wide Name of the target port (or a list of WWNs)
target_lun - LUN id of the volume
"""
devices = device_info['devices']
# If this is a multipath device, we need to search again
# and make sure we remove all the devices. Some of them
# might not have shown up at attach time.
if self.use_multipath and 'multipath_id' in device_info:
multipath_id = device_info['multipath_id']
mdev_info = self._linuxscsi.find_multipath_device(multipath_id)
devices = mdev_info['devices']
LOG.debug("devices to remove = %s" % devices)
# There may have been more than 1 device mounted
# by the kernel for this volume. We have to remove
# all of them
for device in devices:
self._linuxscsi.remove_scsi_device(device["device"])
def _get_pci_num(self, hba):
# NOTE(walter-boring)
# device path is in format of
# /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2
# sometimes an extra entry exists before the host2 value
# we always want the value prior to the host2 value
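        # e.g. splitting the path above on '/' gives
        # [..., '0000:00:03.0', '0000:05:00.3', 'host2', 'fc_host', 'host2'],
        # so the value returned is '0000:05:00.3', the entry just before the
        # first 'hostN' component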
pci_num = None
if hba is not None:
if "device_path" in hba:
index = 0
device_path = hba['device_path'].split('/')
for value in device_path:
if value.startswith('host'):
break
index = index + 1
if index > 0:
pci_num = device_path[index - 1]
return pci_num
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six.moves.cPickle as pickle
import os
import signal
import sys
import time
from swift import gettext_ as _
from random import random
from eventlet import spawn, Timeout
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_drive
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger, renamer, write_pickle, \
dump_recon_cache, config_true_value, ratelimit_sleep, eventlet_monkey_patch
from swift.common.daemon import Daemon
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.storage_policy import split_policy_string, PolicyError
from swift.obj.diskfile import get_tmp_dir, ASYNCDIR_BASE
from swift.common.http import is_success, HTTP_INTERNAL_SERVER_ERROR
class SweepStats(object):
"""
Stats bucket for an update sweep
"""
def __init__(self, errors=0, failures=0, quarantines=0, successes=0,
unlinks=0):
self.errors = errors
self.failures = failures
self.quarantines = quarantines
self.successes = successes
self.unlinks = unlinks
def copy(self):
return type(self)(self.errors, self.failures, self.quarantines,
self.successes, self.unlinks)
def since(self, other):
return type(self)(self.errors - other.errors,
self.failures - other.failures,
self.quarantines - other.quarantines,
self.successes - other.successes,
self.unlinks - other.unlinks)
def reset(self):
self.errors = 0
self.failures = 0
self.quarantines = 0
self.successes = 0
self.unlinks = 0
def __str__(self):
keys = (
(self.successes, 'successes'),
(self.failures, 'failures'),
(self.quarantines, 'quarantines'),
(self.unlinks, 'unlinks'),
(self.errors, 'errors'),
)
return ', '.join('%d %s' % pair for pair in keys)
class ObjectUpdater(Daemon):
"""Update object information in container listings."""
def __init__(self, conf, logger=None):
self.conf = conf
self.logger = logger or get_logger(conf, log_route='object-updater')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.interval = int(conf.get('interval', 300))
self.container_ring = None
self.concurrency = int(conf.get('concurrency', 1))
if 'slowdown' in conf:
self.logger.warning(
'The slowdown option is deprecated in favor of '
'objects_per_second. This option may be ignored in a '
'future release.')
objects_per_second = 1 / (
float(conf.get('slowdown', '0.01')) + 0.01)
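            # slowdown was a per-object sleep in seconds; 1/(slowdown + 0.01)
            # converts it to an approximate rate, so the old default of 0.01
            # maps to 1/(0.01 + 0.01) = 50 objects per second.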
else:
objects_per_second = 50
self.objects_running_time = 0
self.max_objects_per_second = \
float(conf.get('objects_per_second',
objects_per_second))
self.node_timeout = float(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.report_interval = float(conf.get('report_interval', 300))
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = os.path.join(self.recon_cache_path, 'object.recon')
self.stats = SweepStats()
def _listdir(self, path):
try:
return os.listdir(path)
except OSError as e:
self.stats.errors += 1
self.logger.increment('errors')
self.logger.error(_('ERROR: Unable to access %(path)s: '
'%(error)s') %
{'path': path, 'error': e})
return []
def get_container_ring(self):
"""Get the container ring. Load it, if it hasn't been yet."""
if not self.container_ring:
self.container_ring = Ring(self.swift_dir, ring_name='container')
return self.container_ring
def run_forever(self, *args, **kwargs):
"""Run the updater continuously."""
time.sleep(random() * self.interval)
while True:
self.logger.info(_('Begin object update sweep'))
begin = time.time()
pids = []
# read from container ring to ensure it's fresh
self.get_container_ring().get_nodes('')
for device in self._listdir(self.devices):
if not check_drive(self.devices, device, self.mount_check):
# We don't count this as an error. The occasional
# unmounted drive is part of normal cluster operations,
# so a simple warning is sufficient.
self.logger.warning(
_('Skipping %s as it is not mounted'), device)
continue
while len(pids) >= self.concurrency:
pids.remove(os.wait()[0])
pid = os.fork()
if pid:
pids.append(pid)
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
eventlet_monkey_patch()
self.stats.reset()
forkbegin = time.time()
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - forkbegin
self.logger.info(
('Object update sweep of %(device)s '
'completed: %(elapsed).02fs, %(stats)s'),
{'device': device, 'elapsed': elapsed,
'stats': self.stats})
sys.exit()
while pids:
pids.remove(os.wait()[0])
elapsed = time.time() - begin
self.logger.info(_('Object update sweep completed: %.02fs'),
elapsed)
dump_recon_cache({'object_updater_sweep': elapsed},
self.rcache, self.logger)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""Run the updater once."""
self.logger.info(_('Begin object update single threaded sweep'))
begin = time.time()
self.stats.reset()
for device in self._listdir(self.devices):
if not check_drive(self.devices, device, self.mount_check):
# We don't count this as an error. The occasional unmounted
# drive is part of normal cluster operations, so a simple
# warning is sufficient.
self.logger.warning(
_('Skipping %s as it is not mounted'), device)
continue
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - begin
self.logger.info(
('Object update single-threaded sweep completed: '
'%(elapsed).02fs, %(stats)s'),
{'elapsed': elapsed, 'stats': self.stats})
dump_recon_cache({'object_updater_sweep': elapsed},
self.rcache, self.logger)
def object_sweep(self, device):
"""
If there are async pendings on the device, walk each one and update.
:param device: path to device
"""
start_time = time.time()
last_status_update = start_time
start_stats = self.stats.copy()
my_pid = os.getpid()
self.logger.info("Object update sweep starting on %s (pid: %d)",
device, my_pid)
# loop through async pending dirs for all policies
for asyncdir in self._listdir(device):
# we only care about directories
async_pending = os.path.join(device, asyncdir)
if not os.path.isdir(async_pending):
continue
if not asyncdir.startswith(ASYNCDIR_BASE):
# skip stuff like "accounts", "containers", etc.
continue
try:
base, policy = split_policy_string(asyncdir)
except PolicyError as e:
# This isn't an error, but a misconfiguration. Logging a
# warning should be sufficient.
self.logger.warning(_('Directory %(directory)r does not map '
'to a valid policy (%(error)s)') % {
'directory': asyncdir, 'error': e})
continue
for prefix in self._listdir(async_pending):
prefix_path = os.path.join(async_pending, prefix)
if not os.path.isdir(prefix_path):
continue
last_obj_hash = None
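                # async pending files are named <object_hash>-<timestamp>;
                # walking them newest-first means the most recent update per
                # object is processed and older duplicates are just unlinked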
for update in sorted(self._listdir(prefix_path), reverse=True):
update_path = os.path.join(prefix_path, update)
if not os.path.isfile(update_path):
continue
try:
obj_hash, timestamp = update.split('-')
except ValueError:
self.stats.errors += 1
self.logger.increment('errors')
self.logger.error(
_('ERROR async pending file with unexpected '
'name %s')
% (update_path))
continue
if obj_hash == last_obj_hash:
self.stats.unlinks += 1
self.logger.increment('unlinks')
os.unlink(update_path)
else:
self.process_object_update(update_path, device,
policy)
last_obj_hash = obj_hash
self.objects_running_time = ratelimit_sleep(
self.objects_running_time,
self.max_objects_per_second)
now = time.time()
if now - last_status_update >= self.report_interval:
this_sweep = self.stats.since(start_stats)
self.logger.info(
('Object update sweep progress on %(device)s: '
'%(elapsed).02fs, %(stats)s (pid: %(pid)d)'),
{'device': device,
'elapsed': now - start_time,
'pid': my_pid,
'stats': this_sweep})
last_status_update = now
try:
os.rmdir(prefix_path)
except OSError:
pass
self.logger.timing_since('timing', start_time)
sweep_totals = self.stats.since(start_stats)
self.logger.info(
('Object update sweep completed on %(device)s '
             'in %(elapsed).02f seconds: '
'%(successes)d successes, %(failures)d failures, '
'%(quarantines)d quarantines, '
'%(unlinks)d unlinks, %(errors)d errors '
'(pid: %(pid)d)'),
{'device': device,
'elapsed': time.time() - start_time,
'pid': my_pid,
'successes': sweep_totals.successes,
'failures': sweep_totals.failures,
'quarantines': sweep_totals.quarantines,
'unlinks': sweep_totals.unlinks,
'errors': sweep_totals.errors})
def process_object_update(self, update_path, device, policy):
"""
Process the object information to be updated and update.
:param update_path: path to pickled object update file
:param device: path to device
:param policy: storage policy of object update
"""
try:
update = pickle.load(open(update_path, 'rb'))
except Exception:
self.logger.exception(
_('ERROR Pickle problem, quarantining %s'), update_path)
self.stats.quarantines += 1
self.logger.increment('quarantines')
target_path = os.path.join(device, 'quarantined', 'objects',
os.path.basename(update_path))
renamer(update_path, target_path, fsync=False)
return
successes = update.get('successes', [])
part, nodes = self.get_container_ring().get_nodes(
update['account'], update['container'])
obj = '/%s/%s/%s' % \
(update['account'], update['container'], update['obj'])
headers_out = HeaderKeyDict(update['headers'])
headers_out['user-agent'] = 'object-updater %s' % os.getpid()
headers_out.setdefault('X-Backend-Storage-Policy-Index',
str(int(policy)))
events = [spawn(self.object_update,
node, part, update['op'], obj, headers_out)
for node in nodes if node['id'] not in successes]
success = True
new_successes = False
for event in events:
event_success, node_id = event.wait()
if event_success is True:
successes.append(node_id)
new_successes = True
else:
success = False
if success:
self.stats.successes += 1
self.logger.increment('successes')
self.logger.debug('Update sent for %(obj)s %(path)s',
{'obj': obj, 'path': update_path})
self.stats.unlinks += 1
self.logger.increment('unlinks')
os.unlink(update_path)
else:
self.stats.failures += 1
self.logger.increment('failures')
self.logger.debug('Update failed for %(obj)s %(path)s',
{'obj': obj, 'path': update_path})
if new_successes:
update['successes'] = successes
write_pickle(update, update_path, os.path.join(
device, get_tmp_dir(policy)))
def object_update(self, node, part, op, obj, headers_out):
"""
Perform the object update to the container
:param node: node dictionary from the container ring
:param part: partition that holds the container
:param op: operation performed (ex: 'PUT' or 'DELETE')
:param obj: object name being updated
:param headers_out: headers to send with the update
"""
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'],
part, op, obj, headers_out)
with Timeout(self.node_timeout):
resp = conn.getresponse()
resp.read()
success = is_success(resp.status)
if not success:
self.logger.debug(
_('Error code %(status)d is returned from remote '
'server %(ip)s: %(port)s / %(device)s'),
{'status': resp.status, 'ip': node['ip'],
'port': node['port'], 'device': node['device']})
return (success, node['id'])
except (Exception, Timeout):
self.logger.exception(_('ERROR with remote server '
'%(ip)s:%(port)s/%(device)s'), node)
return HTTP_INTERNAL_SERVER_ERROR, node['id']
#!/usr/local/bin/python
########################################################################
# Author: J. Kelly Flanagan
# Copyright (c) 2015
#
# myDoorbellConfig.py downloads the device configuration (and any new
# ringtones) from the configuration server and stores it locally
#
# This program is intended to be started each minute by cron and either
# exits due to success or failure.
#
# myDoorbellConfig.py attempts to do the following:
#
# 1. open the file myDoorbellHomeDir/myDoorbell/myDoorbellInit and read its
# content into a dictionary. This file contains the configuration server name,
# keys and other necessary credentials. If this file doesn't exist we exit
# with an error.
# 2. open the file myDoorbellHomeDir/myDoorbell/myDoorbellConfig and read its
# content into a dictionary. If the file does not exist an empty dictionary
# is used.
# 3. use configuration server name, rid, eci, and other items to form
# a url for requesting the configuration information from the
# configuration server. If an exception occurs simply exit and we'll
# try again later.
# 4. check that the response indicates success; otherwise exit and we'll
#    try again later.
# 5. get returned JSON config into dictionary. If ringtones are required
# download them. If ringtones are acquired acknowledge the download.
# Compare config to previous version with the exception of the ringtone
# elements. If they differ store the results into a temporary file
# and then use rename to make an atomic update. If they do not differ
# simply exit.
########################################################################
import requests
import os
import ast
import json
import time
import signal
# keys = home_dir, config_server, eci, rid
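# The init file is a Python dict literal readable by ast.literal_eval,
# for example (illustrative values only):
# {'home_dir': '/home', 'config_server': 'cs.example.com',
#  'eci': '<event-channel-id>', 'rid': '<ruleset-id>'}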
myDoorbellInit = {}
myDoorbellHomeDir = '/home'
# fill myDoorbellInit global dictionary
def get_init():
global myDoorbellInit
global myDoorbellHomeDir
try:
kf = open(myDoorbellHomeDir + '/myDoorbell/myDoorbellInit', 'r')
except IOError:
return False
else:
key_file = kf.read()
myDoorbellInit = ast.literal_eval(key_file)
kf.close()
return True
# end function
# let the ringtone server know that we acquired the ringtone so it
# can reset the flag in the cloud based config file. If it fails return False
def got_ringtone(bell):
# form headers for json
headers = {'content-type': 'application/json'}
# resource
resource = ("/sky/event/" + myDoorbellInit['eci'] + '/12345')
# JSON payload
payload = ({'_domain':'myDoorbell', '_type':'got' +
bell.capitalize() + 'Ringtone', '_async':'true'})
    # form the full URL from the server name and resource
url = 'https://' + myDoorbellInit['config_server'] + resource
# make request
try:
response = requests.post(url, data=json.dumps(payload), headers=headers)
except requests.ConnectionError:
print "Connection error"
return False
except:
print "Other error"
return False
else:
if response.status_code == requests.codes.ok:
return True
else:
return False
# end function
# Acquire requested ringtone, write it to a temporary file, and rename it
# to make the transaction atomic, on success return True, on failure False
def get_ringtone(bell):
global myDoorbellHomeDir
# form url
url = ('https://' + myDoorbellInit['config_server'] +
'/sky/cloud/' + myDoorbellInit['rid'] +
'/myDoorbellRingtone?door=' + bell +
'&_eci=' + myDoorbellInit['eci'])
# make request
try:
r = requests.get(url)
except requests.ConnectionError:
print "Connection error"
return False
except:
print "Other error"
return False
else:
if r.status_code == requests.codes.ok:
# acquire dictionary
new_dict = r.json()
# extract JSON value as unicode string
ustr = new_dict['ringtone_file']
# encode string with latin1, this permits writing raw files
hex_str = ustr.encode('latin1')
# write file to $HOME/myDoorbell/myDoorbellRingtone[bell].tmp
# for filename
fn = (myDoorbellHomeDir +
'/myDoorbell/myDoorbellRingtone' + bell.capitalize())
tmp = fn + '.tmp'
try:
f = open(tmp,'wb')
except IOError:
print "Error opening tmp ringtone file for writing"
return False
else:
f.write(hex_str)
f.close()
# move file atomically
os.rename(tmp, fn)
print time.asctime( time.localtime(time.time())), 'Update %s ringtone' % bell
return True
else:
print "Bad response"
return False
return False
# get_ringtones checks for the need, acquires the ringtone,
# acknowledges it, and returns
def get_ringtones(dict):
ret_val = False
if dict['ringtone_new_front'] == 'true':
ret_val = True
# reset flag indicating need. If acquisition fails we'll catch
# it on the next iteration
if get_ringtone('front') == True:
if got_ringtone('front') == False:
print 'Failure to ack front ringtone'
else:
print 'Failed to get front ringtone'
if dict['ringtone_new_rear'] == 'true':
ret_val = True
# reset flag indicating need. If acquisition fails we'll catch
# it on the next iteration
if get_ringtone('rear') == True:
if got_ringtone('rear') == False:
print 'Failure to ack rear ringtone'
else:
print 'Failed to get rear ringtone'
return ret_val
# end of function
# check to see if this program is already running. If it is then return
# True unless it has been doing so for more than 10 minutes. In that case
# kill it and return False.
def myDoorbell_is_running():
pf = '/tmp/myDoorbell.pid'
pid = str(os.getpid())
if os.path.isfile(pf):
# if pid file hasn't been updated for some time then delete it,
# run, and create a new one
age = time.time() - os.path.getmtime(pf)
if age > 600: # 10 minutes
# get previous pid
opid = open(pf).read()
            # kill the stale process
try:
os.kill(int(opid), signal.SIGKILL)
except:
print "exception on kill"
# write file
print time.asctime(time.localtime(time.time())), 'Killed'
file(pf, 'w').write(pid)
return False
else:
print "%s already exists" % pf, time.asctime(time.localtime(time.time()))
return True
else:
file(pf, 'w').write(pid)
return False
# end of function
# Program execution begins here
# check to see if we're running
if myDoorbell_is_running():
exit(0)
# get eci, rid, server name, and home dir into the global dictionary
# named myDoorbellInit
if get_init() == False:
print "Can't open initialization file %s" % (home_dir + '/myDoorbell/myDoorbellInit')
print 'removing pid file'
os.remove('/tmp/myDoorbell.pid')
exit(-1)
# read in the configuration file if it exists and store in a dictionary
# if it doesn't exist then create an empty dictionary
try:
f = open(myDoorbellHomeDir + '/myDoorbell/myDoorbellConfig', 'r')
except IOError:
old_dict = {}
else:
my_string = f.read()
old_dict = ast.literal_eval(my_string)
f.close()
# get the configuration from the config server
# form url
url = ('https://' + myDoorbellInit['config_server'] +
'/sky/cloud/' + myDoorbellInit['rid'] +
'/myDoorbellConfig?_eci=' + myDoorbellInit['eci'])
try:
response = requests.get(url)
except requests.ConnectionError:
print "Connection error"
except:
print "Other error"
else:
if response.status_code == requests.codes.ok:
# acquire dictionary
new_dict = response.json()
# act on configuration data
if get_ringtones(new_dict):
new_dict['ringtone_new_front'] = 'false'
new_dict['ringtone_new_rear'] = 'false'
if cmp(new_dict, old_dict) != 0:
# write config file to $HOME/myDoorbell/myDoorbellConfig.tmp
try:
f = open(myDoorbellHomeDir + '/myDoorbell/myDoorbellConfig.tmp','w')
except IOError:
print "Error opening config file for writing"
else:
f.write(str(new_dict))
f.close()
os.rename(myDoorbellHomeDir + '/myDoorbell/myDoorbellConfig.tmp',
myDoorbellHomeDir + '/myDoorbell/myDoorbellConfig')
old_dict = new_dict
print time.asctime( time.localtime(time.time())), "Update"
else:
print time.asctime( time.localtime(time.time())), "Success"
else:
print "Bad response"
#clean up before we leave
os.remove('/tmp/myDoorbell.pid')
#! /usr/bin/env python
'''Image/Video utilities'''
try:
import cv2
opencvAvailable = True
except ImportError:
print('OpenCV library could not be loaded (video replay functions will not be available)') # TODO change to logging module
opencvAvailable = False
try:
import skimage
skimageAvailable = True
except ImportError:
print('Scikit-image library could not be loaded (HoG-based classification methods will not be available)')
skimageAvailable = False
from sys import stdout
import utils
#import aggdraw # agg on top of PIL (antialiased drawing)
#import utils
__metaclass__ = type
cvRed = (0,0,255)
cvGreen = (0,255,0)
cvBlue = (255,0,0)
cvColors = utils.PlottingPropertyValues([cvRed,
cvGreen,
cvBlue])
def quitKey(key):
return chr(key&255)== 'q' or chr(key&255) == 'Q'
def saveKey(key):
return chr(key&255) == 's'
def int2FOURCC(x):
fourcc = ''
for i in xrange(4):
fourcc += unichr((x >> 8*i)&255)
return fourcc
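# e.g. int2FOURCC(0x34363248) returns u'H264' (least significant byte first)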
def plotLines(filename, origins, destinations, w = 1, resultFilename='image.png'):
'''Draws lines over the image '''
import Image, ImageDraw # PIL
img = Image.open(filename)
draw = ImageDraw.Draw(img)
#draw = aggdraw.Draw(img)
#pen = aggdraw.Pen("red", width)
for p1, p2 in zip(origins, destinations):
draw.line([p1.x, p1.y, p2.x, p2.y], width = w, fill = (256,0,0))
#draw.line([p1.x, p1.y, p2.x, p2.y], pen)
del draw
#out = utils.openCheck(resultFilename)
img.save(resultFilename)
def matlab2PointCorrespondences(filename):
'''Loads and converts the point correspondences saved
by the matlab camera calibration tool'''
    from numpy import loadtxt, savetxt
from numpy.lib.function_base import append
points = loadtxt(filename, delimiter=',')
savetxt(utils.removeExtension(filename)+'-point-correspondences.txt',append(points[:,:2].T, points[:,3:].T, axis=0))
def loadPointCorrespondences(filename):
'''Loads and returns the corresponding points in world (first 2 lines) and image spaces (last 2 lines)'''
from numpy.lib.npyio import loadtxt
from numpy import float32
points = loadtxt(filename, dtype=float32)
return (points[:2,:].T, points[2:,:].T) # (world points, image points)
def cvMatToArray(cvmat):
'''Converts an OpenCV CvMat to numpy array.'''
print('Deprecated, use new interface')
from numpy.core.multiarray import zeros
a = zeros((cvmat.rows, cvmat.cols))#array([[0.0]*cvmat.width]*cvmat.height)
for i in xrange(cvmat.rows):
for j in xrange(cvmat.cols):
a[i,j] = cvmat[i,j]
return a
if opencvAvailable:
def computeHomography(srcPoints, dstPoints, method=0, ransacReprojThreshold=3.0):
'''Returns the homography matrix mapping from srcPoints to dstPoints (dimension Nx2)'''
H, mask = cv2.findHomography(srcPoints, dstPoints, method, ransacReprojThreshold)
return H
def arrayToCvMat(a, t = cv2.CV_64FC1):
'''Converts a numpy array to an OpenCV CvMat, with default type CV_64FC1.'''
print('Deprecated, use new interface')
cvmat = cv2.cv.CreateMat(a.shape[0], a.shape[1], t)
for i in range(cvmat.rows):
for j in range(cvmat.cols):
cvmat[i,j] = a[i,j]
return cvmat
def cvPlot(img, positions, color, lastCoordinate = None, **kwargs):
        last = positions.length()  # plot the whole trajectory by default
if lastCoordinate is not None and lastCoordinate >=0:
last = min(positions.length()-1, lastCoordinate)
for i in range(0, last-1):
cv2.line(img, positions[i].asint().astuple(), positions[i+1].asint().astuple(), color, **kwargs)
def cvImshow(windowName, img, rescale = 1.0):
'Rescales the image (in particular if too large)'
from cv2 import resize
if rescale != 1.:
size = (int(round(img.shape[1]*rescale)), int(round(img.shape[0]*rescale)))
resizedImg = resize(img, size)
cv2.imshow(windowName, resizedImg)
else:
cv2.imshow(windowName, img)
def computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients):
from copy import deepcopy
from numpy import identity, array
newImgSize = (int(round(width*undistortedImageMultiplication)), int(round(height*undistortedImageMultiplication)))
newCameraMatrix = deepcopy(intrinsicCameraMatrix)
newCameraMatrix[0,2] = newImgSize[0]/2.
newCameraMatrix[1,2] = newImgSize[1]/2.
return cv2.initUndistortRectifyMap(intrinsicCameraMatrix, array(distortionCoefficients), identity(3), newCameraMatrix, newImgSize, cv2.CV_32FC1)
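    # The two maps returned above are the pixel lookup tables that
    # cv2.remap uses in undistortImage and displayTrajectories below.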
def playVideo(filename, firstFrameNum = 0, frameRate = -1, interactive = False, printFrames = True, text = None, rescale = 1., step = 1):
'''Plays the video'''
windowName = 'frame'
cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
wait = 5
if frameRate > 0:
wait = int(round(1000./frameRate))
if interactive:
wait = 0
capture = cv2.VideoCapture(filename)
if capture.isOpened():
key = -1
ret = True
frameNum = firstFrameNum
capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum)
while ret and not quitKey(key):
#ret, img = capture.read()
for i in xrange(step):
ret, img = capture.read()
if ret:
if printFrames:
print('frame {0}'.format(frameNum))
frameNum+=step
if text is not None:
cv2.putText(img, text, (10,50), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed)
cvImshow(windowName, img, rescale)
key = cv2.waitKey(wait)
if saveKey(key):
cv2.imwrite('image-{}.png'.format(frameNum), img)
cv2.destroyAllWindows()
else:
print('Video capture for {} failed'.format(filename))
def infoVideo(filename):
'''Provides all available info on video '''
cvPropertyNames = {cv2.cv.CV_CAP_PROP_FORMAT: "format",
cv2.cv.CV_CAP_PROP_FOURCC: "codec (fourcc)",
cv2.cv.CV_CAP_PROP_FPS: "fps",
cv2.cv.CV_CAP_PROP_FRAME_COUNT: "number of frames",
cv2.cv.CV_CAP_PROP_FRAME_HEIGHT: "heigh",
cv2.cv.CV_CAP_PROP_FRAME_WIDTH: "width",
cv2.cv.CV_CAP_PROP_RECTIFICATION: "rectification",
cv2.cv.CV_CAP_PROP_SATURATION: "saturation"}
capture = cv2.VideoCapture(filename)
if capture.isOpened():
for cvprop in [#cv2.cv.CV_CAP_PROP_BRIGHTNESS
#cv2.cv.CV_CAP_PROP_CONTRAST
#cv2.cv.CV_CAP_PROP_CONVERT_RGB
#cv2.cv.CV_CAP_PROP_EXPOSURE
cv2.cv.CV_CAP_PROP_FORMAT,
cv2.cv.CV_CAP_PROP_FOURCC,
cv2.cv.CV_CAP_PROP_FPS,
cv2.cv.CV_CAP_PROP_FRAME_COUNT,
cv2.cv.CV_CAP_PROP_FRAME_HEIGHT,
cv2.cv.CV_CAP_PROP_FRAME_WIDTH,
#cv2.cv.CV_CAP_PROP_GAIN,
#cv2.cv.CV_CAP_PROP_HUE
#cv2.cv.CV_CAP_PROP_MODE
#cv2.cv.CV_CAP_PROP_POS_AVI_RATIO
#cv2.cv.CV_CAP_PROP_POS_FRAMES
#cv2.cv.CV_CAP_PROP_POS_MSEC
#cv2.cv.CV_CAP_PROP_RECTIFICATION,
#cv2.cv.CV_CAP_PROP_SATURATION
]:
prop = capture.get(cvprop)
if cvprop == cv2.cv.CV_CAP_PROP_FOURCC and prop > 0:
prop = int2FOURCC(int(prop))
print('Video {}: {}'.format(cvPropertyNames[cvprop], prop))
else:
print('Video capture for {} failed'.format(filename))
def getImagesFromVideo(videoFilename, firstFrameNum = 0, nFrames = 1, saveImage = False, outputPrefix = 'image'):
'''Returns nFrames images from the video sequence'''
from math import floor, log10
images = []
capture = cv2.VideoCapture(videoFilename)
if capture.isOpened():
nDigits = int(floor(log10(capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))))+1
ret = False
capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum)
imgNum = 0
while imgNum<nFrames:
ret, img = capture.read()
i = 0
while not ret and i<10:
ret, img = capture.read()
i += 1
if img.size>0:
if saveImage:
imgNumStr = format(firstFrameNum+imgNum, '0{}d'.format(nDigits))
cv2.imwrite(outputPrefix+imgNumStr+'.png', img)
else:
images.append(img)
imgNum +=1
capture.release()
else:
print('Video capture for {} failed'.format(videoFilename))
return images
def getFPS(videoFilename):
capture = cv2.VideoCapture(videoFilename)
if capture.isOpened():
fps = capture.get(cv2.cv.CV_CAP_PROP_FPS)
capture.release()
return fps
else:
print('Video capture for {} failed'.format(videoFilename))
return None
def imageBox(img, obj, frameNum, homography, width, height, px = 0.2, py = 0.2, pixelThreshold = 800):
'Computes the bounding box of object at frameNum'
x = []
y = []
if obj.hasFeatures():
for f in obj.getFeatures():
if f.existsAtInstant(frameNum):
projectedPosition = f.getPositionAtInstant(frameNum).project(homography)
x.append(projectedPosition.x)
y.append(projectedPosition.y)
xmin = min(x)
xmax = max(x)
ymin = min(y)
ymax = max(y)
xMm = px * (xmax - xmin)
yMm = py * (ymax - ymin)
        a = max(ymax - ymin + (2 * yMm), xmax - xmin + (2 * xMm))
yCropMin = int(max(0, .5 * (ymin + ymax - a)))
yCropMax = int(min(height - 1, .5 * (ymin + ymax + a)))
xCropMin = int(max(0, .5 * (xmin + xmax - a)))
xCropMax = int(min(width - 1, .5 * (xmin + xmax + a)))
if yCropMax != yCropMin and xCropMax != xCropMin and (yCropMax - yCropMin) * (xCropMax - xCropMin) > pixelThreshold:
croppedImg = img[yCropMin : yCropMax, xCropMin : xCropMax]
else:
croppedImg = []
return croppedImg, yCropMin, yCropMax, xCropMin, xCropMax
def displayTrajectories(videoFilename, objects, boundingBoxes = {}, homography = None, firstFrameNum = 0, lastFrameNumArg = None, printFrames = True, rescale = 1., nFramesStep = 1, saveAllImages = False, undistort = False, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1.):
'''Displays the objects overlaid frame by frame over the video '''
from moving import userTypeNames
from math import ceil, log10
capture = cv2.VideoCapture(videoFilename)
width = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
windowName = 'frame'
#cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
if undistort: # setup undistortion
[map1, map2] = computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
if capture.isOpened():
key = -1
ret = True
frameNum = firstFrameNum
capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, firstFrameNum)
if lastFrameNumArg is None:
from sys import maxint
lastFrameNum = maxint
else:
lastFrameNum = lastFrameNumArg
nZerosFilename = int(ceil(log10(lastFrameNum)))
while ret and not quitKey(key) and frameNum <= lastFrameNum:
ret, img = capture.read()
if ret:
if undistort:
img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
if printFrames:
print('frame {0}'.format(frameNum))
for obj in objects:
if obj.existsAtInstant(frameNum):
if not hasattr(obj, 'projectedPositions'):
if homography is not None:
obj.projectedPositions = obj.positions.project(homography)
else:
obj.projectedPositions = obj.positions
cvPlot(img, obj.projectedPositions, cvRed, frameNum-obj.getFirstInstant())
if frameNum in boundingBoxes.keys():
for rect in boundingBoxes[frameNum]:
cv2.rectangle(img, rect[0].asint().astuple(), rect[1].asint().astuple(), cvRed)
elif obj.hasFeatures():
imgcrop, yCropMin, yCropMax, xCropMin, xCropMax = imageBox(img, obj, frameNum, homography, width, height)
cv2.rectangle(img, (xCropMin, yCropMin), (xCropMax, yCropMax), cvBlue, 1)
objDescription = '{} '.format(obj.num)
if userTypeNames[obj.userType] != 'unknown':
objDescription += userTypeNames[obj.userType][0].upper()
cv2.putText(img, objDescription, obj.projectedPositions[frameNum-obj.getFirstInstant()].asint().astuple(), cv2.cv.CV_FONT_HERSHEY_PLAIN, 1, cvRed)
if not saveAllImages:
cvImshow(windowName, img, rescale)
key = cv2.waitKey()
if saveAllImages or saveKey(key):
cv2.imwrite('image-{{:0{}}}.png'.format(nZerosFilename).format(frameNum), img)
frameNum += nFramesStep
if nFramesStep > 1:
capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
cv2.destroyAllWindows()
else:
print 'Cannot load file ' + videoFilename
def computeHomographyFromPDTV(camera):
'''Returns the homography matrix at ground level from PDTV camera
https://bitbucket.org/hakanardo/pdtv'''
from numpy import array
# camera = pdtv.load(cameraFilename)
srcPoints = [[x,y] for x, y in zip([1.,2.,2.,1.],[1.,1.,2.,2.])] # need floats!!
dstPoints = []
for srcPoint in srcPoints:
projected = camera.image_to_world(tuple(srcPoint))
dstPoints.append([projected[0], projected[1]])
H, mask = cv2.findHomography(array(srcPoints), array(dstPoints), method = 0) # No need for different methods for finding homography
return H
def undistortedCoordinates(map1, map2, x, y, maxDistance = 1.):
'''Returns the coordinates of a point in undistorted image
map1 and map2 are the mapping functions from undistorted image
to distorted (original image)
map1(x,y) = originalx, originaly'''
from numpy import abs, logical_and, unravel_index, dot, sum
from matplotlib.mlab import find
distx = abs(map1-x)
disty = abs(map2-y)
indices = logical_and(distx<maxDistance, disty<maxDistance)
closeCoordinates = unravel_index(find(indices), distx.shape) # returns i,j, ie y,x
xWeights = 1-distx[indices]
yWeights = 1-disty[indices]
return dot(xWeights, closeCoordinates[1])/sum(xWeights), dot(yWeights, closeCoordinates[0])/sum(yWeights)
def undistortTrajectoryFromCVMapping(map1, map2, t):
'''test 'perfect' inversion'''
from moving import Trajectory
from numpy import isnan
undistortedTrajectory = Trajectory()
for i,p in enumerate(t):
res = undistortedCoordinates(map1, map2, p.x,p.y)
if not isnan(res).any():
undistortedTrajectory.addPositionXY(res[0], res[1])
else:
print i,p,res
return undistortedTrajectory
def computeInverseMapping(originalImageSize, map1, map2):
'Computes inverse mapping from maps provided by cv2.initUndistortRectifyMap'
from numpy import ones, isnan
invMap1 = -ones(originalImageSize)
invMap2 = -ones(originalImageSize)
for x in range(0,originalImageSize[1]):
for y in range(0,originalImageSize[0]):
                res = undistortedCoordinates(map1, map2, x, y)
if not isnan(res).any():
invMap1[y,x] = res[0]
invMap2[y,x] = res[1]
return invMap1, invMap2
def cameraIntrinsicCalibration(path, checkerBoardSize=[6,7], secondPassSearch=False, display=False):
''' Camera calibration searches through all the images (jpg or png) located
        in _path_ for matches to a checkerboard pattern of size checkerBoardSize.
These images should all be of the same camera with the same resolution.
        For best results, use an asymmetric board and ensure that the image has
very high contrast, including the background. Suitable checkerboard:
http://ftp.isr.ist.utl.pt/pub/roswiki/attachments/camera_calibration(2f)Tutorials(2f)StereoCalibration/check-108.png
The code below is based off of:
https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
Modified by Paul St-Aubin
'''
from numpy import zeros, mgrid, float32, savetxt
import glob, os
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = zeros((checkerBoardSize[0]*checkerBoardSize[1],3), float32)
objp[:,:2] = mgrid[0:checkerBoardSize[1],0:checkerBoardSize[0]].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
        ## Loop through all images in _path_
images = glob.glob(os.path.join(path,'*.[jJ][pP][gG]'))+glob.glob(os.path.join(path,'*.[jJ][pP][eE][gG]'))+glob.glob(os.path.join(path,'*.[pP][nN][gG]'))
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (checkerBoardSize[1],checkerBoardSize[0]), None)
# If found, add object points, image points (after refining them)
if ret:
print 'Found pattern in '+fname
if(secondPassSearch):
corners = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
objpoints.append(objp)
imgpoints.append(corners)
# Draw and display the corners
if(display):
img = cv2.drawChessboardCorners(img, (checkerBoardSize[1],checkerBoardSize[0]), corners, ret)
                    if img is not None:
cv2.imshow('img',img)
cv2.waitKey(0)
## Close up image loading and calibrate
cv2.destroyAllWindows()
if len(objpoints) == 0 or len(imgpoints) == 0:
return False
try:
ret, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
except NameError:
return False
savetxt('intrinsic-camera.txt', camera_matrix)
return camera_matrix, dist_coeffs
def undistortImage(img, intrinsicCameraMatrix = None, distortionCoefficients = None, undistortedImageMultiplication = 1., interpolation=cv2.INTER_LINEAR):
'''Undistorts the image passed in argument'''
width = img.shape[1]
height = img.shape[0]
[map1, map2] = computeUndistortMaps(width, height, undistortedImageMultiplication, intrinsicCameraMatrix, distortionCoefficients)
return cv2.remap(img, map1, map2, interpolation=interpolation)
def printCvMat(cvmat, out = stdout):
'''Prints the cvmat to out'''
print('Deprecated, use new interface')
for i in xrange(cvmat.rows):
for j in xrange(cvmat.cols):
out.write('{0} '.format(cvmat[i,j]))
out.write('\n')
def projectArray(homography, points):
'''Returns the coordinates of the projected points through homography
(format: array 2xN points)'''
from numpy.core import dot
from numpy.core.multiarray import array
from numpy.lib.function_base import append
if points.shape[0] != 2:
raise Exception('points of dimension {0} {1}'.format(points.shape[0], points.shape[1]))
if (homography is not None) and homography.size>0:
        #alternatively, one could use cv2.convertPointsToHomogeneous and other conversions to/from homogeneous coordinates
augmentedPoints = append(points,[[1]*points.shape[1]], 0)
prod = dot(homography, augmentedPoints)
return prod[0:2]/prod[2]
else:
return points
def project(homography, p):
'''Returns the coordinates of the projection of the point p with coordinates p[0], p[1]
through homography'''
from numpy import array
return projectArray(homography, array([[p[0]],[p[1]]]))
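# e.g. project(H, (10, 20)) applies the 3x3 homography H to the homogeneous
# point (10, 20, 1) and returns the normalized 2x1 image coordinates.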
def projectTrajectory(homography, trajectory):
'''Projects a series of points in the format
[[x1, x2, ...],
[y1, y2, ...]]'''
from numpy.core.multiarray import array
return projectArray(homography, array(trajectory))
def invertHomography(homography):
'''Returns an inverted homography
Unnecessary for reprojection over camera image'''
from numpy.linalg.linalg import inv
invH = inv(homography)
invH /= invH[2,2]
return invH
def undistortTrajectory(invMap1, invMap2, positions):
from numpy import floor, ceil
floorPositions = floor(positions)
#ceilPositions = ceil(positions)
undistortedTrajectory = [[],[]]
for i in xrange(len(positions[0])):
x,y = None, None
if positions[0][i]+1 < invMap1.shape[1] and positions[1][i]+1 < invMap1.shape[0]:
floorX = invMap1[floorPositions[1][i], floorPositions[0][i]]
floorY = invMap2[floorPositions[1][i], floorPositions[0][i]]
ceilX = invMap1[floorPositions[1][i]+1, floorPositions[0][i]+1]
ceilY = invMap2[floorPositions[1][i]+1, floorPositions[0][i]+1]
#ceilX = invMap1[ceilPositions[1][i], ceilPositions[0][i]]
#ceilY = invMap2[ceilPositions[1][i], ceilPositions[0][i]]
if floorX >=0 and floorY >=0 and ceilX >=0 and ceilY >=0:
x = floorX+(positions[0][i]-floorPositions[0][i])*(ceilX-floorX)
y = floorY+(positions[1][i]-floorPositions[1][i])*(ceilY-floorY)
undistortedTrajectory[0].append(x)
undistortedTrajectory[1].append(y)
return undistortedTrajectory
def projectGInputPoints(homography, points):
from numpy import array
return projectTrajectory(homography, array(points+[points[0]]).T)
if opencvAvailable:
def computeTranslation(img1, img2, img1Points, maxTranslation2, minNMatches, windowSize = (5,5), level = 5, criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)):
'''Computes the translation of img2 with respect to img1
(loaded using OpenCV as numpy arrays)
img1Points are used to compute the translation
TODO add diagnostic if data is all over the place, and it most likely is not a translation (eg zoom, other non linear distortion)'''
from numpy.core.multiarray import array
from numpy.lib.function_base import median
from numpy.core.fromnumeric import sum
nextPoints = array([])
(img2Points, status, track_error) = cv2.calcOpticalFlowPyrLK(img1, img2, img1Points, nextPoints, winSize=windowSize, maxLevel=level, criteria=criteria)
# calcOpticalFlowPyrLK(prevImg, nextImg, prevPts[, nextPts[, status[, err[, winSize[, maxLevel[, criteria[, derivLambda[, flags]]]]]]]]) -> nextPts, status, err
delta = []
for (k, (p1,p2)) in enumerate(zip(img1Points, img2Points)):
if status[k] == 1:
dp = p2-p1
d = sum(dp**2)
if d < maxTranslation2:
delta.append(dp)
if len(delta) >= minNMatches:
return median(delta, axis=0)
        else:
            print('Not enough matches ({0}) to compute a reliable translation'.format(len(delta)))
            return None
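    # Hedged usage sketch (not part of the original module): estimating the
    # frame-to-frame translation from features detected in the first frame;
    # the detector parameters and thresholds below are assumptions.
    def computeTranslationExample(img1, img2):
        '''Illustrative sketch: img1 and img2 are consecutive grayscale frames'''
        img1Points = cv2.goodFeaturesToTrack(img1, maxCorners=1000, qualityLevel=0.01, minDistance=5)
        # maxTranslation2 is a squared displacement in pixels; matches moving farther are discarded
        return computeTranslation(img1, img2, img1Points, maxTranslation2=100., minNMatches=10)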
if skimageAvailable:
def HOG(image, rescaleSize = (64, 64), orientations=9, pixelsPerCell=(8, 8), cellsPerBlock=(2, 2), visualize=False, normalize=False):
from skimage.feature import hog
from skimage import color, transform
bwImg = color.rgb2gray(image)
inputImg = transform.resize(bwImg, rescaleSize)
features = hog(inputImg, orientations, pixelsPerCell, cellsPerBlock, visualize, normalize)
if visualize:
from matplotlib.pyplot import imshow, figure, subplot
hogViz = features[1]
features = features[0]
figure()
subplot(1,2,1)
            imshow(inputImg)
subplot(1,2,2)
imshow(hogViz)
return features
def createHOGTrainingSet(imageDirectory, classLabel, rescaleSize = (64, 64), orientations=9, pixelsPerCell=(8, 8), cellsPerBlock=(2, 2), visualize=False, normalize=False):
from os import listdir
from numpy import array, float32
from matplotlib.pyplot import imread
inputData = []
for filename in listdir(imageDirectory):
img = imread(imageDirectory+filename)
features = HOG(img, rescaleSize, orientations, pixelsPerCell, cellsPerBlock, visualize, normalize)
inputData.append(features)
nImages = len(inputData)
return array(inputData, dtype = float32), array([classLabel]*nImages, dtype = float32)
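    # Hedged usage sketch (not part of the original module): assembling a two-class
    # training set from two image directories; the directory names and class labels
    # are assumptions (note that the directories must end with a path separator).
    def createHOGTrainingSetExample():
        '''Illustrative sketch: positive and negative examples for a binary classifier'''
        from numpy import concatenate
        posFeatures, posLabels = createHOGTrainingSet('positive-images/', 1.)
        negFeatures, negLabels = createHOGTrainingSet('negative-images/', 0.)
        return concatenate([posFeatures, negFeatures]), concatenate([posLabels, negLabels])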
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.qtum import *
from test_framework.qtumconfig import *
import sys
class OpCallTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [['-txindex=1']]*2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def send_one_op_call_tx_with_counter_check(self, outputs, counter_should_increase_by=0, input_value=500000000, should_throw=False):
# 61bc221a counter()
old_out = int(self.node.callcontract(self.contract_address, "61bc221a")['executionResult']['output'], 16)
inpt = make_vin(self.node, input_value)
tx = make_transaction(self.node, [inpt], outputs)
if should_throw:
try:
self.node.sendrawtransaction(tx)
assert(False)
except JSONRPCException as e:
print(e)
pass
else:
self.node.sendrawtransaction(tx)
self.node.generate(1)
sync_blocks(self.nodes)
for i in range(2):
# 61bc221a counter()
out = int(self.nodes[i].callcontract(self.contract_address, "61bc221a")['executionResult']['output'], 16)
assert(out-old_out == counter_should_increase_by)
def send_multiple_op_call_txs_with_counter_check(self, num_txs, outputs, counter_should_increase_by):
# 61bc221a counter()
old_out = int(self.node.callcontract(self.contract_address, "61bc221a")['executionResult']['output'], 16)
i = 0
unspents = self.node.listunspent()
while i < num_txs and len(unspents) > 0:
            # Select as input a spendable output holding exactly 1000000*QTUM_MIN_GAS_PRICE satoshis (created by sendmany in run_test)
for tx_i in range(len(unspents)):
if int(unspents[tx_i]['amount']*COIN) == 1000000*QTUM_MIN_GAS_PRICE and unspents[tx_i]['spendable']:
break
else:
assert(False)
inpt = CTxIn(COutPoint(int(unspents[tx_i]['txid'], 16), unspents[tx_i]['vout']), nSequence=0)
tx = make_transaction(self.node, [inpt], outputs)
txid = self.node.sendrawtransaction(tx)
unspents = self.node.listunspent()
i += 1
self.node.generate(1)
sync_blocks(self.nodes)
for i in range(2):
# 61bc221a counter()
out = int(self.nodes[i].callcontract(self.contract_address, "61bc221a")['executionResult']['output'], 16)
assert(out-old_out == counter_should_increase_by)
# Deploy the testing contract
def create_contract_test(self):
"""
pragma solidity ^0.4.10;
contract Example {
uint public counter;
function inc() public {
counter += 1;
}
function getBalance() public {
return this.balance;
}
}
"""
contract_data = self.node.createcontract("6060604052341561000c57fe5b5b61011e8061001c6000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806312065fe0146058578063371303c014607b57806361bc221a14608a578063d0e30db01460ad575bfe5b3415605f57fe5b606560b5565b6040518082815260200191505060405180910390f35b3415608257fe5b608860d5565b005b3415609157fe5b609760e9565b6040518082815260200191505060405180910390f35b60b360ef565b005b60003073ffffffffffffffffffffffffffffffffffffffff163190505b90565b60016000600082825401925050819055505b565b60005481565b5b5600a165627a7a72305820fe93d8cc66557a2a6c8347f481f6d334402a7f90f8b2288668a874c34416a4dc0029", 1000000)
self.contract_address = contract_data['address']
block_height = self.node.getblockcount()
self.node.generate(1)
sync_blocks(self.nodes)
for i in range(2):
assert(self.nodes[i].getblockcount() == block_height+1)
assert(len(self.nodes[i].listcontracts()) == 1+NUM_DEFAULT_DGP_CONTRACTS)
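    # Hedged note (not part of the original test): the 4-byte hex selectors used
    # below ("61bc221a" counter(), "371303c0" inc(), "12065fe0" getBalance(),
    # "d0e30db0" deposit()) are the first four bytes of the keccak-256 hash of the
    # function signature. With the pysha3 package (an assumption) they could be
    # recomputed as:
    #
    #     import sha3
    #     sha3.keccak_256(b"inc()").hexdigest()[:8]  # -> '371303c0'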
# Sends a tx containing 2 op_call outputs calling inc()
def many_calls_in_same_tx_test(self):
outputs = []
outputs.append(make_op_call_output(0, b"\x04", CScriptNum(1000000), CScriptNum(QTUM_MIN_GAS_PRICE), bytes.fromhex("371303c0"), bytes.fromhex(self.contract_address)))
outputs.append(make_op_call_output(0, b"\x04", CScriptNum(1000000), CScriptNum(QTUM_MIN_GAS_PRICE), bytes.fromhex("371303c0"), bytes.fromhex(self.contract_address)))
self.send_one_op_call_tx_with_counter_check(outputs, counter_should_increase_by=2, input_value=2*1000000*QTUM_MIN_GAS_PRICE)
# Sends a normal raw op_call tx with a single output.
def normal_op_call_output_test(self):
outputs = []
outputs.append(make_op_call_output(0, b"\x04", b"\xff\x7f", CScriptNum(QTUM_MIN_GAS_PRICE), bytes.fromhex("371303c0"), bytes.fromhex(self.contract_address)))
self.send_one_op_call_tx_with_counter_check(outputs, counter_should_increase_by=1, input_value=0x7fff*QTUM_MIN_GAS_PRICE)
# Sends a tx containing 1 op_call output where txfee == gas_price*gas_limit.
def gas_equal_to_tx_fee_test(self):
outputs = []
outputs.append(make_op_call_output(0, b"\x04", CScriptNum(1000000), CScriptNum(QTUM_MIN_GAS_PRICE), bytes.fromhex("371303c0"), bytes.fromhex(self.contract_address)))
self.send_one_op_call_tx_with_counter_check(outputs, counter_should_increase_by=1, input_value=1000000*QTUM_MIN_GAS_PRICE)
# Sends a tx containing 1 op_call output where txfee < gas_price*gas_limit.
def gas_exceeding_tx_fee_100001_1_test(self):
outputs = []
outputs.append(make_op_call_output(0, b"\x04", CScriptNum(10000001), CScriptNum(QTUM_MIN_GAS_PRICE), bytes.fromhex("371303c0"), bytes.fromhex(self.contract_address)))
self.send_one_op_call_tx_with_counter_check(outputs, input_value=1000001*QTUM_MIN_GAS_PRICE-1, should_throw=True)
# Sends a tx containing 1 op_call output where txfee < gas_price*gas_limit.
def gas_exceeding_tx_fee_100001_2_test(self):
outputs = []
outputs.append(make_op_call_output(0, b"\x04", CScriptNum(1000001), CScriptNum(QTUM_MIN_GAS_PRICE), bytes.fromhex("371303c0"), bytes.fromhex(self.contract_address)))
self.send_one_op_call_tx_with_counter_check(outputs, input_value=1000000*QTUM_MIN_GAS_PRICE, should_throw=True)
# Sends a tx containing 2 op_call outputs that has a combined gas_price*gas_limit exceeding the tx fee.
# This tx should be rejected since executing such a tx would be unable to pay for its potential execution costs in the same way as a tx with one output where txfee < gas_price*gas_limit.
def two_calls_in_same_tx_exceeding_tx_fee_test(self):
outputs = []
outputs.append(make_op_call_output(0, b"\x04", CScriptNum(1000000), CScriptNum(QTUM_MIN_GAS_PRICE), bytes.fromhex("371303c0"), bytes.fromhex(self.contract_address)))
outputs.append(make_op_call_output(0, b"\x04", CScriptNum(1000000), CScriptNum(QTUM_MIN_GAS_PRICE), bytes.fromhex("371303c0"), bytes.fromhex(self.contract_address)))
self.send_one_op_call_tx_with_counter_check(outputs, input_value=2000000*QTUM_MIN_GAS_PRICE-1, should_throw=True)
# sends a tx containing 1 op_call output with a (if interpreted with a signed integer) negative gas limit calling inc()
def gas_limit_signedness_test(self):
outputs = []
gas_limit = b"\xff"
while len(gas_limit) < 20:
outputs.append(make_op_call_output(0, b"\x04", gas_limit, CScriptNum(QTUM_MIN_GAS_PRICE), bytes.fromhex("371303c0"), bytes.fromhex(self.contract_address)))
self.send_one_op_call_tx_with_counter_check(outputs, should_throw=True, input_value=min(max(int(bytes_to_hex_str(gas_limit), 16)*QTUM_MIN_GAS_PRICE, 10000000), 1000000000))
gas_limit += b"\xff"
# sends a tx containing 1 op_call output with a (if interpreted with a signed integer) negative gas limit calling inc()
def gas_limit_signedness_one_valid_test(self):
outputs = []
gas_limit = b"\xff"
outputs.append(make_op_call_output(0, b"\x04", b"\xff\xff\x00", CScriptNum(QTUM_MIN_GAS_PRICE), bytes.fromhex("371303c0"), bytes.fromhex(self.contract_address)))
outputs.append(make_op_call_output(0, b"\x04", b"\xff\xff", CScriptNum(QTUM_MIN_GAS_PRICE), bytes.fromhex("371303c0"), bytes.fromhex(self.contract_address)))
self.send_one_op_call_tx_with_counter_check(outputs, should_throw=True, input_value=2*0xffff*QTUM_MIN_GAS_PRICE)
# sends a tx containing 1 op_call output with a (if interpreted with a signed integer) negative gas price calling inc()
def gas_price_signedness_test(self):
outputs = []
outputs.append(make_op_call_output(0, b"\x04", b"\x01\x00", b"\xff\xff", bytes.fromhex("371303c0"), bytes.fromhex(self.contract_address)))
self.send_one_op_call_tx_with_counter_check(outputs, should_throw=True, input_value=10000000)
# sends a tx containing 1 op_call output with a possible negative gas limit and price calling inc()
def gas_limit_and_price_signedness_test(self):
outputs = []
outputs.append(make_op_call_output(0, b"\x04", b"\xff\xff", b"\xff", bytes.fromhex("371303c0"), bytes.fromhex(self.contract_address)))
self.send_one_op_call_tx_with_counter_check(outputs, should_throw=True, input_value=0xff*0xffff)
# Sends 100 valid op_call txs
def send_100_txs_test(self):
outputs = []
outputs.append(make_op_call_output(0, b"\x04", CScriptNum(1000000), CScriptNum(QTUM_MIN_GAS_PRICE), bytes.fromhex("371303c0"), bytes.fromhex(self.contract_address)))
self.send_multiple_op_call_txs_with_counter_check(100, outputs, 100)
def send_tx_with_value_test(self):
outputs = []
# d0e30db0 deposit()
outputs.append(make_op_call_output(100000000, b"\x04", CScriptNum(1000000), CScriptNum(QTUM_MIN_GAS_PRICE), bytes.fromhex("d0e30db0"), bytes.fromhex(self.contract_address)))
self.send_one_op_call_tx_with_counter_check(outputs, counter_should_increase_by=0, input_value=100000000+1000000*QTUM_MIN_GAS_PRICE)
# 12065fe0 getBalance()
balance = int(self.node.callcontract(self.contract_address, "12065fe0")['executionResult']['output'], 16)
assert(balance == 100000000)
def run_test(self):
self.node = self.nodes[0]
connect_nodes(self.nodes[0], 1)
generatesynchronized(self.nodes[0], 200+COINBASE_MATURITY, None, self.nodes)
self.node.sendmany("", {self.node.getnewaddress(): 1000000*QTUM_MIN_GAS_PRICE / Decimal('100000000') for i in range(200)})
print("Creating contract")
self.create_contract_test()
print("Calling inc() in two outputs")
self.many_calls_in_same_tx_test()
print("Calling inc() in one output")
self.normal_op_call_output_test()
print("Calling inc() in one output with txfee equal to gas_limit*gas_price")
self.gas_equal_to_tx_fee_test()
print("Calling inc() in one output with txfee < gas_limit*gas_price")
self.gas_exceeding_tx_fee_100001_1_test()
print("Second test of inc() in one outputs with txfee < gas_limit*gas_price")
self.gas_exceeding_tx_fee_100001_2_test()
print("Second test of inc() in one output with txfee < gas_limit*gas_price")
self.two_calls_in_same_tx_exceeding_tx_fee_test()
print("Mining a block with 100 txs each with an output calling inc()")
self.send_100_txs_test()
print("Checking that the value of txs are correctly updated")
self.send_tx_with_value_test()
print("Checking gas limit signedness where one tx is valid")
self.gas_limit_signedness_one_valid_test()
print("Checking gas limit signedness")
self.gas_limit_signedness_test()
print("Checking gas price signedness")
self.gas_price_signedness_test()
print("Checking gas limit and gas price signedness")
self.gas_limit_and_price_signedness_test()
if __name__ == '__main__':
OpCallTest().main()
|
|
# Copyright 2012, Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the volume RPC API.
"""
from oslo_config import cfg
from oslo_serialization import jsonutils
from cinder import quota
from cinder import rpc
from cinder.volume import utils
CONF = cfg.CONF
QUOTAS = quota.QUOTAS
class VolumeAPI(rpc.RPCAPI):
"""Client side of the volume rpc API.
API version history:
1.0 - Initial version.
1.1 - Adds clone volume option to create_volume.
1.2 - Add publish_service_capabilities() method.
1.3 - Pass all image metadata (not just ID) in copy_volume_to_image.
1.4 - Add request_spec, filter_properties and
allow_reschedule arguments to create_volume().
1.5 - Add accept_transfer.
1.6 - Add extend_volume.
1.7 - Adds host_name parameter to attach_volume()
to allow attaching to host rather than instance.
1.8 - Add migrate_volume, rename_volume.
1.9 - Add new_user and new_project to accept_transfer.
1.10 - Add migrate_volume_completion, remove rename_volume.
1.11 - Adds mode parameter to attach_volume()
to support volume read-only attaching.
1.12 - Adds retype.
1.13 - Adds create_export.
1.14 - Adds reservation parameter to extend_volume().
1.15 - Adds manage_existing and unmanage_only flag to delete_volume.
1.16 - Removes create_export.
1.17 - Add replica option to create_volume, promote_replica and
sync_replica.
1.18 - Adds create_consistencygroup, delete_consistencygroup,
create_cgsnapshot, and delete_cgsnapshot. Also adds
the consistencygroup_id parameter in create_volume.
1.19 - Adds update_migrated_volume
1.20 - Adds support for sending objects over RPC in create_snapshot()
and delete_snapshot()
1.21 - Adds update_consistencygroup.
1.22 - Adds create_consistencygroup_from_src.
1.23 - Adds attachment_id to detach_volume.
1.24 - Removed duplicated parameters: snapshot_id, image_id,
source_volid, source_replicaid, consistencygroup_id and
               cgsnapshot_id from create_volume. All of them are already
passed either in request_spec or available in the DB.
1.25 - Add source_cg to create_consistencygroup_from_src.
1.26 - Adds support for sending objects over RPC in
create_consistencygroup(), create_consistencygroup_from_src(),
update_consistencygroup() and delete_consistencygroup().
1.27 - Adds support for replication V2
1.28 - Adds manage_existing_snapshot
1.29 - Adds get_capabilities.
1.30 - Adds remove_export
1.31 - Updated: create_consistencygroup_from_src(), create_cgsnapshot()
and delete_cgsnapshot() to cast method only with necessary
args. Forwarding CGSnapshot object instead of CGSnapshot_id.
1.32 - Adds support for sending objects over RPC in create_volume().
1.33 - Adds support for sending objects over RPC in delete_volume().
1.34 - Adds support for sending objects over RPC in retype().
1.35 - Adds support for sending objects over RPC in extend_volume().
1.36 - Adds support for sending objects over RPC in migrate_volume(),
migrate_volume_completion(), and update_migrated_volume().
1.37 - Adds old_reservations parameter to retype to support quota
checks in the API.
1.38 - Scaling backup service, add get_backup_device() and
secure_file_operations_enabled()
"""
RPC_API_VERSION = '1.38'
TOPIC = CONF.volume_topic
BINARY = 'cinder-volume'
def _get_cctxt(self, host, version):
new_host = utils.get_volume_rpc_host(host)
return self.client.prepare(server=new_host, version=version)
def create_consistencygroup(self, ctxt, group, host):
cctxt = self._get_cctxt(host, '1.26')
cctxt.cast(ctxt, 'create_consistencygroup',
group=group)
def delete_consistencygroup(self, ctxt, group):
cctxt = self._get_cctxt(group.host, '1.26')
cctxt.cast(ctxt, 'delete_consistencygroup',
group=group)
def update_consistencygroup(self, ctxt, group, add_volumes=None,
remove_volumes=None):
cctxt = self._get_cctxt(group.host, '1.26')
cctxt.cast(ctxt, 'update_consistencygroup',
group=group,
add_volumes=add_volumes,
remove_volumes=remove_volumes)
def create_consistencygroup_from_src(self, ctxt, group, cgsnapshot=None,
source_cg=None):
cctxt = self._get_cctxt(group.host, '1.31')
cctxt.cast(ctxt, 'create_consistencygroup_from_src',
group=group,
cgsnapshot=cgsnapshot,
source_cg=source_cg)
def create_cgsnapshot(self, ctxt, cgsnapshot):
cctxt = self._get_cctxt(cgsnapshot.consistencygroup.host, '1.31')
cctxt.cast(ctxt, 'create_cgsnapshot', cgsnapshot=cgsnapshot)
def delete_cgsnapshot(self, ctxt, cgsnapshot):
cctxt = self._get_cctxt(cgsnapshot.consistencygroup.host, '1.31')
cctxt.cast(ctxt, 'delete_cgsnapshot', cgsnapshot=cgsnapshot)
def create_volume(self, ctxt, volume, host, request_spec,
filter_properties, allow_reschedule=True):
request_spec_p = jsonutils.to_primitive(request_spec)
msg_args = {'volume_id': volume.id, 'request_spec': request_spec_p,
'filter_properties': filter_properties,
'allow_reschedule': allow_reschedule}
if self.client.can_send_version('1.32'):
version = '1.32'
msg_args['volume'] = volume
else:
version = '1.24'
cctxt = self._get_cctxt(host, version)
cctxt.cast(ctxt, 'create_volume', **msg_args)
def delete_volume(self, ctxt, volume, unmanage_only=False):
msg_args = {'volume_id': volume.id, 'unmanage_only': unmanage_only}
if self.client.can_send_version('1.33'):
version = '1.33'
msg_args['volume'] = volume
else:
version = '1.15'
cctxt = self._get_cctxt(volume.host, version)
cctxt.cast(ctxt, 'delete_volume', **msg_args)
def create_snapshot(self, ctxt, volume, snapshot):
cctxt = self._get_cctxt(volume['host'], version='1.20')
cctxt.cast(ctxt, 'create_snapshot', volume_id=volume['id'],
snapshot=snapshot)
def delete_snapshot(self, ctxt, snapshot, host, unmanage_only=False):
cctxt = self._get_cctxt(host, version='1.20')
cctxt.cast(ctxt, 'delete_snapshot', snapshot=snapshot,
unmanage_only=unmanage_only)
def attach_volume(self, ctxt, volume, instance_uuid, host_name,
mountpoint, mode):
cctxt = self._get_cctxt(volume['host'], '1.11')
return cctxt.call(ctxt, 'attach_volume',
volume_id=volume['id'],
instance_uuid=instance_uuid,
host_name=host_name,
mountpoint=mountpoint,
mode=mode)
def detach_volume(self, ctxt, volume, attachment_id):
cctxt = self._get_cctxt(volume['host'], '1.20')
return cctxt.call(ctxt, 'detach_volume', volume_id=volume['id'],
attachment_id=attachment_id)
def copy_volume_to_image(self, ctxt, volume, image_meta):
cctxt = self._get_cctxt(volume['host'], '1.3')
cctxt.cast(ctxt, 'copy_volume_to_image', volume_id=volume['id'],
image_meta=image_meta)
def initialize_connection(self, ctxt, volume, connector):
cctxt = self._get_cctxt(volume['host'], version='1.0')
return cctxt.call(ctxt, 'initialize_connection',
volume_id=volume['id'],
connector=connector)
def terminate_connection(self, ctxt, volume, connector, force=False):
cctxt = self._get_cctxt(volume['host'], version='1.0')
return cctxt.call(ctxt, 'terminate_connection', volume_id=volume['id'],
connector=connector, force=force)
def remove_export(self, ctxt, volume):
cctxt = self._get_cctxt(volume['host'], '1.30')
cctxt.cast(ctxt, 'remove_export', volume_id=volume['id'])
def publish_service_capabilities(self, ctxt):
cctxt = self.client.prepare(fanout=True, version='1.2')
cctxt.cast(ctxt, 'publish_service_capabilities')
def accept_transfer(self, ctxt, volume, new_user, new_project):
cctxt = self._get_cctxt(volume['host'], '1.9')
return cctxt.call(ctxt, 'accept_transfer', volume_id=volume['id'],
new_user=new_user, new_project=new_project)
def extend_volume(self, ctxt, volume, new_size, reservations):
msg_args = {'volume_id': volume.id, 'new_size': new_size,
'reservations': reservations}
if self.client.can_send_version('1.35'):
version = '1.35'
msg_args['volume'] = volume
else:
version = '1.14'
cctxt = self._get_cctxt(volume.host, version)
cctxt.cast(ctxt, 'extend_volume', **msg_args)
def migrate_volume(self, ctxt, volume, dest_host, force_host_copy):
host_p = {'host': dest_host.host,
'capabilities': dest_host.capabilities}
msg_args = {'volume_id': volume.id, 'host': host_p,
'force_host_copy': force_host_copy}
if self.client.can_send_version('1.36'):
version = '1.36'
msg_args['volume'] = volume
else:
version = '1.8'
cctxt = self._get_cctxt(volume.host, version)
cctxt.cast(ctxt, 'migrate_volume', **msg_args)
def migrate_volume_completion(self, ctxt, volume, new_volume, error):
msg_args = {'volume_id': volume.id, 'new_volume_id': new_volume.id,
'error': error}
if self.client.can_send_version('1.36'):
version = '1.36'
msg_args['volume'] = volume
msg_args['new_volume'] = new_volume
else:
version = '1.10'
cctxt = self._get_cctxt(volume.host, version)
return cctxt.call(ctxt, 'migrate_volume_completion', **msg_args)
def retype(self, ctxt, volume, new_type_id, dest_host,
migration_policy='never', reservations=None,
old_reservations=None):
host_p = {'host': dest_host.host,
'capabilities': dest_host.capabilities}
msg_args = {'volume_id': volume.id, 'new_type_id': new_type_id,
'host': host_p, 'migration_policy': migration_policy,
'reservations': reservations}
if self.client.can_send_version('1.37'):
version = '1.37'
msg_args.update(volume=volume, old_reservations=old_reservations)
else:
if old_reservations is not None:
QUOTAS.rollback(ctxt, old_reservations)
if self.client.can_send_version('1.34'):
version = '1.34'
msg_args['volume'] = volume
else:
version = '1.12'
cctxt = self._get_cctxt(volume.host, version)
cctxt.cast(ctxt, 'retype', **msg_args)
def manage_existing(self, ctxt, volume, ref):
cctxt = self._get_cctxt(volume['host'], '1.15')
cctxt.cast(ctxt, 'manage_existing', volume_id=volume['id'], ref=ref)
def promote_replica(self, ctxt, volume):
cctxt = self._get_cctxt(volume['host'], '1.17')
cctxt.cast(ctxt, 'promote_replica', volume_id=volume['id'])
def reenable_replication(self, ctxt, volume):
cctxt = self._get_cctxt(volume['host'], '1.17')
cctxt.cast(ctxt, 'reenable_replication', volume_id=volume['id'])
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
cctxt = self._get_cctxt(new_volume['host'], '1.36')
cctxt.call(ctxt,
'update_migrated_volume',
volume=volume,
new_volume=new_volume,
volume_status=original_volume_status)
def enable_replication(self, ctxt, volume):
cctxt = self._get_cctxt(volume['host'], '1.27')
cctxt.cast(ctxt, 'enable_replication', volume=volume)
def disable_replication(self, ctxt, volume):
cctxt = self._get_cctxt(volume['host'], '1.27')
cctxt.cast(ctxt, 'disable_replication',
volume=volume)
def failover_replication(self,
ctxt,
volume,
secondary=None):
cctxt = self._get_cctxt(volume['host'], '1.27')
cctxt.cast(ctxt, 'failover_replication',
volume=volume,
secondary=secondary)
def list_replication_targets(self, ctxt, volume):
cctxt = self._get_cctxt(volume['host'], '1.27')
return cctxt.call(ctxt, 'list_replication_targets', volume=volume)
def manage_existing_snapshot(self, ctxt, snapshot, ref, host):
cctxt = self._get_cctxt(host, '1.28')
cctxt.cast(ctxt, 'manage_existing_snapshot',
snapshot=snapshot,
ref=ref)
def get_capabilities(self, ctxt, host, discover):
cctxt = self._get_cctxt(host, '1.29')
return cctxt.call(ctxt, 'get_capabilities', discover=discover)
def get_backup_device(self, ctxt, backup, volume):
new_host = utils.extract_host(volume.host)
cctxt = self.client.prepare(server=new_host, version='1.38')
return cctxt.call(ctxt, 'get_backup_device',
backup=backup)
def secure_file_operations_enabled(self, ctxt, volume):
new_host = utils.extract_host(volume.host)
cctxt = self.client.prepare(server=new_host, version='1.38')
return cctxt.call(ctxt, 'secure_file_operations_enabled',
volume=volume)
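# Hedged usage sketch (not part of the original module): a caller such as the
# volume API layer typically builds the client once and casts to the service
# that owns the volume; the context and volume objects below are assumptions.
#
#     volume_rpcapi = VolumeAPI()
#     volume_rpcapi.extend_volume(ctxt, volume, new_size=20, reservations=None)
#     volume_rpcapi.delete_volume(ctxt, volume)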
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2010 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import logging
import quantities
import os
from rmgpy import settings
from rmgpy.molecule import Molecule
from rmgpy.quantity import Quantity
from rmgpy.solver.base import TerminationTime, TerminationConversion
from rmgpy.solver.simple import SimpleReactor
from rmgpy.solver.liquid import LiquidReactor
from model import CoreEdgeReactionModel
################################################################################
class InputError(Exception): pass
################################################################################
rmg = None
speciesDict = {}
def database(
thermoLibraries = None,
reactionLibraries = None,
frequenciesLibraries = None,
seedMechanisms = None,
kineticsFamilies = 'default',
kineticsDepositories = 'default',
kineticsEstimator = 'group additivity',
):
# This function just stores the information about the database to be loaded
# We don't actually load the database until after we're finished reading
# the input file
if isinstance(thermoLibraries, str): thermoLibraries = [thermoLibraries]
if isinstance(reactionLibraries, str): reactionLibraries = [reactionLibraries]
if isinstance(seedMechanisms, str): seedMechanisms = [seedMechanisms]
if isinstance(frequenciesLibraries, str): frequenciesLibraries = [frequenciesLibraries]
rmg.databaseDirectory = settings['database.directory']
rmg.thermoLibraries = thermoLibraries or []
rmg.reactionLibraries = reactionLibraries or []
rmg.seedMechanisms = seedMechanisms or []
rmg.statmechLibraries = frequenciesLibraries or []
rmg.kineticsEstimator = kineticsEstimator
if kineticsDepositories == 'default':
rmg.kineticsDepositories = ['training']
elif kineticsDepositories == 'all':
rmg.kineticsDepositories = None
else:
if not isinstance(kineticsDepositories,list):
raise InputError("kineticsDepositories should be either 'default', 'all', or a list of names eg. ['training','PrIMe'].")
rmg.kineticsDepositories = kineticsDepositories
if kineticsFamilies in ('default', 'all', 'none'):
rmg.kineticsFamilies = kineticsFamilies
else:
if not isinstance(kineticsFamilies,list):
raise InputError("kineticsFamilies should be either 'default', 'all', 'none', or a list of names eg. ['H_Abstraction','R_Recombination'] or ['!Intra_Disproportionation'].")
rmg.kineticsFamilies = kineticsFamilies
def species(label, structure, reactive=True):
logging.debug('Found {0} species "{1}" ({2})'.format('reactive' if reactive else 'nonreactive', label, structure.toSMILES()))
spec, isNew = rmg.reactionModel.makeNewSpecies(structure, label=label, reactive=reactive)
if not isNew:
raise InputError("Species {0} is a duplicate of {1}. Species in input file must be unique".format(label,spec.label))
rmg.initialSpecies.append(spec)
speciesDict[label] = spec
def SMARTS(string):
return Molecule().fromSMARTS(string)
def SMILES(string):
return Molecule().fromSMILES(string)
def InChI(string):
return Molecule().fromInChI(string)
def adjacencyList(string):
return Molecule().fromAdjacencyList(string)
# Reaction systems
def simpleReactor(temperature,
pressure,
initialMoleFractions,
terminationConversion=None,
terminationTime=None,
sensitivity=None,
sensitivityThreshold=1e-3
):
logging.debug('Found SimpleReactor reaction system')
for value in initialMoleFractions.values():
if value < 0:
raise InputError('Initial mole fractions cannot be negative.')
    totalInitialMoles = sum(initialMoleFractions.values())
    if totalInitialMoles != 1:
        logging.warning('Initial mole fractions do not sum to one; renormalizing.')
        for spec in initialMoleFractions:
            initialMoleFractions[spec] /= totalInitialMoles
T = Quantity(temperature)
P = Quantity(pressure)
termination = []
if terminationConversion is not None:
for spec, conv in terminationConversion.iteritems():
termination.append(TerminationConversion(speciesDict[spec], conv))
if terminationTime is not None:
termination.append(TerminationTime(Quantity(terminationTime)))
if len(termination) == 0:
        raise InputError('No termination conditions specified for reaction system #{0}.'.format(len(rmg.reactionSystems)+1))
sensitiveSpecies = []
if sensitivity:
for spec in sensitivity:
sensitiveSpecies.append(speciesDict[spec])
system = SimpleReactor(T, P, initialMoleFractions, termination, sensitiveSpecies, sensitivityThreshold)
rmg.reactionSystems.append(system)
# Reaction systems
def liquidReactor(temperature,
initialConcentrations,
terminationConversion=None,
terminationTime=None,
sensitivity=None,
sensitivityThreshold=1e-3):
logging.debug('Found LiquidReactor reaction system')
T = Quantity(temperature)
for spec,conc in initialConcentrations.iteritems():
concentration = Quantity(conc)
# check the dimensions are ok
# convert to mol/m^3 (or something numerically nice? or must it be SI)
initialConcentrations[spec] = concentration.value_si
termination = []
if terminationConversion is not None:
for spec, conv in terminationConversion.iteritems():
termination.append(TerminationConversion(speciesDict[spec], conv))
if terminationTime is not None:
termination.append(TerminationTime(Quantity(terminationTime)))
if len(termination) == 0:
        raise InputError('No termination conditions specified for reaction system #{0}.'.format(len(rmg.reactionSystems)+1))
sensitiveSpecies = []
if sensitivity:
for spec in sensitivity:
sensitiveSpecies.append(speciesDict[spec])
system = LiquidReactor(T, initialConcentrations, termination, sensitiveSpecies, sensitivityThreshold)
rmg.reactionSystems.append(system)
def simulator(atol, rtol, sens_atol=1e-6, sens_rtol=1e-4):
rmg.absoluteTolerance = atol
rmg.relativeTolerance = rtol
rmg.sensitivityAbsoluteTolerance = sens_atol
rmg.sensitivityRelativeTolerance = sens_rtol
def solvation(solvent):
# If solvation module in input file, set the RMG solvent variable
if not isinstance(solvent,str):
raise InputError("solvent should be a string like 'water'")
rmg.solvent = solvent
def model(toleranceMoveToCore=None, toleranceKeepInEdge=0.0, toleranceInterruptSimulation=1.0, maximumEdgeSpecies=None):
"""
How to generate the model. `toleranceMoveToCore` must be specified. Other parameters are optional and control the pruning.
"""
if toleranceMoveToCore is None:
raise InputError("You must provide a toleranceMoveToCore value. It should be less than or equal to toleranceInterruptSimulation which is currently {0}".format(toleranceInterruptSimulation))
if toleranceMoveToCore > toleranceInterruptSimulation:
raise InputError("toleranceMoveToCore must be less than or equal to toleranceInterruptSimulation, which is currently {0}".format(toleranceInterruptSimulation))
rmg.fluxToleranceKeepInEdge = toleranceKeepInEdge
rmg.fluxToleranceMoveToCore = toleranceMoveToCore
rmg.fluxToleranceInterrupt = toleranceInterruptSimulation
rmg.maximumEdgeSpecies = maximumEdgeSpecies
def quantumMechanics(
software,
method,
fileStore = None,
scratchDirectory = None,
onlyCyclics = False,
maxRadicalNumber = 0,
):
from rmgpy.qm.main import QMCalculator
rmg.quantumMechanics = QMCalculator()
rmg.quantumMechanics.settings.software = software
rmg.quantumMechanics.settings.method = method
rmg.quantumMechanics.settings.fileStore = fileStore
rmg.quantumMechanics.settings.scratchDirectory = scratchDirectory
rmg.quantumMechanics.settings.onlyCyclics = onlyCyclics
rmg.quantumMechanics.settings.maxRadicalNumber = maxRadicalNumber
def pressureDependence(
method,
temperatures,
pressures,
maximumGrainSize = 0.0,
minimumNumberOfGrains = 0,
interpolation = None,
maximumAtoms=None,
):
from rmgpy.cantherm.pdep import PressureDependenceJob
# Setting the pressureDependence attribute to non-None enables pressure dependence
rmg.pressureDependence = PressureDependenceJob(network=None)
# Process method
rmg.pressureDependence.method = method
# Process interpolation model
rmg.pressureDependence.interpolationModel = interpolation
# Process temperatures
Tmin, Tmax, Tunits, Tcount = temperatures
rmg.pressureDependence.Tmin = Quantity(Tmin, Tunits)
rmg.pressureDependence.Tmax = Quantity(Tmax, Tunits)
rmg.pressureDependence.Tcount = Tcount
rmg.pressureDependence.generateTemperatureList()
# Process pressures
Pmin, Pmax, Punits, Pcount = pressures
rmg.pressureDependence.Pmin = Quantity(Pmin, Punits)
rmg.pressureDependence.Pmax = Quantity(Pmax, Punits)
rmg.pressureDependence.Pcount = Pcount
rmg.pressureDependence.generatePressureList()
# Process grain size and count
rmg.pressureDependence.maximumGrainSize = Quantity(maximumGrainSize)
rmg.pressureDependence.minimumGrainCount = minimumNumberOfGrains
# Process maximum atoms
rmg.pressureDependence.maximumAtoms = maximumAtoms
rmg.pressureDependence.activeJRotor = True
rmg.pressureDependence.activeKRotor = True
rmg.pressureDependence.rmgmode = True
def options(units='si', saveRestartPeriod=None, drawMolecules=False, generatePlots=False, saveSimulationProfiles=False, verboseComments=False, saveEdgeSpecies=False):
rmg.units = units
rmg.saveRestartPeriod = Quantity(saveRestartPeriod) if saveRestartPeriod else None
rmg.drawMolecules = drawMolecules
rmg.generatePlots = generatePlots
rmg.saveSimulationProfiles = saveSimulationProfiles
rmg.verboseComments = verboseComments
rmg.saveEdgeSpecies = saveEdgeSpecies
def generatedSpeciesConstraints(**kwargs):
validConstraints = [
'allowed',
'maximumCarbonAtoms',
'maximumHydrogenAtoms',
'maximumOxygenAtoms',
'maximumNitrogenAtoms',
'maximumSiliconAtoms',
'maximumSulfurAtoms',
'maximumHeavyAtoms',
'maximumRadicalElectrons',
]
for key, value in kwargs.items():
if key not in validConstraints:
raise InputError('Invalid generated species constraint {0!r}.'.format(key))
rmg.speciesConstraints[key] = value
################################################################################
def readInputFile(path, rmg0):
"""
Read an RMG input file at `path` on disk into the :class:`RMG` object
`rmg`.
"""
global rmg, speciesDict
full_path = os.path.abspath(os.path.expandvars(path))
try:
f = open(full_path)
except IOError, e:
logging.error('The input file "{0}" could not be opened.'.format(full_path))
logging.info('Check that the file exists and that you have read access.')
raise e
logging.info('Reading input file "{0}"...'.format(full_path))
rmg = rmg0
rmg.reactionModel = CoreEdgeReactionModel()
rmg.initialSpecies = []
rmg.reactionSystems = []
speciesDict = {}
global_context = { '__builtins__': None }
local_context = {
'__builtins__': None,
'True': True,
'False': False,
'database': database,
'species': species,
'SMARTS': SMARTS,
'SMILES': SMILES,
'InChI': InChI,
'adjacencyList': adjacencyList,
'simpleReactor': simpleReactor,
'liquidReactor': liquidReactor,
'simulator': simulator,
'solvation': solvation,
'model': model,
'quantumMechanics': quantumMechanics,
'pressureDependence': pressureDependence,
'options': options,
'generatedSpeciesConstraints': generatedSpeciesConstraints,
}
try:
exec f in global_context, local_context
except (NameError, TypeError, SyntaxError), e:
logging.error('The input file "{0}" was invalid:'.format(full_path))
logging.exception(e)
raise
finally:
f.close()
# convert keys from species names into species objects.
for reactionSystem in rmg.reactionSystems:
reactionSystem.convertInitialKeysToSpeciesObjects(speciesDict)
logging.info('')
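# Hedged illustration (not part of the original module): a minimal input file that
# readInputFile() can execute could look like the following; the library names,
# species structure and numerical tolerances are assumptions.
#
#     database(
#         thermoLibraries = ['primaryThermoLibrary'],
#         kineticsFamilies = 'default',
#         kineticsEstimator = 'group additivity',
#     )
#     species(
#         label = 'CH4',
#         reactive = True,
#         structure = SMILES('C'),
#     )
#     simpleReactor(
#         temperature = (1350, 'K'),
#         pressure = (1.0, 'bar'),
#         initialMoleFractions = {'CH4': 1.0},
#         terminationTime = (1e0, 's'),
#     )
#     simulator(atol = 1e-16, rtol = 1e-8)
#     model(toleranceMoveToCore = 0.1)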
################################################################################
def readThermoInputFile(path, rmg0):
"""
    Read a thermo estimation input file at `path` on disk into the :class:`RMG` object
`rmg`.
"""
global rmg, speciesDict
full_path = os.path.abspath(os.path.expandvars(path))
try:
f = open(full_path)
except IOError, e:
logging.error('The input file "{0}" could not be opened.'.format(full_path))
logging.info('Check that the file exists and that you have read access.')
raise e
logging.info('Reading input file "{0}"...'.format(full_path))
rmg = rmg0
rmg.reactionModel = CoreEdgeReactionModel()
rmg.initialSpecies = []
rmg.reactionSystems = []
speciesDict = {}
global_context = { '__builtins__': None }
local_context = {
'__builtins__': None,
'True': True,
'False': False,
'database': database,
'species': species,
'SMARTS': SMARTS,
'SMILES': SMILES,
'InChI': InChI,
'solvation': solvation,
'adjacencyList': adjacencyList,
'quantumMechanics': quantumMechanics,
}
try:
exec f in global_context, local_context
except (NameError, TypeError, SyntaxError), e:
logging.error('The input file "{0}" was invalid:'.format(full_path))
logging.exception(e)
raise
finally:
f.close()
logging.info('')
################################################################################
def saveInputFile(path, rmg):
"""
Save an RMG input file at `path` on disk from the :class:`RMG` object
`rmg`.
"""
f = open(path, 'w')
# Databases
f.write('database(\n')
#f.write(' "{0}",\n'.format(rmg.databaseDirectory))
f.write(' thermoLibraries = {0!r},\n'.format(rmg.thermoLibraries))
f.write(' reactionLibraries = {0!r},\n'.format(rmg.reactionLibraries))
f.write(' seedMechanisms = {0!r},\n'.format(rmg.seedMechanisms))
f.write(' kineticsDepositories = {0!r},\n'.format(rmg.kineticsDepositories))
f.write(' kineticsFamilies = {0!r},\n'.format(rmg.kineticsFamilies))
f.write(' kineticsEstimator = {0!r},\n'.format(rmg.kineticsEstimator))
f.write(')\n\n')
# Species
for species in rmg.initialSpecies:
f.write('species(\n')
f.write(' label = "{0}",\n'.format(species.label))
f.write(' reactive = {0},\n'.format(species.reactive))
f.write(' structure = adjacencyList(\n')
f.write('"""\n')
f.write(species.molecule[0].toAdjacencyList())
f.write('"""),\n')
f.write(')\n\n')
# Reaction systems
for system in rmg.reactionSystems:
if rmg.solvent:
f.write('liquidReactor(\n')
f.write(' temperature = ({0:g},"{1!s}"),\n'.format(system.T.getValue(),system.T.units))
f.write(' initialConcentrations={\n')
for species, conc in system.initialConcentrations.iteritems():
f.write(' "{0!s}": ({1:g},"{2!s}"),\n'.format(species.label,conc.getValue(),conc.units))
else:
f.write('simpleReactor(\n')
f.write(' temperature = ({0:g},"{1!s}"),\n'.format(system.T.getValue(),system.T.units))
# Convert the pressure from SI pascal units to bar here
# Do something more fancy later for converting to user's desired units for both T and P..
f.write(' pressure = ({0:g},"{1!s}"),\n'.format(system.P.getValue(),system.P.units))
f.write(' initialMoleFractions={\n')
for species, molfrac in system.initialMoleFractions.iteritems():
f.write(' "{0!s}": {1:g},\n'.format(species.label, molfrac))
f.write(' },\n')
# Termination criteria
conversions = ''
for term in system.termination:
if isinstance(term, TerminationTime):
f.write(' terminationTime = ({0:g},"{1!s}"),\n'.format(term.time.getValue(),term.time.units))
else:
conversions += ' "{0:s}": {1:g},\n'.format(term.species.label, term.conversion)
if conversions:
f.write(' terminationConversion = {\n')
f.write(conversions)
f.write(' },\n')
# Sensitivity analysis
if system.sensitivity:
f.write(' sensitivity = {0},\n'.format(system.sensitivity))
f.write(' sensitivityThreshold = {0},\n'.format(system.sensitivityThreshold))
f.write(')\n\n')
if rmg.solvent:
f.write("solvation(\n solvent = '{0!s}'\n)\n\n".format(rmg.solvent))
# Simulator tolerances
f.write('simulator(\n')
f.write(' atol = {0:g},\n'.format(rmg.absoluteTolerance))
f.write(' rtol = {0:g},\n'.format(rmg.relativeTolerance))
f.write(')\n\n')
# Model
f.write('model(\n')
f.write(' toleranceMoveToCore = {0:g},\n'.format(rmg.fluxToleranceMoveToCore))
f.write(' toleranceKeepInEdge = {0:g},\n'.format(rmg.fluxToleranceKeepInEdge))
f.write(' toleranceInterruptSimulation = {0:g},\n'.format(rmg.fluxToleranceInterrupt))
f.write(' maximumEdgeSpecies = {0:d},\n'.format(rmg.maximumEdgeSpecies))
f.write(')\n\n')
# Pressure Dependence
if rmg.pressureDependence:
f.write('pressureDependence(\n')
f.write(' method = "{0!s}",\n'.format(rmg.pressureDependence.method))
        f.write('    maximumGrainSize = ({0:g},"{1!s}"),\n'.format(rmg.pressureDependence.maximumGrainSize.getValue(),rmg.pressureDependence.maximumGrainSize.units))
        f.write('    minimumNumberOfGrains = {0},\n'.format(rmg.pressureDependence.minimumGrainCount))
f.write(' temperatures = ({0:g},{1:g},"{2!s}",{3:d}),\n'.format(
rmg.pressureDependence.Tmin.getValue(),
rmg.pressureDependence.Tmax.getValue(),
rmg.pressureDependence.Tmax.units,
rmg.pressureDependence.Tcount,
))
f.write(' pressures = ({0:g},{1:g},"{2!s}",{3:d}),\n'.format(
rmg.pressureDependence.Pmin.getValue(),
rmg.pressureDependence.Pmax.getValue(),
rmg.pressureDependence.Pmax.units,
rmg.pressureDependence.Pcount,
))
        f.write('    interpolation = {0},\n'.format(rmg.pressureDependence.interpolationModel))
f.write(')\n\n')
if rmg.quantumMechanics:
f.write('quantumMechanics(\n')
f.write(' software="{0!s}",\n'.format(rmg.quantumMechanics.settings.software))
f.write(' method="{0!s}",\n'.format(rmg.quantumMechanics.settings.method))
        f.write('    onlyCyclics = {0},\n'.format(rmg.quantumMechanics.settings.onlyCyclics))
        f.write('    maxRadicalNumber = {0!s},\n'.format(rmg.quantumMechanics.settings.maxRadicalNumber))
f.write(')\n\n')
# Options
f.write('options(\n')
f.write(' units = "{0}",\n'.format(rmg.units))
if rmg.saveRestartPeriod:
f.write(' saveRestartPeriod = ({0},"{1}"),\n'.format(rmg.saveRestartPeriod.getValue(), rmg.saveRestartPeriod.units))
else:
f.write(' saveRestartPeriod = None,\n')
f.write(' drawMolecules = {0},\n'.format(rmg.drawMolecules))
f.write(' generatePlots = {0},\n'.format(rmg.generatePlots))
f.write(' saveSimulationProfiles = {0},\n'.format(rmg.saveSimulationProfiles))
f.write(' verboseComments = {0},\n'.format(rmg.verboseComments))
f.write(')\n\n')
f.close()
|
|
"""
Form Widget classes specific to the Django admin site.
"""
import copy
import json
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db.models.deletion import CASCADE
from django.urls import reverse
from django.urls.exceptions import NoReverseMatch
from django.utils.html import smart_urlquote
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
from django.utils.translation import get_language, gettext as _
class FilteredSelectMultiple(forms.SelectMultiple):
"""
A SelectMultiple with a JavaScript filter interface.
Note that the resulting JavaScript assumes that the jsi18n
catalog has been loaded in the page
"""
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = [
'vendor/jquery/jquery%s.js' % extra,
'jquery.init.js',
'core.js',
'SelectBox.js',
'SelectFilter2.js',
]
return forms.Media(js=["admin/js/%s" % path for path in js])
def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
self.verbose_name = verbose_name
self.is_stacked = is_stacked
super().__init__(attrs, choices)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['attrs']['class'] = 'selectfilter'
if self.is_stacked:
context['widget']['attrs']['class'] += 'stacked'
context['widget']['attrs']['data-field-name'] = self.verbose_name
context['widget']['attrs']['data-is-stacked'] = int(self.is_stacked)
return context
class AdminDateWidget(forms.DateInput):
class Media:
js = [
'admin/js/calendar.js',
'admin/js/admin/DateTimeShortcuts.js',
]
def __init__(self, attrs=None, format=None):
attrs = {'class': 'vDateField', 'size': '10', **(attrs or {})}
super().__init__(attrs=attrs, format=format)
class AdminTimeWidget(forms.TimeInput):
class Media:
js = [
'admin/js/calendar.js',
'admin/js/admin/DateTimeShortcuts.js',
]
def __init__(self, attrs=None, format=None):
attrs = {'class': 'vTimeField', 'size': '8', **(attrs or {})}
super().__init__(attrs=attrs, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
"""
A SplitDateTime Widget that has some admin-specific styling.
"""
template_name = 'admin/widgets/split_datetime.html'
def __init__(self, attrs=None):
widgets = [AdminDateWidget, AdminTimeWidget]
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
forms.MultiWidget.__init__(self, widgets, attrs)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['date_label'] = _('Date:')
context['time_label'] = _('Time:')
return context
class AdminRadioSelect(forms.RadioSelect):
template_name = 'admin/widgets/radio.html'
class AdminFileWidget(forms.ClearableFileInput):
template_name = 'admin/widgets/clearable_file_input.html'
def url_params_from_lookup_dict(lookups):
"""
Convert the type of lookups specified in a ForeignKey limit_choices_to
attribute to a dictionary of query parameters
"""
params = {}
if lookups and hasattr(lookups, 'items'):
for k, v in lookups.items():
if callable(v):
v = v()
if isinstance(v, (tuple, list)):
v = ','.join(str(x) for x in v)
elif isinstance(v, bool):
v = ('0', '1')[v]
else:
v = str(v)
params[k] = v
return params
class ForeignKeyRawIdWidget(forms.TextInput):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface rather than
in a <select> box.
"""
template_name = 'admin/widgets/foreign_key_raw_id.html'
def __init__(self, rel, admin_site, attrs=None, using=None):
self.rel = rel
self.admin_site = admin_site
self.db = using
super().__init__(attrs)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
rel_to = self.rel.model
if rel_to in self.admin_site._registry:
# The related object is registered with the same AdminSite
related_url = reverse(
'admin:%s_%s_changelist' % (
rel_to._meta.app_label,
rel_to._meta.model_name,
),
current_app=self.admin_site.name,
)
params = self.url_parameters()
if params:
related_url += '?' + '&'.join('%s=%s' % (k, v) for k, v in params.items())
context['related_url'] = mark_safe(related_url)
context['link_title'] = _('Lookup')
# The JavaScript code looks for this class.
context['widget']['attrs'].setdefault('class', 'vForeignKeyRawIdAdminField')
else:
context['related_url'] = None
if context['widget']['value']:
context['link_label'], context['link_url'] = self.label_and_url_for_value(value)
else:
context['link_label'] = None
return context
def base_url_parameters(self):
limit_choices_to = self.rel.limit_choices_to
if callable(limit_choices_to):
limit_choices_to = limit_choices_to()
return url_params_from_lookup_dict(limit_choices_to)
def url_parameters(self):
from django.contrib.admin.views.main import TO_FIELD_VAR
params = self.base_url_parameters()
params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
return params
def label_and_url_for_value(self, value):
key = self.rel.get_related_field().name
try:
obj = self.rel.model._default_manager.using(self.db).get(**{key: value})
except (ValueError, self.rel.model.DoesNotExist, ValidationError):
return '', ''
try:
url = reverse(
'%s:%s_%s_change' % (
self.admin_site.name,
obj._meta.app_label,
obj._meta.object_name.lower(),
),
args=(obj.pk,)
)
except NoReverseMatch:
url = '' # Admin not registered for target model.
return Truncator(obj).words(14), url
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
in a <select multiple> box.
"""
template_name = 'admin/widgets/many_to_many_raw_id.html'
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
if self.rel.model in self.admin_site._registry:
# The related object is registered with the same AdminSite
context['widget']['attrs']['class'] = 'vManyToManyRawIdAdminField'
return context
def url_parameters(self):
return self.base_url_parameters()
def label_and_url_for_value(self, value):
return '', ''
def value_from_datadict(self, data, files, name):
value = data.get(name)
if value:
return value.split(',')
def format_value(self, value):
return ','.join(str(v) for v in value) if value else ''
class RelatedFieldWidgetWrapper(forms.Widget):
"""
This class is a wrapper to a given widget to add the add icon for the
admin interface.
"""
template_name = 'admin/widgets/related_widget_wrapper.html'
def __init__(self, widget, rel, admin_site, can_add_related=None,
can_change_related=False, can_delete_related=False,
can_view_related=False):
self.needs_multipart_form = widget.needs_multipart_form
self.attrs = widget.attrs
self.choices = widget.choices
self.widget = widget
self.rel = rel
# Backwards compatible check for whether a user can add related
# objects.
if can_add_related is None:
can_add_related = rel.model in admin_site._registry
self.can_add_related = can_add_related
# XXX: The UX does not support multiple selected values.
multiple = getattr(widget, 'allow_multiple_selected', False)
self.can_change_related = not multiple and can_change_related
# XXX: The deletion UX can be confusing when dealing with cascading deletion.
cascade = getattr(rel, 'on_delete', None) is CASCADE
self.can_delete_related = not multiple and not cascade and can_delete_related
self.can_view_related = not multiple and can_view_related
# so we can check if the related object is registered with this AdminSite
self.admin_site = admin_site
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.widget = copy.deepcopy(self.widget, memo)
obj.attrs = self.widget.attrs
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.widget.is_hidden
@property
def media(self):
return self.widget.media
def get_related_url(self, info, action, *args):
return reverse("admin:%s_%s_%s" % (info + (action,)),
current_app=self.admin_site.name, args=args)
def get_context(self, name, value, attrs):
from django.contrib.admin.views.main import IS_POPUP_VAR, TO_FIELD_VAR
rel_opts = self.rel.model._meta
info = (rel_opts.app_label, rel_opts.model_name)
self.widget.choices = self.choices
url_params = '&'.join("%s=%s" % param for param in [
(TO_FIELD_VAR, self.rel.get_related_field().name),
(IS_POPUP_VAR, 1),
])
context = {
'rendered_widget': self.widget.render(name, value, attrs),
'name': name,
'url_params': url_params,
'model': rel_opts.verbose_name,
'can_add_related': self.can_add_related,
'can_change_related': self.can_change_related,
'can_delete_related': self.can_delete_related,
'can_view_related': self.can_view_related,
}
if self.can_add_related:
context['add_related_url'] = self.get_related_url(info, 'add')
if self.can_delete_related:
context['delete_related_template_url'] = self.get_related_url(info, 'delete', '__fk__')
if self.can_view_related or self.can_change_related:
context['change_related_template_url'] = self.get_related_url(info, 'change', '__fk__')
return context
def value_from_datadict(self, data, files, name):
return self.widget.value_from_datadict(data, files, name)
def value_omitted_from_data(self, data, files, name):
return self.widget.value_omitted_from_data(data, files, name)
def id_for_label(self, id_):
return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
def __init__(self, attrs=None):
super().__init__(attrs={'class': 'vLargeTextField', **(attrs or {})})
class AdminTextInputWidget(forms.TextInput):
def __init__(self, attrs=None):
super().__init__(attrs={'class': 'vTextField', **(attrs or {})})
class AdminEmailInputWidget(forms.EmailInput):
def __init__(self, attrs=None):
super().__init__(attrs={'class': 'vTextField', **(attrs or {})})
class AdminURLFieldWidget(forms.URLInput):
template_name = 'admin/widgets/url.html'
def __init__(self, attrs=None):
super().__init__(attrs={'class': 'vURLField', **(attrs or {})})
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['current_label'] = _('Currently:')
context['change_label'] = _('Change:')
context['widget']['href'] = smart_urlquote(context['widget']['value']) if value else ''
return context
class AdminIntegerFieldWidget(forms.NumberInput):
class_name = 'vIntegerField'
def __init__(self, attrs=None):
super().__init__(attrs={'class': self.class_name, **(attrs or {})})
class AdminBigIntegerFieldWidget(AdminIntegerFieldWidget):
class_name = 'vBigIntegerField'
class AdminUUIDInputWidget(forms.TextInput):
def __init__(self, attrs=None):
super().__init__(attrs={'class': 'vUUIDField', **(attrs or {})})
# Mapping of lowercase language codes [returned by Django's get_language()] to
# language codes supported by select2.
# See django/contrib/admin/static/admin/js/vendor/select2/i18n/*
SELECT2_TRANSLATIONS = {x.lower(): x for x in [
'ar', 'az', 'bg', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es', 'et',
'eu', 'fa', 'fi', 'fr', 'gl', 'he', 'hi', 'hr', 'hu', 'id', 'is',
'it', 'ja', 'km', 'ko', 'lt', 'lv', 'mk', 'ms', 'nb', 'nl', 'pl',
'pt-BR', 'pt', 'ro', 'ru', 'sk', 'sr-Cyrl', 'sr', 'sv', 'th',
'tr', 'uk', 'vi',
]}
SELECT2_TRANSLATIONS.update({'zh-hans': 'zh-CN', 'zh-hant': 'zh-TW'})
class AutocompleteMixin:
"""
Select widget mixin that loads options from AutocompleteJsonView via AJAX.
Renders the necessary data attributes for select2 and adds the static form
media.
"""
url_name = '%s:%s_%s_autocomplete'
def __init__(self, rel, admin_site, attrs=None, choices=(), using=None):
self.rel = rel
self.admin_site = admin_site
self.db = using
self.choices = choices
self.attrs = {} if attrs is None else attrs.copy()
def get_url(self):
model = self.rel.model
return reverse(self.url_name % (self.admin_site.name, model._meta.app_label, model._meta.model_name))
def build_attrs(self, base_attrs, extra_attrs=None):
"""
Set select2's AJAX attributes.
Attributes can be set using the html5 data attribute.
Nested attributes require a double dash as per
https://select2.org/configuration/data-attributes#nested-subkey-options
"""
attrs = super().build_attrs(base_attrs, extra_attrs=extra_attrs)
attrs.setdefault('class', '')
attrs.update({
'data-ajax--cache': 'true',
'data-ajax--type': 'GET',
'data-ajax--url': self.get_url(),
'data-theme': 'admin-autocomplete',
'data-allow-clear': json.dumps(not self.is_required),
'data-placeholder': '', # Allows clearing of the input.
'class': attrs['class'] + (' ' if attrs['class'] else '') + 'admin-autocomplete',
})
return attrs
def optgroups(self, name, value, attr=None):
"""Return selected options based on the ModelChoiceIterator."""
default = (None, [], 0)
groups = [default]
has_selected = False
selected_choices = {
str(v) for v in value
if str(v) not in self.choices.field.empty_values
}
if not self.is_required and not self.allow_multiple_selected:
default[1].append(self.create_option(name, '', '', False, 0))
choices = (
(obj.pk, self.choices.field.label_from_instance(obj))
for obj in self.choices.queryset.using(self.db).filter(pk__in=selected_choices)
)
for option_value, option_label in choices:
selected = (
str(option_value) in value and
(has_selected is False or self.allow_multiple_selected)
)
has_selected |= selected
index = len(default[1])
subgroup = default[1]
            subgroup.append(self.create_option(name, option_value, option_label, selected, index))
return groups
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
i18n_name = SELECT2_TRANSLATIONS.get(get_language())
i18n_file = ('admin/js/vendor/select2/i18n/%s.js' % i18n_name,) if i18n_name else ()
return forms.Media(
js=(
'admin/js/vendor/jquery/jquery%s.js' % extra,
'admin/js/vendor/select2/select2.full%s.js' % extra,
) + i18n_file + (
'admin/js/jquery.init.js',
'admin/js/autocomplete.js',
),
css={
'screen': (
'admin/css/vendor/select2/select2%s.css' % extra,
'admin/css/autocomplete.css',
),
},
)
class AutocompleteSelect(AutocompleteMixin, forms.Select):
pass
class AutocompleteSelectMultiple(AutocompleteMixin, forms.SelectMultiple):
pass
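# Usage sketch (not part of this module; 'Book' and 'author' are hypothetical
# names). ModelAdmin.autocomplete_fields is the usual entry point, but the
# widgets can also be attached to a form field by hand:
#
#     from django.contrib import admin
#     from myapp.models import Book
#
#     class BookForm(forms.ModelForm):
#         class Meta:
#             model = Book
#             fields = '__all__'
#             widgets = {
#                 'author': AutocompleteSelect(
#                     Book._meta.get_field('author').remote_field,
#                     admin.site,
#                 ),
#             }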
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RouteTablesOperations(object):
"""RouteTablesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
route_table_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
route_table_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
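    # Usage sketch (illustrative; assumes a NetworkManagementClient configured
    # for api-version 2019-07-01 and bound to the name `network_client`):
    #     poller = network_client.route_tables.begin_delete('my-rg', 'my-routetable')
    #     poller.result()  # block until the long-running delete reaches a terminal state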
def get(
self,
resource_group_name, # type: str
route_table_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.RouteTable"
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
route_table_name, # type: str
parameters, # type: "_models.RouteTable"
**kwargs # type: Any
):
# type: (...) -> "_models.RouteTable"
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RouteTable')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
route_table_name, # type: str
parameters, # type: "_models.RouteTable"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.RouteTable"]
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route table operation.
:type parameters: ~azure.mgmt.network.v2019_07_01.models.RouteTable
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
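    # Usage sketch (illustrative names): the poller resolves to the
    # deserialized RouteTable once the service reports the LRO as finished:
    #     route_table = network_client.route_tables.begin_create_or_update(
    #         'my-rg', 'my-routetable',
    #         _models.RouteTable(location='westus'),
    #     ).result()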
def _update_tags_initial(
self,
resource_group_name, # type: str
route_table_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.RouteTable"
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
route_table_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.RouteTable"]
"""Updates a route table tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to update route table tags.
:type parameters: ~azure.mgmt.network.v2019_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RouteTableListResult"]
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RouteTableListResult"]
"""Gets all route tables in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'} # type: ignore
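# Usage sketch: ItemPaged is lazy - each page is fetched on demand by get_next
# and flattened by extract_data, following nextLink until it is exhausted:
#     for route_table in network_client.route_tables.list_all():
#         print(route_table.name)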
|
|
from test import support
import random
import unittest
from functools import cmp_to_key
verbose = support.verbose
nerrors = 0
def check(tag, expected, raw, compare=None):
global nerrors
if verbose:
print(" checking", tag)
orig = raw[:] # save input in case of error
if compare:
raw.sort(key=cmp_to_key(compare))
else:
raw.sort()
if len(expected) != len(raw):
print("error in", tag)
print("length mismatch;", len(expected), len(raw))
print(expected)
print(orig)
print(raw)
nerrors += 1
return
for i, good in enumerate(expected):
maybe = raw[i]
if good is not maybe:
print("error in", tag)
print("out of order at index", i, good, maybe)
print(expected)
print(orig)
print(raw)
nerrors += 1
return
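# Example: check("demo", [1, 2, 3], [3, 1, 2]) sorts the third argument in
# place and then compares it to the expected list element by element with
# `is` - identity rather than equality - so a sort that substitutes
# equal-but-distinct objects is also reported as an error.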
class TestBase(unittest.TestCase):
def testStressfully(self):
# Try a variety of sizes at and around powers of 2, and at powers of 10.
sizes = [0]
for power in range(1, 10):
n = 2 ** power
sizes.extend(range(n-1, n+2))
sizes.extend([10, 100, 1000])
class Complains(object):
maybe_complain = True
def __init__(self, i):
self.i = i
def __lt__(self, other):
if Complains.maybe_complain and random.random() < 0.001:
if verbose:
print(" complaining at", self, other)
raise RuntimeError
return self.i < other.i
def __repr__(self):
return "Complains(%d)" % self.i
class Stable(object):
def __init__(self, key, i):
self.key = key
self.index = i
def __lt__(self, other):
return self.key < other.key
def __repr__(self):
return "Stable(%d, %d)" % (self.key, self.index)
for n in sizes:
x = list(range(n))
if verbose:
print("Testing size", n)
s = x[:]
check("identity", x, s)
s = x[:]
s.reverse()
check("reversed", x, s)
s = x[:]
random.shuffle(s)
check("random permutation", x, s)
y = x[:]
y.reverse()
s = x[:]
check("reversed via function", y, s, lambda a, b: (b>a)-(b<a))
if verbose:
print(" Checking against an insane comparison function.")
print(" If the implementation isn't careful, this may segfault.")
s = x[:]
s.sort(key=cmp_to_key(lambda a, b: int(random.random() * 3) - 1))
check("an insane function left some permutation", x, s)
if len(x) >= 2:
def bad_key(x):
raise RuntimeError
s = x[:]
self.assertRaises(RuntimeError, s.sort, key=bad_key)
x = [Complains(i) for i in x]
s = x[:]
random.shuffle(s)
Complains.maybe_complain = True
it_complained = False
try:
s.sort()
except RuntimeError:
it_complained = True
if it_complained:
Complains.maybe_complain = False
check("exception during sort left some permutation", x, s)
s = [Stable(random.randrange(10), i) for i in range(n)]
augmented = [(e, e.index) for e in s]
augmented.sort() # forced stable because ties broken by index
x = [e for e, i in augmented] # a stable sort of s
check("stability", x, s)
#==============================================================================
class TestBugs(unittest.TestCase):
def test_bug453523(self):
# bug 453523 -- list.sort() crasher.
# If this fails, the most likely outcome is a core dump.
# Mutations during a list sort should raise a ValueError.
class C:
def __lt__(self, other):
if L and random.random() < 0.75:
L.pop()
else:
L.append(3)
return random.random() < 0.5
L = [C() for i in range(50)]
self.assertRaises(ValueError, L.sort)
def test_undetected_mutation(self):
# Python 2.4a1 did not always detect mutation
memorywaster = []
for i in range(20):
def mutating_cmp(x, y):
L.append(3)
L.pop()
return (x > y) - (x < y)
L = [1,2]
self.assertRaises(ValueError, L.sort, key=cmp_to_key(mutating_cmp))
def mutating_cmp(x, y):
L.append(3)
del L[:]
return (x > y) - (x < y)
self.assertRaises(ValueError, L.sort, key=cmp_to_key(mutating_cmp))
memorywaster = [memorywaster]
#==============================================================================
class TestDecorateSortUndecorate(unittest.TestCase):
def test_decorated(self):
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
copy = data[:]
random.shuffle(data)
data.sort(key=str.lower)
def my_cmp(x, y):
xlower, ylower = x.lower(), y.lower()
return (xlower > ylower) - (xlower < ylower)
copy.sort(key=cmp_to_key(my_cmp))
def test_baddecorator(self):
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
self.assertRaises(TypeError, data.sort, key=lambda x,y: 0)
def test_stability(self):
data = [(random.randrange(100), i) for i in range(200)]
copy = data[:]
data.sort(key=lambda t: t[0]) # sort on the random first field
copy.sort() # sort using both fields
self.assertEqual(data, copy) # should get the same result
def test_key_with_exception(self):
# Verify that the wrapper has been removed
data = list(range(-2, 2))
dup = data[:]
self.assertRaises(ZeroDivisionError, data.sort, key=lambda x: 1/x)
self.assertEqual(data, dup)
def test_key_with_mutation(self):
data = list(range(10))
def k(x):
del data[:]
data[:] = range(20)
return x
self.assertRaises(ValueError, data.sort, key=k)
def test_key_with_mutating_del(self):
data = list(range(10))
class SortKiller(object):
def __init__(self, x):
pass
def __del__(self):
del data[:]
data[:] = range(20)
def __lt__(self, other):
return id(self) < id(other)
self.assertRaises(ValueError, data.sort, key=SortKiller)
def test_key_with_mutating_del_and_exception(self):
data = list(range(10))
## dup = data[:]
class SortKiller(object):
def __init__(self, x):
if x > 2:
raise RuntimeError
def __del__(self):
del data[:]
data[:] = list(range(20))
self.assertRaises(RuntimeError, data.sort, key=SortKiller)
## major honking subtlety: we *can't* do:
##
## self.assertEqual(data, dup)
##
## because there is a reference to a SortKiller in the
## traceback and by the time it dies we're outside the call to
## .sort() and so the list protection gimmicks are out of
## date (this cost some brain cells to figure out...).
def test_reverse(self):
data = list(range(100))
random.shuffle(data)
data.sort(reverse=True)
self.assertEqual(data, list(range(99,-1,-1)))
def test_reverse_stability(self):
data = [(random.randrange(100), i) for i in range(200)]
copy1 = data[:]
copy2 = data[:]
def my_cmp(x, y):
x0, y0 = x[0], y[0]
return (x0 > y0) - (x0 < y0)
def my_cmp_reversed(x, y):
x0, y0 = x[0], y[0]
return (y0 > x0) - (y0 < x0)
data.sort(key=cmp_to_key(my_cmp), reverse=True)
copy1.sort(key=cmp_to_key(my_cmp_reversed))
self.assertEqual(data, copy1)
copy2.sort(key=lambda x: x[0], reverse=True)
self.assertEqual(data, copy2)
#==============================================================================
def check_against_PyObject_RichCompareBool(self, L):
## The idea here is to exploit the fact that unsafe_tuple_compare uses
## PyObject_RichCompareBool for the second elements of tuples. So we have,
## for (most) L, sorted(L) == [y[1] for y in sorted([(0,x) for x in L])]
## This will work as long as __eq__ => not __lt__ for all the objects in L,
## which holds for all the types used below.
##
## Testing this way ensures that the optimized implementation remains consistent
## with the naive implementation, even if changes are made to any of the
## richcompares.
##
## This function tests sorting for three lists (it randomly shuffles each one):
## 1. L
## 2. [(x,) for x in L]
## 3. [((x,),) for x in L]
random.seed(0)
random.shuffle(L)
L_1 = L[:]
L_2 = [(x,) for x in L]
L_3 = [((x,),) for x in L]
for L in [L_1, L_2, L_3]:
optimized = sorted(L)
reference = [y[1] for y in sorted([(0,x) for x in L])]
for (opt, ref) in zip(optimized, reference):
self.assertIs(opt, ref)
#note: not assertEqual! We want to ensure *identical* behavior.
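# Worked example: for L = [float('nan')] * 3, every element compares unequal
# to itself, yet sorted(L) and the decorated reference sort must hand back the
# very same objects in the same slots; the assertIs above is what makes that
# observable.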
class TestOptimizedCompares(unittest.TestCase):
def test_safe_object_compare(self):
heterogeneous_lists = [[0, 'foo'],
[0.0, 'foo'],
[('foo',), 'foo']]
for L in heterogeneous_lists:
self.assertRaises(TypeError, L.sort)
self.assertRaises(TypeError, [(x,) for x in L].sort)
self.assertRaises(TypeError, [((x,),) for x in L].sort)
float_int_lists = [[1,1.1],
[1<<70,1.1],
[1.1,1],
[1.1,1<<70]]
for L in float_int_lists:
check_against_PyObject_RichCompareBool(self, L)
def test_unsafe_object_compare(self):
# This test is by ppperry. It ensures that unsafe_object_compare is
# verifying ms->key_richcompare == tp->richcompare before comparing.
class WackyComparator(int):
def __lt__(self, other):
elem.__class__ = WackyList2
return int.__lt__(self, other)
class WackyList1(list):
pass
class WackyList2(list):
def __lt__(self, other):
raise ValueError
L = [WackyList1([WackyComparator(i), i]) for i in range(10)]
elem = L[-1]
with self.assertRaises(ValueError):
L.sort()
L = [WackyList1([WackyComparator(i), i]) for i in range(10)]
elem = L[-1]
with self.assertRaises(ValueError):
[(x,) for x in L].sort()
# The following test is also by ppperry. It ensures that
# unsafe_object_compare handles Py_NotImplemented appropriately.
class PointlessComparator:
def __lt__(self, other):
return NotImplemented
L = [PointlessComparator(), PointlessComparator()]
self.assertRaises(TypeError, L.sort)
self.assertRaises(TypeError, [(x,) for x in L].sort)
# The following tests go through various types that would trigger
# ms->key_compare = unsafe_object_compare
lists = [list(range(100)) + [(1<<70)],
[str(x) for x in range(100)] + ['\uffff'],
[bytes(x) for x in range(100)],
[cmp_to_key(lambda x,y: x<y)(x) for x in range(100)]]
for L in lists:
check_against_PyObject_RichCompareBool(self, L)
def test_unsafe_latin_compare(self):
check_against_PyObject_RichCompareBool(self, [str(x) for
x in range(100)])
def test_unsafe_long_compare(self):
check_against_PyObject_RichCompareBool(self, [x for
x in range(100)])
def test_unsafe_float_compare(self):
check_against_PyObject_RichCompareBool(self, [float(x) for
x in range(100)])
def test_unsafe_tuple_compare(self):
# This test was suggested by Tim Peters. It verifies that the tuple
# comparison respects the current tuple compare semantics, which do not
# guarantee that x < x <=> (x,) < (x,)
#
# Note that we don't have to put anything in tuples here, because
# the check function does a tuple test automatically.
check_against_PyObject_RichCompareBool(self, [float('nan')]*100)
check_against_PyObject_RichCompareBool(self, [float('nan') for
_ in range(100)])
def test_not_all_tuples(self):
self.assertRaises(TypeError, [(1.0, 1.0), (False, "A"), 6].sort)
self.assertRaises(TypeError, [('a', 1), (1, 'a')].sort)
self.assertRaises(TypeError, [(1, 'a'), ('a', 1)].sort)
#==============================================================================
if __name__ == "__main__":
unittest.main()
|
|
"""
This file is part of The Cannon analysis project.
Copyright 2014 Melissa Ness.
# urls
- http://iopscience.iop.org/1538-3881/146/5/133/suppdata/aj485195t4_mrt.txt for calibration stars
- http://data.sdss3.org/irSpectrumDetail?locid=4330&commiss=0&apogeeid=2M17411636-2903150&show_aspcap=True object explorer
- http://data.sdss3.org/basicIRSpectra/searchStarA
- http://data.sdss3.org/sas/dr10/apogee/spectro/redux/r3/s3/a3/ for the data files
# to-do
- need to add a test that the wavelength range is the same - and if it isn't interpolate to the same range
- format PEP8-ish (four-space tabs, for example)
- take logg_cut as an input
- extend to perform quadratic fitting
"""
from astropy.io import fits as pyfits
import scipy
import glob
import pickle
import pylab
import matplotlib.pyplot as plt
from matplotlib.pyplot import close
from numpy import shape, array, argsort, sort, std, mean, arange
from scipy import interpolate
from scipy import ndimage
from scipy import optimize as opt
import numpy as np
LARGE = 1e2 # sigma value to use for bad continuum-normalized data; MAGIC
def weighted_median(values, weights, quantile):
"""weighted_median
keywords
--------
values: ndarray
input values
weights: ndarray
weights to apply to each value in values
quantile: float
quantile selection
returns
-------
val: float
median value
"""
sindx = np.argsort(values)
cvalues = 1. * np.cumsum(weights[sindx])
cvalues = cvalues / cvalues[-1]
foo = sindx[cvalues > quantile]
if len(foo) == 0:
return values[0]
indx = foo[0]
return values[indx]
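# Worked example: values=[1., 2., 3.], weights=[1., 1., 1.], quantile=0.5
# gives normalized cumulative weights [1/3, 2/3, 1]; the first sorted index
# whose cumulative weight exceeds 0.5 is the middle one, so 2.0 is returned -
# the ordinary median in the equal-weight case.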
def continuum_normalize(dataall, delta_lambda=50):
"""continuum_normalize
keywords
--------
dataall: ndarray, shape=(Nlambda, Nstar, 3)
wavelengths, flux densities, errors
delta_lambda:
half-width of median region in angstroms
returns
-------
continuum: (Nlambda, Nstar)
continuum level
.. note::
* does a lot of stuff *other* than continuum normalization
.. todo::
* bugs: for loops!
"""
Nlambda, Nstar, foo = dataall.shape
continuum = np.zeros((Nlambda, Nstar))
# sanitize inputs
for jj in range(Nstar):
        bad_a = np.logical_or(np.isnan(dataall[:, jj, 1]), np.isinf(dataall[:, jj, 1]))
        bad_b = np.logical_or(dataall[:, jj, 2] <= 0., np.isnan(dataall[:, jj, 2]))
        bad = np.logical_or(np.logical_or(bad_a, bad_b), np.isinf(dataall[:, jj, 2]))
        dataall[bad, jj, 1] = 0.
        dataall[bad, jj, 2] = np.Inf  # effectively the same as LARGE: kill these pixels
assert foo == 3
for star in range(Nstar):
print "get_continuum(): working on star" ,star
for ll, lam in enumerate(dataall[:, 0, 0]):
if dataall[ll, star, 0] != lam:
print dataall[ll,star,0], lam , dataall[ll,0,0]
print ll, star
print ll+1, star+1, dataall[ll+1, star+1, 0], dataall[ll+1,0,0]
print ll+2, star+2, dataall[ll+2, star+2, 0], dataall[ll+2,0,0]
assert False
indx = (np.where(abs(dataall[:, star, 0] - lam) < delta_lambda))[0]
ivar = 1. / (dataall[indx, star, 2] ** 2)
ivar = np.array(ivar)
continuum[ll, star] = weighted_median(dataall[indx, star, 1], ivar, 0.90)
for jj in range(Nstar):
bad = np.where(continuum[:,jj] <= 0)
continuum[bad,jj] = 1.
dataall[:, jj, 1] /= continuum[:,jj]
dataall[:, jj, 2] /= continuum[:,jj]
dataall[bad,jj, 1] = 1.
dataall[bad,jj, 2] = LARGE
bad = np.where(dataall[:, jj, 2] > LARGE)
dataall[bad,jj, 1] = 1.
dataall[bad,jj, 2] = LARGE
return dataall
def get_normalized_test_data(testfile):
"""
inputs
------
testfile: str
        the file containing the list of FITS files to test - if already normalized,
        move on; if not, normalize it
        also save an SNR file with the list of SNR values if it is not present
returns
-------
testdata:
"""
name = testfile.split('/')[-2]
testdir = testfile.split('stars')[0]
if glob.glob(name+'.pickle'):
file_in2 = open(name+'.pickle', 'r')
testdata = pickle.load(file_in2)
file_in2.close()
return testdata
a = open(testfile, 'r')
al2 = a.readlines()
bl2 = []
for each in al2:
bl2.append(testdir+each.strip())
for jj,each in enumerate(bl2):
a = pyfits.open(each)
if shape(a[6].data) != (8575,):
ydata = a[1].data[0]
ysigma = a[2].data[0]
len_data = len(a[1].data[0])
if jj == 0:
nlam = len(a[1].data[0])
testdata = np.zeros((nlam, len(bl2), 3))
if shape(a[1].data) == (8575,):
ydata = a[1].data
ysigma = a[2].data
len_data = len(a[1].data)
if jj == 0:
nlam = len(a[1].data)
testdata = np.zeros((nlam, len(bl2), 3))
start_wl = a[1].header['CRVAL1']
diff_wl = a[1].header['CDELT1']
if jj == 0:
nlam = len(a[1].data)
testdata = np.zeros((nlam, len(bl2), 3))
val = diff_wl*(nlam) + start_wl
wl_full_log = np.arange(start_wl,val, diff_wl)
wl_full = [10**aval for aval in wl_full_log]
xdata = wl_full
testdata[:, jj, 0] = xdata
testdata[:, jj, 1] = ydata
testdata[:, jj, 2] = ysigma
testdata = continuum_normalize(testdata) # testdata
file_in = open(name+'.pickle', 'w')
pickle.dump(testdata, file_in)
file_in.close()
return testdata
def get_normalized_training_data():
"""
inputs
------
testfile: str
the file in with the list of fits files in the training data
if not normed, norm it
    has brittle inputs: the test14 file, which is the training-data labels from ASPCAP, and the ages file, which contains the ages made by me. Note the Pleiades provides my training labels for Teff, log g
returns
-------
dataall - the normalised test data
"""
if glob.glob('normed_data.pickle'):
file_in2 = open('normed_data.pickle', 'r')
dataall, metaall, labels, Ametaall, cluster_name = pickle.load(file_in2)
file_in2.close()
return dataall, metaall, labels, Ametaall, cluster_name
fn = 'starsin_SFD_Pleiades.txt'
T_est,g_est,feh_est = np.loadtxt(fn, usecols = (4,6,8), unpack =1)
T_A,g_A,feh_A = np.loadtxt(fn, usecols = (3,5,7), unpack =1) # these are APOGEE values - interesting to compare e.g for Pleiades and if I get my own temperatures
age_est = np.loadtxt('ages.txt', usecols = (0,), unpack =1)
labels = ["teff", "logg", "feh", "age" ]
a = open(fn, 'r')
al = a.readlines()
bl = []
cluster_name = []
for each in al:
bl.append(each.split()[0])
cluster_name.append(each.split()[1])
for jj,each in enumerate(bl):
each = each.strip('\n')
a = pyfits.open(each)
b = pyfits.getheader(each)
start_wl = a[1].header['CRVAL1']
diff_wl = a[1].header['CDELT1']
print np.atleast_2d(a[1].data).shape
if jj == 0:
nmeta = len(labels)
nlam = len(a[1].data)
val = diff_wl*(nlam) + start_wl
wl_full_log = np.arange(start_wl,val, diff_wl)
ydata = (np.atleast_2d(a[1].data))[0]
ydata_err = (np.atleast_2d(a[2].data))[0]
ydata_flag = (np.atleast_2d(a[3].data))[0]
assert len(ydata) == nlam
wl_full = [10**aval for aval in wl_full_log]
xdata= np.array(wl_full)
ydata = np.array(ydata)
ydata_err = np.array(ydata_err)
starname2 = each.split('.fits')[0]+'.txt'
sigma = (np.atleast_2d(a[2].data))[0]# /y1
if jj == 0:
npix = len(xdata)
dataall = np.zeros((npix, len(bl), 3))
metaall = np.ones((len(bl), nmeta))
Ametaall = np.ones((len(bl), nmeta))
if jj > 0:
assert xdata[0] == dataall[0, 0, 0]
dataall[:, jj, 0] = xdata
dataall[:, jj, 1] = ydata
dataall[:, jj, 2] = sigma
for k in range(0,len(bl)):
# must be synchronised with labels
metaall[k,0] = T_est[k]
metaall[k,1] = g_est[k]
metaall[k,2] = feh_est[k]
metaall[k,3] = age_est[k]
Ametaall[k,0] = T_A[k]
Ametaall[k,1] = g_A[k]
Ametaall[k,2] = feh_A[k]
dataall = continuum_normalize(dataall) #dataall
file_in = open('normed_data.pickle', 'w')
pickle.dump((dataall, metaall, labels, Ametaall, cluster_name), file_in)
file_in.close()
return dataall, metaall, labels, Ametaall, cluster_name
def do_one_regression_at_fixed_scatter(data, features, scatter):
"""
Parameters
----------
data: ndarray, [nobjs, 3]
wavelengths, fluxes, invvars
meta: ndarray, [nobjs, nmeta]
Teff, Feh, etc, etc
scatter:
Returns
-------
coeff: ndarray
coefficients of the fit
MTCinvM: ndarray
inverse covariance matrix for fit coefficients
chi: float
chi-squared at best fit
logdet_Cinv: float
        log determinant of the inverse covariance matrix,
:math:`\sum(\log(Cinv))`
"""
# least square fit
#pick = logical_and(data[:,1] < np.median(data[:,1]) + np.std(data[:,1])*3. , data[:,1] > median(data[:,1]) - np.std(data[:,1])*3.)#5*std(data[:,1]) )
Cinv = 1. / (data[:, 2] ** 2 + scatter ** 2) # invvar slice of data
M = features
    MTCinvM = np.dot(M.T, Cinv[:, None] * M) # craziness b/c Cinv isn't a matrix
x = data[:, 1] # intensity slice of data
MTCinvx = np.dot(M.T, Cinv * x)
try:
coeff = np.linalg.solve(MTCinvM, MTCinvx)
    except np.linalg.linalg.LinAlgError:
        print MTCinvM, MTCinvx, data[:,0], data[:,1], data[:,2]
        print features
        raise  # re-raise: coeff is undefined if the solve failed
assert np.all(np.isfinite(coeff))
chi = np.sqrt(Cinv) * (x - np.dot(M, coeff))
logdet_Cinv = np.sum(np.log(Cinv))
return (coeff, MTCinvM, chi, logdet_Cinv )
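# In matrix form the solve above is the weighted least-squares normal equation
#     (M^T Cinv M) coeff = M^T Cinv x,  Cinv = diag(1 / (sigma_n^2 + scatter^2)),
# and chi is the vector of whitened residuals sqrt(Cinv) * (x - M coeff).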
def do_one_regression(data, metadata):
"""do_one_regression
This determines the scatter of the fit at a single wavelength for all stars
Parameters
----------
data:
metadata:
returns
-------
"""
ln_s_values = np.arange(np.log(0.0001), 0., 0.5)
chis_eval = np.zeros_like(ln_s_values)
for ii, ln_s in enumerate(ln_s_values):
foo, bar, chi, logdet_Cinv = do_one_regression_at_fixed_scatter(data, metadata, scatter = np.exp(ln_s))
chis_eval[ii] = np.sum(chi * chi) - logdet_Cinv
if np.any(np.isnan(chis_eval)):
s_best = np.exp(ln_s_values[-1])
return do_one_regression_at_fixed_scatter(data, metadata, scatter = s_best) + (s_best, )
lowest = np.argmin(chis_eval)
    if lowest == 0 or lowest == len(ln_s_values) - 1:
s_best = np.exp(ln_s_values[lowest])
return do_one_regression_at_fixed_scatter(data, metadata, scatter = s_best) + (s_best, )
ln_s_values_short = ln_s_values[np.array([lowest-1, lowest, lowest+1])]
chis_eval_short = chis_eval[np.array([lowest-1, lowest, lowest+1])]
z = np.polyfit(ln_s_values_short, chis_eval_short, 2)
f = np.poly1d(z)
fit_pder = np.polyder(z)
fit_pder2 = pylab.polyder(fit_pder)
s_best = np.exp(np.roots(fit_pder)[0])
return do_one_regression_at_fixed_scatter(data, metadata, scatter = s_best) + (s_best, )
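# i.e. the scatter is chosen to minimize chi^2 - sum(log Cinv) on a grid of
# ln(s); a parabola is then fit through the three grid points bracketing the
# minimum and s_best = exp(vertex), the root of the parabola's derivative.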
def do_regressions(dataall, features):
"""
    This loops through all the regressions - doing the fit at a single wavelength for all stars, repeated for every wavelength
"""
nlam, nobj, ndata = dataall.shape
nobj, npred = features.shape
featuresall = np.zeros((nlam,nobj,npred))
featuresall[:, :, :] = features[None, :, :]
return map(do_one_regression, dataall, featuresall)
def train(dataall, metaall, order, fn, Ametaall, cluster_name, logg_cut=100., teff_cut=0., leave_out=None):
"""
- `leave out` must be in the correct form to be an input to `np.delete`
- this is the routine that determines the coefficients from the training data
"""
# good = np.logical_and((metaall[:, 1] < logg_cut), (metaall[:,0] > teff_cut) )
# dataall = dataall[:, good]
# metaall = metaall[good]
diff_t = np.abs(array(metaall[:,0] - Ametaall[:,0]) )
good = np.logical_and((metaall[:, 1] < logg_cut), (diff_t < 600. ) )
dataall = dataall[:, good]
metaall = metaall[good]
nstars, nmeta = metaall.shape
if leave_out is not None: #
dataall = np.delete(dataall, [leave_out], axis = 1)
metaall = np.delete(metaall, [leave_out], axis = 0)
offsets = np.mean(metaall, axis=0)
features = np.ones((nstars, 1))
if order >= 1:
features = np.hstack((features, metaall - offsets))
if order >= 2:
newfeatures = np.array([np.outer(m, m)[np.triu_indices(nmeta)] for m in (metaall - offsets)])
features = np.hstack((features, newfeatures))
blob = do_regressions(dataall, features)
coeffs = np.array([b[0] for b in blob])
covs = np.array([np.linalg.inv(b[1]) for b in blob])
chis = np.array([b[2] for b in blob])
chisqs = np.array([np.dot(b[2],b[2]) - b[3] for b in blob]) # holy crap be careful
scatters = np.array([b[4] for b in blob])
fd = open(fn, "w")
pickle.dump((dataall, metaall, labels, offsets, coeffs, covs, scatters,chis,chisqs), fd)
fd.close()
## non linear stuff below ##
# returns the non linear function
#def func(x1, x2, x3, x4, x5, x6, x7, x8, x9, a, b, c):
# f = (0
# + x1*a
# + x2*b
# + x3*c
# + x4* a**2#
# + x5 * a * b
# + x6 * a * c
# + x7*b**2
# + x8 * b * c
# + x9*c**2 )
# return f
# this is the form of the function below in the labels (a, b, c, d) = (Teff, logg, [Fe/H], age) and their coefficients x1-x14
def func(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12 , x13, x14, a, b, c, d):
f = (0
+ x1*a
+ x2*b
+ x3*c
+ x4*d
+ x5* a**2#
+ x6 * a * b
+ x7 * a * c
+ x8 * a * d
+ x9* b**2
+ x10 * b * c
+ x11 * b * d
+ x12* c**2
+ x13 * c * d
+ x14* d**2 )
return f
## thank you Stack Overflow for the example below on how to use the optimize function
#def nonlinear_invert(f, x1, x2, x3, x4, x5, x6, x7, x8, x9 ,sigmavals):
# def wrapped_func(observation_points, a, b, c):
# x1, x2, x3, x4, x5, x6, x7, x8, x9 = observation_points
# return func(x1, x2, x3, x4, x5, x6, x7, x8, x9, a, b, c)
def nonlinear_invert(f, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, sigmavals):
def wrapped_func(observation_points, a, b, c, d):
x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14 = observation_points
return func(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, a, b, c, d)
xdata = np.vstack([x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14 ])
    model, cov = opt.curve_fit(wrapped_func, xdata, f, sigma = sigmavals)  # absolute_sigma=True is not an option in my version of scipy; will upgrade scipy
return model, cov
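# Usage note: scipy's curve_fit returns (popt, pcov), so `model` is the
# best-fit label vector (a, b, c, d) = (Teff, logg, [Fe/H], age) relative to
# the training offsets and `cov` is its covariance matrix.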
def infer_labels_nonlinear(fn_pickle,testdata, fout_pickle, weak_lower,weak_upper):
#def infer_labels(fn_pickle,testdata, fout_pickle, weak_lower=0.935,weak_upper=0.98):
"""
    - this routine determines the labels for a new spectrum - can make cuts on the flux for testing if wanted
best log g = weak_lower = 0.95, weak_upper = 0.98
best teff = weak_lower = 0.95, weak_upper = 0.99
best_feh = weak_lower = 0.935, weak_upper = 0.98
this returns the parameters for a field of data - and normalises if it is not already normalised
this is slow because it reads a pickle file
"""
file_in = open(fn_pickle, 'r')
dataall, metaall, labels, offsets, coeffs, covs, scatters,chis,chisq = pickle.load(file_in)
file_in.close()
nstars = (testdata.shape)[1]
nlabels = len(labels)
Params_all = np.zeros((nstars, nlabels))
    MCM_rotate_all = np.zeros((nstars, np.shape(coeffs)[1]-1, np.shape(coeffs)[1]-1))
covs_all = np.zeros((nstars,nlabels, nlabels))
for jj in range(0,nstars):
if np.any(testdata[:,jj,0] != dataall[:, 0, 0]):
print testdata[range(5),jj,0], dataall[range(5),0,0]
assert False
xdata = testdata[:,jj,0]
ydata = testdata[:,jj,1]
ysigma = testdata[:,jj,2]
ydata_norm = ydata - coeffs[:,0] # subtract the mean
f = ydata_norm
t,g,feh,age = metaall[:,0], metaall[:,1], metaall[:,2], metaall[:,3]
x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14 = coeffs[:,0], coeffs[:,1], coeffs[:,2], coeffs[:,3], coeffs[:,4], coeffs[:,5], coeffs[:,6] ,coeffs[:,7], coeffs[:,8], coeffs[:,9], \
coeffs[:,10], coeffs[:,11], coeffs[:, 12], coeffs[:,13],coeffs[:,14]
Cinv = 1. / (ysigma ** 2 + scatters ** 2)
Params,covs = nonlinear_invert(f, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, 1/Cinv**0.5 )
Params = Params+offsets
value_cut = -14
coeffs_slice = coeffs[:,value_cut:]
MCM_rotate = np.dot(coeffs_slice.T, Cinv[:,None] * coeffs_slice)
Params_all[jj,:] = Params
MCM_rotate_all[jj,:,:] = MCM_rotate
covs_all[jj,:,:] = covs
file_in = open(fout_pickle, 'w')
pickle.dump((Params_all, covs_all), file_in)
file_in.close()
return Params_all , MCM_rotate_all
def infer_labels(fn_pickle,testdata, fout_pickle, weak_lower,weak_upper):
#def infer_labels(fn_pickle,testdata, fout_pickle, weak_lower=0.935,weak_upper=0.98):
"""
- this is the linear case of getting labels for new spectra
best log g = weak_lower = 0.95, weak_upper = 0.98
best teff = weak_lower = 0.95, weak_upper = 0.99
best_feh = weak_lower = 0.935, weak_upper = 0.98
this returns the parameters for a field of data - and normalises if it is not already normalised
this is slow because it reads a pickle file
"""
file_in = open(fn_pickle, 'r')
dataall, metaall, labels, offsets, coeffs, covs, scatters,chis,chisqs = pickle.load(file_in)
file_in.close()
nstars = (testdata.shape)[1]
nlabels = len(labels)
Params_all = np.zeros((nstars, nlabels))
MCM_rotate_all = np.zeros((nstars, nlabels, nlabels))
for jj in range(0,nstars):
if np.any(testdata[:,jj,0] != dataall[:, 0, 0]):
print testdata[range(5),jj,0], dataall[range(5),0,0]
assert False
xdata = testdata[:,jj,0]
ydata = testdata[:,jj,1]
ysigma = testdata[:,jj,2]
ydata_norm = ydata - coeffs[:,0] # subtract the mean
        cut_to = shape(metaall)[1] * -1  # must be an int to slice coeffs below
coeffs_slice = coeffs[:,cut_to:]
#ind1 = np.logical_and(logical_and(dataall[:,jj,0] > 16200., dataall[:,jj,0] < 16500.), np.logical_and(ydata > weak_lower , ydata < weak_upper))
ind1 = np.logical_and(ydata > weak_lower , ydata < weak_upper)
Cinv = 1. / (ysigma ** 2 + scatters ** 2)
MCM_rotate = np.dot(coeffs_slice[ind1].T, Cinv[:,None][ind1] * coeffs_slice[ind1])
MCy_vals = np.dot(coeffs_slice[ind1].T, Cinv[ind1] * ydata_norm[ind1])
Params = np.linalg.solve(MCM_rotate, MCy_vals)
Params = Params + offsets
print Params
Params_all[jj,:] = Params
MCM_rotate_all[jj,:,:] = MCM_rotate
file_in = open(fout_pickle, 'w')
pickle.dump((Params_all, MCM_rotate_all), file_in)
file_in.close()
return Params_all , MCM_rotate_all
def lookatfits(fn_pickle, pixelvalues, testdataall):
    """
    TEST ROUTINE - PLOTTING ROUTINE
    this is to plot the individual pixel fits on the 3x2 panel
    """
file_in = open(fn_pickle, 'r')
testdataall, metaall, labels, offsets, coeffs, covs, scatters,chis,chisqs = pickle.load(file_in)
file_in.close()
axis_t, axis_g, axis_feh = metaall[:,0], metaall[:,1], metaall[:,2]
nstars = (testdataall.shape)[1]
offsets = np.mean(metaall, axis=0)
features = np.ones((nstars, 1))
features = np.hstack((features, metaall - offsets))
features2 = np.hstack((features, metaall ))
for each in pixelvalues:
flux_val_abs = testdataall[each,:,1]
flux_val_norm = testdataall[each,:,1] - np.dot(coeffs, features.T)[each,:]
coeff = coeffs[each,:]
y_feh_abs = coeff[3]*features[:,3] + coeff[0]*features[:,0]
y_feh_norm = coeff[3]*features[:,3] + coeff[0]*features[:,0] -(coeff[3]*features2[:,3] + coeff[0]*features2[:,0])
y_g_abs = coeff[2]*features[:,2] + coeff[0]*features[:,0]
y_g_norm = coeff[2]*features[:,2] + coeff[0]*features[:,0] - (coeff[2]*features2[:,2] + coeff[0]*features2[:,0])
y_t_abs = coeff[1]*features[:,1] + coeff[0]*features[:,0]
y_t_norm = coeff[1]*features[:,1] + coeff[0]*features[:,0] - (coeff[1]*features2[:,1] + coeff[0]*features2[:,0])
for flux_val, y_feh, y_g, y_t, namesave,lab,ylims in zip([flux_val_abs, flux_val_norm], [y_feh_abs,y_feh_norm],[y_g_abs, y_g_norm], [y_t_abs,y_t_norm],['abs','norm'], ['flux','flux - mean'],
[[-0.2,1.2], [-1,1]] ):
y_meandiff = coeff[0] - flux_val
fig = plt.figure(figsize = [12.0, 12.0])
#
ax = plt.subplot(3,2,1)
pick = testdataall[each,:,2] > 0.1
ax.plot(metaall[:,2], flux_val, 'o',alpha =0.5,mfc = 'None', mec = 'r')
ax.plot(metaall[:,2][pick], flux_val[pick], 'kx',markersize = 10)
ax.plot(metaall[:,2], y_feh, 'k')
ind1 = argsort(metaall[:,2])
ax.fill_between(sort(metaall[:,2]), array(y_feh + std(flux_val))[ind1], array(y_feh - std(flux_val))[ind1] , color = 'y', alpha = 0.2)
ax.set_xlabel("[Fe/H]", fontsize = 14 )
ax.set_ylabel(lab, fontsize = 14 )
ax.set_title(str(np.int((testdataall[each,0,0])))+" $\AA$")
ax.set_ylim(ylims[0], ylims[1])
#
ax = plt.subplot(3,2,2)
ax.plot(metaall[:,1], flux_val, 'o', alpha =0.5, mfc = 'None', mec = 'b')
ax.plot(metaall[:,1][pick], flux_val[pick], 'kx',markersize = 10)
ax.plot(metaall[:,1], y_g, 'k')
ind1 = argsort(metaall[:,1])
ax.fill_between(sort(metaall[:,1]), array(y_g + std(flux_val))[ind1], array(y_g - std(flux_val))[ind1] , color = 'y', alpha = 0.2)
ax.set_xlabel("log g", fontsize = 14 )
ax.set_ylabel(lab, fontsize = 14 )
ax.set_title(str(np.int((testdataall[each,0,0])))+" $\AA$")
ax.set_ylim(ylims[0], ylims[1])
#
ax = plt.subplot(3,2,3)
ax.plot(metaall[:,0], flux_val, 'o',alpha =0.5, mfc = 'None', mec = 'green')
ax.plot(metaall[:,0][pick], flux_val[pick], 'kx', markersize = 10)
ax.plot(metaall[:,0], y_t, 'k')
ind1 = argsort(metaall[:,0])
ax.fill_between(sort(metaall[:,0]), array(y_t + std(flux_val))[ind1], array(y_t - std(flux_val))[ind1] , color = 'y', alpha = 0.2)
ax.set_xlabel("Teff", fontsize = 14 )
ax.set_ylabel(lab, fontsize = 14 )
ax.set_ylim(ylims[0], ylims[1])
#
ax = plt.subplot(3,2,4)
diff_flux = coeffs[each,0] - testdataall[each,:,1]
xrange1 = arange(0,shape(testdataall)[1],1)
ind1 = argsort(metaall[:,2])
ind1_pick = argsort(metaall[:,2][pick])
ax.plot(xrange1, (coeffs[each,0] - testdataall[each,:,1])[ind1], 'o',alpha = 0.5, mfc = 'None', mec = 'grey')
ax.plot(xrange1[pick], (coeffs[each,0] - testdataall[each,:,1][pick])[ind1_pick], 'kx',markersize = 10)
ax.fill_between(xrange1, array(mean(diff_flux) + std(diff_flux)), array(mean(diff_flux) - std(diff_flux)) , color = 'y', alpha = 0.2)
ax.set_xlabel("Star Number (increasing [Fe/H])", fontsize = 14 )
ax.set_ylabel("flux star - mean flux", fontsize = 14 )
ax.set_ylim(-1.0, 1.0)
#
ax = plt.subplot(3,2,5)
for indx, color, label in [
( 1, "g", "Teff"),
( 2, "b", "logg"),
( 3, "r", "FeH")]:
_plot_something(ax, testdataall[:, 0, 0][each-10:each+10], coeffs[:, indx][each-10:each+10], covs[:, indx, indx][each-10:each+10], color, label=label)
ax.axvline(testdataall[:,0,0][each],color = 'grey')
ax.axhline(0,color = 'grey',linestyle = 'dashed')
ax.set_xlim(testdataall[:,0,0][each-9], testdataall[:,0,0][each+9])
ax.legend(loc = 4,fontsize = 10)
ax.set_xlabel("Wavelength $\AA$", fontsize = 14 )
ax.set_ylabel("coeffs T,g,FeH", fontsize = 14 )
#
ax = plt.subplot(3,2,6)
_plot_something(ax, testdataall[:, 0, 0][each-10:each+10], coeffs[:, 0][each-10:each+10], covs[:, 0, 0][each-10:each+10], 'k', label='mean')
ax.set_ylim(0.6,1.1)
ax.set_xlim(testdataall[:,0,0][each-9], testdataall[:,0,0][each+9])
ax.legend(loc = 4,fontsize = 10)
ax.axvline(testdataall[:,0,0][each],color = 'grey')
ax.axhline(0,color = 'grey',linestyle = 'dashed')
ax.set_xlabel("Wavelength $\AA$", fontsize = 14 )
ax.set_ylabel("Mean flux", fontsize = 14 )
savefig3(fig, str(each)+"_"+str(namesave) , transparent=False, bbox_inches='tight', pad_inches=0.5)
fig.clf()
# return
def _plot_something(ax, wl, val, var, color, lw=2, label=""):
"""
This routine is for plotting
"""
factor = 1.
if label == "Teff": factor = 1000. # yes, I feel dirty; MAGIC
sig = np.sqrt(var)
ax.plot(wl, factor*(val+sig), color=color, lw=lw, label=label)
ax.plot(wl, factor*(val-sig), color=color, lw=lw)
ax.fill_between(wl, factor*(val+sig), factor*(val-sig), color = color, alpha = 0.2)
return None
def savefig3(fig, prefix, **kwargs):
# for suffix in (".png"):
suffix = ".png"
print "writing %s" % (prefix + suffix)
fig.savefig(prefix + suffix, **kwargs)
close()
def leave_one_cluster_out_xval(cluster_information):
# This is done in the fitspectra.py code -- look there and copy it across
# directly if this gets implemented later. `clusters` would be derived from
# cluster_information; the loop body below is only a sketch.
dataall, metaall, labels = get_normalized_training_data()
for jj, cluster_indx in enumerate(clusters):
pfn = "coeffs_%03d.pickle" % (jj)
# read_and_train(dataall, .., pfn, leave_out=cluster_indx)
# infer_labels(pfn, dataall[:, cluster_indx], ofn)
# plotting...
if __name__ == "__main__":
dataall, metaall, labels, Ametaall, cluster_name = get_normalized_training_data()
fpickle = "coeffs.pickle"
if not glob.glob(fpickle):
train(dataall, metaall, 1, fpickle, Ametaall, cluster_name, logg_cut= 40.,teff_cut = 0.)
fpickle2 = "coeffs_2nd_order.pickle"
if not glob.glob(fpickle2):
train(dataall, metaall, 2, fpickle2, Ametaall, cluster_name, logg_cut= 40.,teff_cut = 0.)
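# self_flag selects the run mode: <1 runs on the external test files listed
# below, 1 is a 1st-order self-test, 2 is a 2nd-order self-test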
self_flag = 2
if self_flag < 1:
# a = open('all.txt', 'r')  # alternative list of test files
a = open('all_test2.txt', 'r')
al = a.readlines()
bl = []
for each in al:
bl.append(each.strip())
for each in bl:
testfile = each
field = testfile.split('.txt')[0]+'_' #"4332_"
testdataall = get_normalized_test_data(testfile) # if flag is one, do on self
testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall, field+"tags.pickle",-10.94,10.99)
if self_flag == 1:
field = "self_"
file_in = open('normed_data.pickle', 'r')
testdataall, metaall, labels = pickle.load(file_in)
lookatfits('coeffs.pickle',[1002],testdataall)
file_in.close()
testmetaall, inv_covars = infer_labels("coeffs.pickle", testdataall, field+"tags.pickle",-10.980,11.43)
if self_flag == 2:
field = "self_2nd_order_"
file_in = open('normed_data.pickle', 'r')
testdataall, metaall, labels, Ametaall, cluster_name = pickle.load(file_in)
file_in.close()
testmetaall, inv_covars = infer_labels_nonlinear("coeffs_2nd_order.pickle", testdataall, field+"tags.pickle",-10.950,10.99)
|
|
# -*- coding: utf-8 -*-
from urlparse import urlparse
from nose.tools import * # flake8: noqa
from framework.auth.core import Auth
from website.models import NodeLog
from website.views import find_dashboard
from website.util import permissions
from website.util.sanitize import strip_html
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase, fake
from tests.factories import (
NodeFactory,
ProjectFactory,
RegistrationFactory,
AuthUserFactory
)
from tests.utils import assert_logs, assert_not_logs
class TestNodeDetail(ApiTestCase):
def setUp(self):
super(TestNodeDetail, self).setUp()
self.user = AuthUserFactory()
self.user_two = AuthUserFactory()
self.public_project = ProjectFactory(title="Project One", is_public=True, creator=self.user)
self.private_project = ProjectFactory(title="Project Two", is_public=False, creator=self.user)
self.public_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id)
self.private_url = '/{}nodes/{}/'.format(API_BASE, self.private_project._id)
self.public_component = NodeFactory(parent=self.public_project, creator=self.user, is_public=True)
self.public_component_url = '/{}nodes/{}/'.format(API_BASE, self.public_component._id)
def test_return_public_project_details_logged_out(self):
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['data']['attributes']['title'], self.public_project.title)
assert_equal(res.json['data']['attributes']['description'], self.public_project.description)
assert_equal(res.json['data']['attributes']['category'], self.public_project.category)
def test_return_public_project_details_logged_in(self):
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['data']['attributes']['title'], self.public_project.title)
assert_equal(res.json['data']['attributes']['description'], self.public_project.description)
assert_equal(res.json['data']['attributes']['category'], self.public_project.category)
def test_return_private_project_details_logged_out(self):
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
def test_return_private_project_details_logged_in_contributor(self):
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['data']['attributes']['title'], self.private_project.title)
assert_equal(res.json['data']['attributes']['description'], self.private_project.description)
assert_equal(res.json['data']['attributes']['category'], self.private_project.category)
def test_return_private_project_details_logged_in_non_contributor(self):
res = self.app.get(self.private_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_top_level_project_has_no_parent(self):
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['relationships']['parent']['links']['related']['href'], None)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_child_project_has_parent(self):
public_component = NodeFactory(parent=self.public_project, creator=self.user, is_public=True)
public_component_url = '/{}nodes/{}/'.format(API_BASE, public_component._id)
res = self.app.get(public_component_url)
assert_equal(res.status_code, 200)
url = res.json['data']['relationships']['parent']['links']['related']['href']
assert_equal(urlparse(url).path, self.public_url)
def test_node_has_children_link(self):
res = self.app.get(self.public_url)
url = res.json['data']['relationships']['children']['links']['related']['href']
expected_url = self.public_url + 'children/'
assert_equal(urlparse(url).path, expected_url)
def test_node_has_contributors_link(self):
res = self.app.get(self.public_url)
url = res.json['data']['relationships']['contributors']['links']['related']['href']
expected_url = self.public_url + 'contributors/'
assert_equal(urlparse(url).path, expected_url)
def test_node_has_pointers_link(self):
res = self.app.get(self.public_url)
url = res.json['data']['relationships']['node_links']['links']['related']['href']
expected_url = self.public_url + 'node_links/'
assert_equal(urlparse(url).path, expected_url)
def test_node_has_registrations_link(self):
res = self.app.get(self.public_url)
url = res.json['data']['relationships']['registrations']['links']['related']['href']
expected_url = self.public_url + 'registrations/'
assert_equal(urlparse(url).path, expected_url)
def test_node_has_files_link(self):
res = self.app.get(self.public_url)
url = res.json['data']['relationships']['files']['links']['related']['href']
expected_url = self.public_url + 'files/'
assert_equal(urlparse(url).path, expected_url)
def test_node_properties(self):
res = self.app.get(self.public_url)
assert_equal(res.json['data']['attributes']['public'], True)
assert_equal(res.json['data']['attributes']['registration'], False)
assert_equal(res.json['data']['attributes']['collection'], False)
assert_equal(res.json['data']['attributes']['dashboard'], False)
assert_equal(res.json['data']['attributes']['tags'], [])
def test_requesting_folder_returns_error(self):
folder = NodeFactory(is_folder=True, creator=self.user)
res = self.app.get(
'/{}nodes/{}/'.format(API_BASE, folder._id),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 404)
class NodeCRUDTestCase(ApiTestCase):
def setUp(self):
super(NodeCRUDTestCase, self).setUp()
self.user = AuthUserFactory()
self.user_two = AuthUserFactory()
self.title = 'Cool Project'
self.new_title = 'Super Cool Project'
self.description = 'A Properly Cool Project'
self.new_description = 'An even cooler project'
self.category = 'data'
self.new_category = 'project'
self.public_project = ProjectFactory(title=self.title,
description=self.description,
category=self.category,
is_public=True,
creator=self.user)
self.public_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id)
self.private_project = ProjectFactory(title=self.title,
description=self.description,
category=self.category,
is_public=False,
creator=self.user)
self.private_url = '/{}nodes/{}/'.format(API_BASE, self.private_project._id)
self.fake_url = '/{}nodes/{}/'.format(API_BASE, '12345')
def make_node_payload(node, attributes):
return {
'data': {
'id': node._id,
'type': 'nodes',
'attributes': attributes,
}
}
class TestNodeUpdate(NodeCRUDTestCase):
def test_node_update_invalid_data(self):
res = self.app.put_json_api(self.public_url, "Incorrect data", auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Malformed request.")
res = self.app.put_json_api(self.public_url, ["Incorrect data"], auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Malformed request.")
@assert_not_logs(NodeLog.MADE_PUBLIC, 'private_project')
def test_cannot_make_project_public_if_non_contributor(self):
non_contrib = AuthUserFactory()
res = self.app.patch_json(
self.private_url,
make_node_payload(self.private_project, {'public': True}),
auth=non_contrib.auth, expect_errors=True
)
assert_equal(res.status_code, 403)
def test_cannot_make_project_public_if_non_admin_contributor(self):
non_admin = AuthUserFactory()
self.private_project.add_contributor(
non_admin,
permissions=(permissions.READ, permissions.WRITE),
auth=Auth(self.private_project.creator)
)
self.private_project.save()
res = self.app.patch_json(
self.private_url,
make_node_payload(self.private_project, {'public': True}),
auth=non_admin.auth, expect_errors=True
)
assert_equal(res.status_code, 403)
self.private_project.reload()
assert_false(self.private_project.is_public)
@assert_logs(NodeLog.MADE_PUBLIC, 'private_project')
def test_can_make_project_public_if_admin_contributor(self):
admin_user = AuthUserFactory()
self.private_project.add_contributor(
admin_user,
permissions=(permissions.READ, permissions.WRITE, permissions.ADMIN),
auth=Auth(self.private_project.creator)
)
self.private_project.save()
res = self.app.patch_json_api(
self.private_url,
make_node_payload(self.private_project, {'public': True}),
auth=admin_user.auth  # admin_user was granted ADMIN permissions above
)
assert_equal(res.status_code, 200)
self.private_project.reload()
assert_true(self.private_project.is_public)
def test_update_project_properties_not_nested(self):
res = self.app.put_json_api(self.public_url, {
'id': self.public_project._id,
'type': 'nodes',
'title': self.new_title,
'description': self.new_description,
'category': self.new_category,
'public': True,
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Request must include /data.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data')
def test_update_invalid_id(self):
res = self.app.put_json_api(self.public_url, {
'data': {
'id': '12345',
'type': 'nodes',
'attributes': {
'title': self.new_title,
'description': self.new_description,
'category': self.new_category,
'public': True
}
}
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_update_invalid_type(self):
res = self.app.put_json_api(self.public_url, {
'data': {
'id': self.public_project._id,
'type': 'node',
'attributes': {
'title': self.new_title,
'description': self.new_description,
'category': self.new_category,
'public': True
}
}
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_update_no_id(self):
res = self.app.put_json_api(self.public_url, {
'data': {
'type': 'nodes',
'attributes': {
'title': self.new_title,
'description': self.new_description,
'category': self.new_category,
'public': True
}
}
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/id')
def test_update_no_type(self):
res = self.app.put_json_api(self.public_url, {
'data': {
'id': self.public_project._id,
'attributes': {
'title': self.new_title,
'description': self.new_description,
'category': self.new_category,
'public': True
}
}
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/type')
def test_update_public_project_logged_out(self):
res = self.app.put_json_api(self.public_url, {
'data': {
'id': self.public_project._id,
'type': 'nodes',
'attributes': {
'title': self.new_title,
'description': self.new_description,
'category': self.new_category,
'public': True
}
}
}, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.UPDATED_FIELDS, 'public_project')
def test_update_public_project_logged_in(self):
res = self.app.put_json_api(self.public_url, {
'data': {
'id': self.public_project._id,
'type': 'nodes',
'attributes': {
'title': self.new_title,
'description': self.new_description,
'category': self.new_category,
'public': True
}
}
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['data']['attributes']['title'], self.new_title)
assert_equal(res.json['data']['attributes']['description'], self.new_description)
assert_equal(res.json['data']['attributes']['category'], self.new_category)
def test_update_public_project_logged_in_but_unauthorized(self):
res = self.app.put_json_api(self.public_url, {
'data': {
'id': self.private_project._id,
'type': 'nodes',
'attributes': {
'title': self.new_title,
'description': self.new_description,
'category': self.new_category,
'public': True
}
}
}, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_cannot_update_a_registration(self):
registration = RegistrationFactory(project=self.public_project, creator=self.user)
original_title = registration.title
original_description = registration.description
url = '/{}nodes/{}/'.format(API_BASE, registration._id)
res = self.app.put_json_api(url, {
'data': {
'id': registration._id,
'type': 'nodes',
'attributes': {
'title': fake.catch_phrase(),
'description': fake.bs(),
'category': 'hypothesis',
'public': True
}
}
}, auth=self.user.auth, expect_errors=True)
registration.reload()
assert_equal(res.status_code, 403)
assert_equal(registration.title, original_title)
assert_equal(registration.description, original_description)
def test_update_private_project_logged_out(self):
res = self.app.put_json_api(self.private_url, {
'data': {
'id': self.private_project._id,
'type': 'nodes',
'attributes': {
'title': self.new_title,
'description': self.new_description,
'category': self.new_category,
'public': False
}
}
}, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.UPDATED_FIELDS, 'private_project')
def test_update_private_project_logged_in_contributor(self):
res = self.app.put_json_api(self.private_url, {
'data': {
'id': self.private_project._id,
'type': 'nodes',
'attributes': {
'title': self.new_title,
'description': self.new_description,
'category': self.new_category,
'public': False
}
}
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['data']['attributes']['title'], self.new_title)
assert_equal(res.json['data']['attributes']['description'], self.new_description)
assert_equal(res.json['data']['attributes']['category'], self.new_category)
def test_update_private_project_logged_in_non_contributor(self):
res = self.app.put_json_api(self.private_url, {
'data': {
'id': self.private_project._id,
'type': 'nodes',
'attributes': {
'title': self.new_title,
'description': self.new_description,
'category': self.new_category,
'public': False
}
}
}, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.UPDATED_FIELDS, 'public_project')
def test_update_project_sanitizes_html_properly(self):
"""Post request should update resource, and any HTML in fields should be stripped"""
new_title = '<strong>Super</strong> Cool Project'
new_description = 'An <script>alert("even cooler")</script> project'
res = self.app.put_json_api(self.public_url, {
'data': {
'id': self.public_project._id,
'type': 'nodes',
'attributes': {
'title': new_title,
'description': new_description,
'category': self.new_category,
'public': True,
}
}
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['data']['attributes']['title'], strip_html(new_title))
assert_equal(res.json['data']['attributes']['description'], strip_html(new_description))
@assert_logs(NodeLog.EDITED_TITLE, 'public_project')
def test_partial_update_project_updates_project_correctly_and_sanitizes_html(self):
new_title = 'An <script>alert("even cooler")</script> project'
res = self.app.patch_json_api(self.public_url, {
'data': {
'id': self.public_project._id,
'type': 'nodes',
'attributes': {
'title': new_title
}
}
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['data']['attributes']['title'], strip_html(new_title))
assert_equal(res.json['data']['attributes']['description'], self.description)
assert_equal(res.json['data']['attributes']['category'], self.category)
def test_write_to_public_field_non_contrib_forbidden(self):
# Test non-contrib writing to public field
res = self.app.patch_json_api(self.public_url, {
'data': {
'attributes': {
'public': False},
'id': self.public_project._id,
'type': 'nodes'
}
}, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_partial_update_public_project_logged_out(self):
res = self.app.patch_json_api(self.public_url, {
'data': {
'id': self.public_project._id,
'type': 'nodes',
'attributes': {
'title': self.new_title
}
}
}, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.EDITED_TITLE, 'public_project')
def test_partial_update_public_project_logged_in(self):
res = self.app.patch_json_api(self.public_url, {
'data': {
'id': self.public_project._id,
'type': 'nodes',
'attributes': {
'title': self.new_title,
}
}
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['data']['attributes']['title'], self.new_title)
assert_equal(res.json['data']['attributes']['description'], self.description)
assert_equal(res.json['data']['attributes']['category'], self.category)
def test_partial_update_public_project_logged_in_but_unauthorized(self):
# Public resource, logged in, unauthorized
res = self.app.patch_json_api(self.public_url, {
'data': {
'attributes': {
'title': self.new_title},
'id': self.public_project._id,
'type': 'nodes',
}
}, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_partial_update_private_project_logged_out(self):
res = self.app.patch_json_api(self.private_url, {
'data': {
'id': self.private_project._id,
'type': 'nodes',
'attributes': {
'title': self.new_title
}
}
}, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.EDITED_TITLE, 'private_project')
def test_partial_update_private_project_logged_in_contributor(self):
res = self.app.patch_json_api(self.private_url, {
'data': {
'attributes': {
'title': self.new_title},
'id': self.private_project._id,
'type': 'nodes',
}
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['data']['attributes']['title'], self.new_title)
assert_equal(res.json['data']['attributes']['description'], self.description)
assert_equal(res.json['data']['attributes']['category'], self.category)
def test_partial_update_private_project_logged_in_non_contributor(self):
res = self.app.patch_json_api(self.private_url, {
'data': {
'attributes': {
'title': self.new_title},
'id': self.private_project._id,
'type': 'nodes',
}
}, auth=self.user_two.auth,expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
def test_partial_update_invalid_id(self):
res = self.app.patch_json_api(self.public_url, {
'data': {
'id': '12345',
'type': 'nodes',
'attributes': {
'title': self.new_title,
}
}
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_partial_update_invalid_type(self):
res = self.app.patch_json_api(self.public_url, {
'data': {
'id': self.public_project._id,
'type': 'node',
'attributes': {
'title': self.new_title,
}
}
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_partial_update_no_id(self):
res = self.app.patch_json_api(self.public_url, {
'data': {
'type': 'nodes',
'attributes': {
'title': self.new_title,
}
}
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/id')
def test_partial_update_no_type(self):
res = self.app.patch_json_api(self.public_url, {
'data': {
'id': self.public_project._id,
'attributes': {
'title': self.new_title,
}
}
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/type')
# Nothing will be updated here
def test_partial_update_project_properties_not_nested(self):
res = self.app.patch_json_api(self.public_url, {
'data': {
'id': self.public_project._id,
'type': 'nodes',
'title': self.new_title,
}
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_update_project_invalid_title(self):
project = {
'data': {
'type': 'nodes',
'id': self.public_project._id,
'attributes': {
'title': 'A' * 201,
'category': 'project',
}
}
}
res = self.app.put_json_api(self.public_url, project, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Title cannot exceed 200 characters.')
class TestNodeDelete(NodeCRUDTestCase):
def test_deletes_public_node_logged_out(self):
res = self.app.delete(self.public_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert 'detail' in res.json['errors'][0]
def test_requesting_deleted_returns_410(self):
self.public_project.is_deleted = True
self.public_project.save()
res = self.app.get(self.public_url, expect_errors=True)
assert_equal(res.status_code, 410)
assert 'detail' in res.json['errors'][0]
def test_deletes_public_node_fails_if_unauthorized(self):
res = self.app.delete_json_api(self.public_url, auth=self.user_two.auth, expect_errors=True)
self.public_project.reload()
assert_equal(res.status_code, 403)
assert_equal(self.public_project.is_deleted, False)
assert 'detail' in res.json['errors'][0]
@assert_logs(NodeLog.PROJECT_DELETED, 'public_project')
def test_deletes_public_node_succeeds_as_owner(self):
res = self.app.delete_json_api(self.public_url, auth=self.user.auth, expect_errors=True)
self.public_project.reload()
assert_equal(res.status_code, 204)
assert_equal(self.public_project.is_deleted, True)
def test_deletes_private_node_logged_out(self):
res = self.app.delete(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert 'detail' in res.json['errors'][0]
@assert_logs(NodeLog.PROJECT_DELETED, 'private_project')
def test_deletes_private_node_logged_in_contributor(self):
res = self.app.delete(self.private_url, auth=self.user.auth, expect_errors=True)
self.private_project.reload()
assert_equal(res.status_code, 204)
assert_equal(self.private_project.is_deleted, True)
def test_deletes_private_node_logged_in_non_contributor(self):
res = self.app.delete(self.private_url, auth=self.user_two.auth, expect_errors=True)
self.private_project.reload()
assert_equal(res.status_code, 403)
assert_equal(self.private_project.is_deleted, False)
assert 'detail' in res.json['errors'][0]
def test_deletes_private_node_logged_in_read_only_contributor(self):
self.private_project.add_contributor(self.user_two, permissions=[permissions.READ])
self.private_project.save()
res = self.app.delete(self.private_url, auth=self.user_two.auth, expect_errors=True)
self.private_project.reload()
assert_equal(res.status_code, 403)
assert_equal(self.private_project.is_deleted, False)
assert 'detail' in res.json['errors'][0]
def test_deletes_invalid_node(self):
res = self.app.delete(self.fake_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
assert 'detail' in res.json['errors'][0]
def test_delete_project_with_component_returns_error(self):
project = ProjectFactory(creator=self.user)
component = NodeFactory(parent=project, creator=self.user)
# Return a 400 because component must be deleted before deleting the parent
res = self.app.delete_json_api(
'/{}nodes/{}/'.format(API_BASE, project._id),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
errors = res.json['errors']
assert_equal(len(errors), 1)
assert_equal(
errors[0]['detail'],
'Any child components must be deleted prior to deleting this project.'
)
def test_delete_dashboard_returns_error(self):
dashboard_node = find_dashboard(self.user)
res = self.app.delete_json_api(
'/{}nodes/{}/'.format(API_BASE, dashboard_node._id),
auth=self.user.auth,
expect_errors=True
)
# Dashboards are a folder, so a 404 is returned
assert_equal(res.status_code, 404)
class TestReturnDeletedNode(ApiTestCase):
def setUp(self):
super(TestReturnDeletedNode, self).setUp()
self.user = AuthUserFactory()
self.non_contrib = AuthUserFactory()
self.public_deleted = ProjectFactory(is_deleted=True, creator=self.user,
title='This public project has been deleted', category='project',
is_public=True)
self.private_deleted = ProjectFactory(is_deleted=True, creator=self.user,
title='This private project has been deleted', category='project',
is_public=False)
self.private = ProjectFactory(is_public=False, creator=self.user, title='A boring project', category='project')
self.public = ProjectFactory(is_public=True, creator=self.user, title='A fun project', category='project')
self.new_title = 'This deleted node has been edited'
self.public_url = '/{}nodes/{}/'.format(API_BASE, self.public_deleted._id)
self.private_url = '/{}nodes/{}/'.format(API_BASE, self.private_deleted._id)
def test_return_deleted_public_node(self):
res = self.app.get(self.public_url, expect_errors=True)
assert_equal(res.status_code, 410)
def test_return_deleted_private_node(self):
res = self.app.get(self.private_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 410)
def test_edit_deleted_public_node(self):
res = self.app.put_json_api(self.public_url, params={'title': self.new_title,
'node_id': self.public_deleted._id,
'category': self.public_deleted.category},
auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 410)
def test_edit_deleted_private_node(self):
res = self.app.put_json_api(self.private_url, params={'title': self.new_title,
'node_id': self.private_deleted._id,
'category': self.private_deleted.category},
auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 410)
def test_delete_deleted_public_node(self):
res = self.app.delete(self.public_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 410)
def test_delete_deleted_private_node(self):
res = self.app.delete(self.private_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 410)
class TestNodeTags(ApiTestCase):
def setUp(self):
super(TestNodeTags, self).setUp()
self.user = AuthUserFactory()
self.user_two = AuthUserFactory()
self.read_only_contributor = AuthUserFactory()
self.public_project = ProjectFactory(title="Project One", is_public=True, creator=self.user)
self.public_project.add_contributor(self.user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
self.private_project = ProjectFactory(title="Project Two", is_public=False, creator=self.user)
self.private_project.add_contributor(self.user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
self.public_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id)
self.private_url = '/{}nodes/{}/'.format(API_BASE, self.private_project._id)
self.one_new_tag_json = {
'data': {
'id': self.public_project._id,
'type': 'nodes',
'attributes': {
'tags': ['new-tag']
}
}
}
self.private_payload = {
'data': {
'id': self.private_project._id,
'type': 'nodes',
'attributes': {
'tags': ['new-tag']
}
}
}
def test_public_project_starts_with_no_tags(self):
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']['attributes']['tags']), 0)
@assert_logs(NodeLog.TAG_ADDED, 'public_project')
def test_contributor_can_add_tag_to_public_project(self):
res = self.app.patch_json_api(self.public_url, self.one_new_tag_json, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
# Ensure data is correct from the PATCH response
assert_equal(len(res.json['data']['attributes']['tags']), 1)
assert_equal(res.json['data']['attributes']['tags'][0], 'new-tag')
# Ensure data is correct in the database
self.public_project.reload()
assert_equal(len(self.public_project.tags), 1)
assert_equal(self.public_project.tags[0]._id, 'new-tag')
# Ensure data is correct when GETting the resource again
reload_res = self.app.get(self.public_url)
assert_equal(len(reload_res.json['data']['attributes']['tags']), 1)
assert_equal(reload_res.json['data']['attributes']['tags'][0], 'new-tag')
@assert_logs(NodeLog.TAG_ADDED, 'private_project')
def test_contributor_can_add_tag_to_private_project(self):
res = self.app.patch_json_api(self.private_url, self.private_payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
# Ensure data is correct from the PATCH response
assert_equal(len(res.json['data']['attributes']['tags']), 1)
assert_equal(res.json['data']['attributes']['tags'][0], 'new-tag')
# Ensure data is correct in the database
self.private_project.reload()
assert_equal(len(self.private_project.tags), 1)
assert_equal(self.private_project.tags[0]._id, 'new-tag')
# Ensure data is correct when GETting the resource again
reload_res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(len(reload_res.json['data']['attributes']['tags']), 1)
assert_equal(reload_res.json['data']['attributes']['tags'][0], 'new-tag')
def test_non_authenticated_user_cannot_add_tag_to_public_project(self):
res = self.app.patch_json_api(self.public_url, self.one_new_tag_json, expect_errors=True, auth=None)
assert_equal(res.status_code, 401)
def test_non_authenticated_user_cannot_add_tag_to_private_project(self):
res = self.app.patch_json_api(self.private_url, self.private_payload, expect_errors=True, auth=None)
assert_equal(res.status_code, 401)
def test_non_contributor_cannot_add_tag_to_public_project(self):
res = self.app.patch_json_api(self.public_url, self.one_new_tag_json, expect_errors=True, auth=self.user_two.auth)
assert_equal(res.status_code, 403)
def test_non_contributor_cannot_add_tag_to_private_project(self):
res = self.app.patch_json_api(self.private_url, self.private_payload, expect_errors=True, auth=self.user_two.auth)
assert_equal(res.status_code, 403)
def test_read_only_contributor_cannot_add_tag_to_public_project(self):
res = self.app.patch_json_api(self.public_url, self.one_new_tag_json, expect_errors=True, auth=self.read_only_contributor.auth)
assert_equal(res.status_code, 403)
def test_read_only_contributor_cannot_add_tag_to_private_project(self):
res = self.app.patch_json_api(self.private_url, self.private_payload, expect_errors=True, auth=self.read_only_contributor.auth)
assert_equal(res.status_code, 403)
@assert_logs(NodeLog.TAG_ADDED, 'private_project', -4)
@assert_logs(NodeLog.TAG_ADDED, 'private_project', -3)
@assert_logs(NodeLog.TAG_REMOVED, 'private_project', -2)
@assert_logs(NodeLog.TAG_REMOVED, 'private_project')
def test_tags_add_and_remove_properly(self):
res = self.app.patch_json_api(self.private_url, self.private_payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
# Ensure adding tag data is correct from the PATCH response
assert_equal(len(res.json['data']['attributes']['tags']), 1)
assert_equal(res.json['data']['attributes']['tags'][0], 'new-tag')
# Ensure removing and adding tag data is correct from the PATCH response
res = self.app.patch_json_api(self.private_url, {'data': {'id': self.private_project._id, 'type':'nodes', 'attributes': {'tags':['newer-tag']}}}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']['attributes']['tags']), 1)
assert_equal(res.json['data']['attributes']['tags'][0], 'newer-tag')
# Ensure removing tag data is correct from the PATCH response
res = self.app.patch_json_api(self.private_url, {'data': {'id': self.private_project._id, 'type':'nodes', 'attributes': {'tags': []}}}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']['attributes']['tags']), 0)
def test_tags_post_object_instead_of_list(self):
url = '/{}nodes/'.format(API_BASE)
payload = {'data': {
'type': 'nodes',
'attributes': {
'title': 'new title',
'category': 'project',
'tags': {'foo': 'bar'}
}
}}
res = self.app.post_json_api(url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Expected a list of items but got type "dict".')
def test_tags_patch_object_instead_of_list(self):
self.one_new_tag_json['data']['attributes']['tags'] = {'foo': 'bar'}
res = self.app.patch_json_api(self.public_url, self.one_new_tag_json, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Expected a list of items but got type "dict".')
|
|
'''
LICENSING
-------------------------------------------------
loopa: Arduino-esque event loop app framework, and other utilities.
Copyright (C) 2016 Muterra, Inc.
Contributors
------------
Nick Badger
badg@muterra.io | badg@nickbadger.com | nickbadger.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the
Free Software Foundation, Inc.,
51 Franklin Street,
Fifth Floor,
Boston, MA 02110-1301 USA
------------------------------------------------------
'''
import logging
import asyncio
import threading
import traceback
import concurrent.futures
# ###############################################
# Boilerplate
# ###############################################
# Control * imports.
__all__ = [
]
logger = logging.getLogger(__name__)
# ###############################################
# Misc
# ###############################################
def default_to(check, default, comparator=None):
''' If check is None, apply default; else, return check.
'''
if comparator is None:
if check is None:
return default
else:
return check
else:
if check == comparator:
return default
else:
return check
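# A minimal usage sketch of default_to (values chosen for illustration only):
#
#   default_to(None, 5)              -> 5   # check is None, so the default applies
#   default_to(3, 5)                 -> 3   # check is not None, so it is returned
#   default_to(0, 5, comparator=0)   -> 5   # with a comparator, equality triggers the default
#   default_to(1, 5, comparator=0)   -> 1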
# ###############################################
# Lib
# ###############################################
def harvest_background_task(task):
''' Looks at a completed background task. If it has a result, logs
it to debug. If it has an error, logs the error.
'''
exc = task.exception()
if exc is None:
result = task.result()
# If there was no result, then debug the completion.
if result is None:
logger.debug(
'Background task completed successfully with no result: ' +
repr(task)
)
# There was a result, so upgrade it to info.
else:
logger.info(''.join((
'Background task completed successfully. Result: ',
repr(result),
' (background results are always ignored). Task: ',
repr(task)
)))
# The task did not complete successfully. Log the error.
else:
creation_trace = task._source_traceback
if creation_trace:
creation_info = 'Task created at: \n' + \
''.join(creation_trace.format()) + '\n'
else:
creation_info = ''
logger.error(
'Background task DID NOT COMPLETE. ' + creation_info +
'Exception traceback:\n' +
''.join(traceback.format_exception(type(exc), exc,
exc.__traceback__))
)
def make_background_future(*args, **kwargs):
''' Runs asyncio's ensure_future, and then adds a callback to
harvest_background_task, and returns the task. Argspec is identical
to ensure_future.
'''
task = asyncio.ensure_future(*args, **kwargs)
task.add_done_callback(harvest_background_task)
return task
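# A minimal usage sketch; `heartbeat` is a hypothetical coroutine, not part of
# this module. The returned task runs in the background and its outcome is
# logged by harvest_background_task when it completes:
#
#   async def heartbeat():
#       await asyncio.sleep(1)
#
#   task = make_background_future(heartbeat())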
class _WrappedEvent(asyncio.Event):
''' Adds an internal future to the event and gives it any expected
methods.
'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__result = None
self.__exc = None
def set(self, result, exc):
''' Sets us up with a result and an exception.
'''
self.__result = result
self.__exc = exc
super().set()
async def wait(self):
''' Waits for self, and then returns our result, or raises our
exception.
'''
await super().wait()
if self.__exc is not None:
raise self.__exc
else:
return self.__result
def wrap_threaded_future(fut, loop=None):
''' Wraps a threaded future in an async future.
'''
# Create an event on our source loop.
if loop is None:
loop = asyncio.get_event_loop()
source_event = _WrappedEvent()
# Create a callback to set the source event's infos
def callback(fut, loop=loop, source_event=source_event):
exc = None
result = None
try:
exc = fut.exception()
# Only get the result if there was no exception, or this will raise
# the exception.
if exc is None:
result = fut.result()
except concurrent.futures.CancelledError as cancelled:
exc = cancelled
finally:
# This is what actually passes the values on
loop.call_soon_threadsafe(source_event.set, result, exc)
# This will also be called if the fut is cancelled.
fut.add_done_callback(callback)
# Now wrap the event's wait into a future and return it
return asyncio.ensure_future(source_event.wait())
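# A minimal usage sketch; the executor and `blocking_call` are hypothetical.
# The threaded future is mirrored into an asyncio future that can be awaited
# without blocking the event loop:
#
#   executor = concurrent.futures.ThreadPoolExecutor()
#
#   async def run_in_thread(blocking_call):
#       thread_fut = executor.submit(blocking_call)
#       return (await wrap_threaded_future(thread_fut))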
def wait_threadsafe(fut):
''' Wait for the result of an asyncio future from synchronous code.
Returns it as soon as available.
'''
event = threading.Event()
results = []
# Create a callback to set the event and extract the results
def callback(fut, event=event):
exc = None
result = None
try:
exc = fut.exception()
# Only get the result if there was no exception, or this will raise
# the exception.
if exc is None:
result = fut.result()
except concurrent.futures.CancelledError as cancelled:
exc = cancelled
finally:
results.append(result)
results.append(exc)
event.set()
# Add the callback to the future (it will always be run from within the
# event loop). But make sure to do so in a threadsafe way. Hot damn this is
# messy.
fut._loop.call_soon_threadsafe(
fut.add_done_callback,
callback
)
# Now wait for completion and return the exception or result.
event.wait()
result, exc = results
if exc:
raise exc
else:
return result
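# A minimal usage sketch; `task` is a hypothetical asyncio.Task created on a
# loop that is running in another thread. This call blocks the current
# (non-loop) thread until the task finishes, then returns its result or
# re-raises its exception:
#
#   result = wait_threadsafe(task)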
async def run_coroutine_loopsafe(coro, loop):
''' Threadsafe, asyncsafe (ie non-loop-blocking) call to run a coro
in a different event loop. Returns a future that can be awaited from
within the current loop.
'''
# This returns a concurrent.futures.Future, so we need to wait for it, but
# we cannot block our event loop, soooo...
thread_future = asyncio.run_coroutine_threadsafe(coro, loop)
return wrap_threaded_future(thread_future)
async def await_coroutine_loopsafe(coro, loop, timeout=None):
''' Wrapper around run_coroutine_loopsafe that actually returns the
result of the coro (or raises its exception).
'''
async_future = run_coroutine_loopsafe(coro, loop)
return (await asyncio.wait_for(async_future, timeout=timeout))
def await_coroutine_threadsafe(coro, loop):
''' Wrapper on asyncio.run_coroutine_threadsafe that makes a coro
behave as if it were called synchronously. In other words, instead
of returning a future, it raises the exception or returns the coro's
result.
The target loop must be passed explicitly; it should already be running
(typically in a different thread) for the coroutine to be scheduled.
'''
fut = asyncio.run_coroutine_threadsafe(
coro = coro,
loop = loop
)
# Block on completion of coroutine and then raise any created exception
exc = fut.exception()
if exc:
raise exc
return fut.result()
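# A minimal usage sketch; `do_work` and `worker_loop` are hypothetical. Call
# this from a thread that is NOT running `worker_loop`; it blocks until the
# coroutine finishes on that loop:
#
#   result = await_coroutine_threadsafe(do_work(), loop=worker_loop)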
def triplicated(func):
''' Decorator to make a threadsafe and loopsafe copy of the
decorated function.
'''
func.__triplicate__ = True
return func
# Note: if either Triplicate name, or type subclass changes, need to update
# bottom of __new__
class Triplicate(type):
''' Metaclass to handle creation of triplicated (async, threadsafe,
loopsafe) functions. It will not affect subclasses -- ie, subs will
have no idea that their parent was created through triplication, and
are therefore free to alter their metaclass as they'd like. BUT, as
a flipside, subclasses must explicitly re-declare a Triplicate
metaclass, if they want to.
'''
def __new__(mcls, clsname, bases, namespace, *args, **kwargs):
''' Modify the existing namespace: create a triplicate API for
every @triplicate function.
'''
threadsafe_suffix = '_threadsafe'
loopsafe_suffix = '_loopsafe'
triplicates = {}
for name, obj in namespace.items():
# Only do this for triplicate-decorated functions.
if hasattr(obj, '__triplicate__'):
# Create a threadsafe version, memoizing the source coro.
def threadsafe(self, *args, src_coro=obj, **kwargs):
''' Auto-generated threadsafe function for a
(async, threadsafe, loopsafe) API.
'''
# Note that, because the src_coro is unbound, we have to
# pass an explicit self.
return await_coroutine_threadsafe(
coro = src_coro(self, *args, **kwargs),
loop = self._loop
)
# Create a loopsafe version, memoizing the source coro.
async def loopsafe(self, *args, src_coro=obj, **kwargs):
''' Auto-generated loopsafe function for a
triplicate (async, threadsafe, loopsafe) API.
'''
# Note that, because the src_coro is unbound, we have to
# pass an explicit self.
return (await await_coroutine_loopsafe(
coro = src_coro(self, *args, **kwargs),
loop = self._loop
))
# We can't update namespace while iterating over it, so put
# those into a temp dict.
threadsafe_name = name + threadsafe_suffix
loopsafe_name = name + loopsafe_suffix
triplicates[threadsafe_name] = threadsafe
triplicates[loopsafe_name] = loopsafe
# Now that we're done iterating, merge the generated wrappers into the namespace.
namespace.update(triplicates)
# Return the super()ed class.
return super().__new__(mcls, clsname, bases, namespace,
*args, **kwargs)
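# A minimal usage sketch of the metaclass; `Pinger` is hypothetical and assumes
# the instance stores its event loop on self._loop, as the generated wrappers
# expect:
#
#   class Pinger(metaclass=Triplicate):
#       def __init__(self, loop):
#           self._loop = loop
#
#       @triplicated
#       async def ping(self):
#           return 'pong'
#
#   # In addition to the native coroutine, the metaclass generates
#   # ping_threadsafe() (blocking, for other threads) and ping_loopsafe()
#   # (awaitable from a different event loop).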
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 15 16:51:46 2017
@author: mkonrad
"""
import pytest
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from scipy.stats import chisquare
from pdftabextract.clustering import (find_clusters_1d_break_dist, zip_clusters_and_values, calc_cluster_centers_1d,
array_match_difference_1d, find_best_matching_array, adjust_bad_positions)
@given(st.lists(st.integers(min_value=-10000, max_value=10000)),
st.integers(min_value=-10000, max_value=10000))
def test_find_clusters_1d_break_dist(seq, delta):
with pytest.raises(TypeError): # first param must be np.array
find_clusters_1d_break_dist(seq, delta)
arr = np.array(seq)
if delta < 0:
with pytest.raises(ValueError): # delta must be >= 0
find_clusters_1d_break_dist(arr, delta)
return
clusts = find_clusters_1d_break_dist(arr, delta)
# types and return length must match
assert type(clusts) is list
assert sum(map(len, clusts)) == len(seq)
idx_list = []
for c in clusts:
idx_list.extend(c)
assert len(idx_list) == len(seq)
recon = arr[idx_list]
recon_sorted = np.sort(recon)
seq_sorted = np.sort(seq)
# values in clusters and in input must match
assert np.array_equal(recon_sorted, seq_sorted)
if len(seq) > 1:
clust_borders = []
for c in clusts:
v = arr[c]
# inside clusters, the gaps must be < delta
if len(v) > 1:
max_dist_in_clust = max(np.diff(np.sort(v)))
assert max_dist_in_clust < delta
v_min = np.min(v)
v_max = np.max(v)
clust_borders.append((v_min, v_max))
clust_borders = sorted(clust_borders, key=lambda x: x[0])
if len(clusts) > 1:
# between the clusters, the gaps must be >= delta
gaps = []
prev_max = None
for v_min, v_max in clust_borders:
if prev_max is not None:
gaps.append(v_min - prev_max)
prev_max = v_max
assert min(gaps) >= delta
@given(st.lists(st.integers(min_value=-10000, max_value=10000)),
st.integers(min_value=-10000, max_value=10000))
def test_zip_clusters_and_values(seq, delta):
arr = np.array(seq)
try:
clusts = find_clusters_1d_break_dist(arr, delta)
except: # exceptions are tested in test_find_clusters_1d_break_dist
return
with pytest.raises(TypeError): # second param must be np.array
zip_clusters_and_values(clusts, seq)
clusts_w_vals = zip_clusters_and_values(clusts, arr)
assert len(clusts_w_vals) == len(clusts)
for tup in clusts_w_vals:
assert len(tup) == 2
ind, vals = tup
assert len(ind) > 0
assert len(ind) == len(vals)
assert np.array_equal(arr[ind], vals)
@given(st.lists(st.integers(min_value=-10000, max_value=10000)),
st.integers(min_value=-10000, max_value=10000))
def test_calc_cluster_centers_1d(seq, delta):
arr = np.array(seq)
try:
clusts = find_clusters_1d_break_dist(arr, delta)
clusts_w_vals = zip_clusters_and_values(clusts, arr)
except: # exceptions are tested in test_find_clusters_1d_break_dist and test_zip_clusters_and_values
return
centers = calc_cluster_centers_1d(clusts_w_vals)
assert len(centers) == len(clusts_w_vals)
for c, (_, vals) in zip(centers, clusts_w_vals):
assert c == np.median(vals)
@given(st.lists(st.integers(min_value=-10000, max_value=10000), average_size=100),
st.lists(st.integers(min_value=-10000, max_value=10000), average_size=100),
st.booleans(),
st.booleans())
def test_array_match_difference_1d(l1, l2, l1_to_arr, l2_to_arr):
if l1_to_arr:
l1 = np.array(l1)
if l2_to_arr:
l2 = np.array(l2)
if len(l1) != len(l2):
with pytest.raises(ValueError): # lengths must be the same
array_match_difference_1d(l1, l2)
return
if len(l1) == 0:
with pytest.raises(ValueError): # lengths must be > 0
array_match_difference_1d(l1, l2)
return
diff1 = array_match_difference_1d(l1, l2)
assert diff1 == array_match_difference_1d(l2, l1)
assert diff1 == np.sum(np.abs(np.array(l1) - np.array(l2)))
def test_find_best_matching_array():
values = [
[0, 10, 30, 40],
[0, 11, 29, 42],
[10, 21, 25, 39, 52],
[0, 9, 15, 29, 32, 41],
[0, 10, 29, 35, 36, 40],
[0, 9, 41],
[0, 33, ],
]
correct_results = [
([0, 11, 29, 42], 4),
([10, 21, 39, 52], 4),
([0, 9, 29, 41], 3),
([0, 10, 29, 40], 1),
([0, 9, 30, 41], 2),
([0, 10, 33, 40], 3)
]
model = np.array(values[0])
for i, row in enumerate(values[1:]):
row = np.array(row)
corrected_row, diffsum = find_best_matching_array(row, model)
corr_res_row, corr_diffsum = correct_results[i]
assert np.array_equal(corrected_row, corr_res_row)
assert diffsum == corr_diffsum
def test_find_best_matching_array_exceptions():
with pytest.raises(TypeError):
find_best_matching_array([1, 2, 3], np.array([1, 2, 3]))
with pytest.raises(TypeError):
find_best_matching_array(np.array([1, 2, 3]), [1, 2, 3])
with pytest.raises(ValueError):
find_best_matching_array(np.array([]), np.array([1, 2, 3]))
with pytest.raises(ValueError):
find_best_matching_array(np.array([1, 2, 3]), np.array([]))
@given(st.lists(st.integers(min_value=-10000, max_value=10000), min_size=1, average_size=10, max_size=20),
st.lists(st.lists(st.integers(min_value=-10000, max_value=10000), min_size=1, average_size=10, max_size=20), min_size=1, average_size=10, max_size=20))
def test_find_best_matching_array_hypothesis(model, trials):
model = np.array(model)
for row in trials:
row = np.array(row)
corrected_row, diffsum = find_best_matching_array(row, model)
assert len(corrected_row) == len(model)
assert diffsum >= 0
def test_adjust_bad_positions():
pages_positions = {
0: [8, 28, 33, 38],
1: [10, 30, 35, 40],
2: [10, 30, 35, 40],
3: [0, 20, 25, 32],
4: [3, 21, 25, 31],
5: [3, 21, 25, 31],
}
mean_widths = np.diff([np.mean(pos) for pos in zip(*pages_positions.values())])
pages_positions.update({
6: [3, 21, 20, 31], # bad: neg. width
7: [3, 21, 25, 28, 31], # bad: too many positions
8: [3, 21, 25, 70], # bad: invalid last position
})
alpha = 0.05
adj_positions = adjust_bad_positions(pages_positions, pos_check_signif_level=alpha)
assert pages_positions.keys() == adj_positions.keys()
for p_num in pages_positions.keys():
orig = pages_positions[p_num]
adj = adj_positions[p_num]
assert len(adj) == 4
assert adj[0] == orig[0]
adj_widths = np.diff(adj)
_, p_val = chisquare(adj_widths, mean_widths)
assert p_val >= alpha
|
|
# -*- coding: utf-8 -*-
from os.path import dirname, relpath, join
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_equals, assert_is_instance, \
assert_raises_regexp, with_setup
from diylisp.interpreter import interpret, interpret_file
from diylisp.types import Environment, String, LispError, Closure
from diylisp.parser import parse
env = None
def prepare_env():
global env
env = Environment()
path = join(dirname(relpath(__file__)), '..', 'stdlib.diy')
interpret_file(path, env)
"""
In this last part, we provide tests for some suggestions on how to improve
the language a bit. Treat these tasks as optional, and suggestions only.
Feel free to do something completely different, if you fancy.
"""
"""
Suggestion 1: `cond`
First off, we will implement a new control structure found in most Lisps, the
`cond` form (not to be confused with `cons`). The name `cond` is short for
"conditional", and is sort of a buffed up version of `if`.
Implement this as a new case in the `evaluate` function in `evaluator.py`.
"""
@with_setup(prepare_env)
def test_cond_returns_right_branch():
"""
`cond` takes as arguments a list of tuples (two-element lists, or "conses").
The first element of each tuple is evaluated in order, until one evaluates
to `#t`. The second element of that tuple is returned.
"""
program = """
(cond ((#f 'foo)
(#t 'bar)
(#f 'baz)))
"""
assert_equals("bar", interpret(program, env))
@with_setup(prepare_env)
def test_cond_doesnt_evaluate_all_branches():
"""
Of all the second tuple elements, only the one we return is ever evaluated.
"""
interpret("(define foo 42)", env)
program = """
(cond ((#f fire-the-missiles)
(#t foo)
(#f something-else-we-wont-do)))
"""
assert_equals("42", interpret(program, env))
@with_setup(prepare_env)
def test_cond_not_evaluating_more_predicates_than_necessary():
"""
Once we find a predicate that evaluates to `#t`, no more predicates should
be evaluated.
"""
program = """
(cond ((#f 1)
(#t 2)
(dont-evaluate-me! 3)))
"""
assert_equals("2", interpret(program, env))
@with_setup(prepare_env)
def test_cond_evaluates_predicates():
"""
Remember to evaluate the predicates before checking whether they are true.
"""
program = """
(cond (((not #t) 'totally-not-true)
((> 4 3) 'tru-dat)))
"""
assert_equals("tru-dat", interpret(program, env))
@with_setup(prepare_env)
def test_cond_returns_false_as_default():
"""
If we evaluate all the predicates, only to find that none of them turned out
to be true, then `cond` should return `#f`.
"""
program = """
(cond ((#f 'no)
(#f 'nope)
(#f 'i-dont-even)))
"""
assert_equals("#f", interpret(program, env))
"""
Suggestion 2: Strings
So far, our new language has been missing a central data type, one that no
real language could do without -- strings. So, lets add them to the language.
"""
@with_setup(prepare_env)
def test_parsing_simple_strings():
"""
First things first, we need to be able to parse the strings.
Since we already use python strings for our symbols, we need something else.
Lets use a simple data type, `String`, which you will (rather conveniently)
find ready made in the file `types.py`.
> Side note:
>
> This is where it starts to show that we could have used smarter
> representation of our types. We wanted to keep things simple early on,
> and now we pay the price. We could have represented our types as tuples
> of type and value, or perhaps made classes for all of them.
>
> Feel free to go back and fix this. Refactor as much as you wish -- just
> remember to update the tests accordingly.
"""
ast = parse('"foo bar"')
assert_is_instance(ast, String)
assert_equals("foo bar", ast.val)
@with_setup(prepare_env)
def test_parsing_empty_string():
"""
Empty strings are strings too!
"""
assert_equals('', parse('""').val)
@with_setup(prepare_env)
def test_parsing_strings_with_escaped_double_quotes():
"""
We should be able to create strings with "-characters by escaping them.
"""
ast = parse('"Say \\"what\\" one more time!"')
assert_is_instance(ast, String)
assert_equals('Say \\"what\\" one more time!', ast.val)
@with_setup(prepare_env)
def test_parsing_unclosed_strings():
"""
Strings that are not closed result in a parse error.
"""
with assert_raises_regexp(LispError, 'Unclosed string'):
parse('"hey, close me!')
@with_setup(prepare_env)
def test_parsing_strings_are_closed_by_first_closing_quotes():
"""
Strings are delimited by the first and last (unescaped) double quotes.
Thus, unescaped quotes followed by anything at all should be considered
invalid and throw an exception.
"""
with assert_raises_regexp(LispError, 'Expected EOF'):
parse('"foo" bar"')
@with_setup(prepare_env)
def test_parsing_strings_with_parens_in_them():
"""
Strings should be allowed to contain parens.
The parser, so far, rather naively counts parens to determine the end of
a list. We need to make a small adjustment so that it knows not to count
parens within strings.
Tip: You'll probably need to change the function `find_matching_paren` in
`parser.py` to solve this.
"""
actual = parse("(define foo \"string with a ) inside it\")")
expected = ["define", "foo", String("string with a ) inside it")]
assert_equals(expected, actual)
@with_setup(prepare_env)
def test_parsing_of_strings():
"""
A final sanity check, to make sure parsing strings works.
This test should already pass if you've done the above correctly.
"""
program = "(head '(#t \"()((()()) wtf \\\" ())) is )))) ()() going on \"))"
assert_equals("#t", interpret(program))
@with_setup(prepare_env)
def test_evaluating_strings():
"""
Strings are one of the basic data types, and thus atoms. Strings should
therefore evaluate to themselves.
"""
random_quote = '"The limits of my language means the limits of my world."'
assert_equals(random_quote, interpret(random_quote, env))
@with_setup(prepare_env)
def test_empty_strings_behave_as_empty_lists():
"""
It is common in many languages for strings to behave as lists. This can be
rather convenient, so let's make it that way here as well.
We have four basic list functions: `cons`, `head`, `tail` and `empty`.
To take the easy one first: `empty` should only return `#t` for the empty
string (and empty lists, as before).
"""
assert_equals("#t", interpret('(empty "")'))
assert_equals("#f", interpret('(empty "not empty")'))
@with_setup(prepare_env)
def test_strings_have_heads_and_tails():
"""
Next, `head` and `tail` need to extract the first character and the rest
of the characters, respectively, from the string.
"""
assert_equals('"f"', interpret('(head "foobar")'))
assert_equals('"oobar"', interpret('(tail "foobar")'))
@with_setup(prepare_env)
def test_consing_strings_back_together():
"""
Finally, we need to be able to reconstruct a string from its head and tail.
"""
assert_equals('"foobar"', interpret('(cons "f" "oobar")'))
"""
Suggestion 3: `let`
The `let` form enables us to make local bindings.
It takes two arguments. First a list of bindings, secondly an expression to be
evaluated within an environment where those bindings exist.
"""
@with_setup(prepare_env)
def test_let_returns_result_of_the_given_expression():
"""
The result when evaluating a `let` binding is the evaluation of the
expression given as argument.
Let's first try one without any bindings.
"""
program = "(let () (if #t 'yep 'nope))"
assert_equals("yep", interpret(program, env))
@with_setup(prepare_env)
def test_let_extends_environment():
"""
The evaluation of the inner expression should have available the bindings
provided within the first argument.
"""
program = """
(let ((foo (+ 1000 42)))
foo)
"""
assert_equals("1042", interpret(program, env))
@with_setup(prepare_env)
def test_let_bindings_have_access_to_previous_bindings():
"""
Each new binding should have access to the previous bindings in the list.
"""
program = """
(let ((foo 10)
(bar (+ foo 5)))
bar)
"""
assert_equals("15", interpret(program, env))
@with_setup(prepare_env)
def test_let_bindings_overshadow_outer_environment():
"""
Let bindings should shadow definitions from outer environments.
"""
interpret("(define foo 1)", env)
program = """
(let ((foo 2))
foo)
"""
assert_equals("2", interpret(program, env))
@with_setup(prepare_env)
def test_let_bindings_do_not_affect_outer_environment():
"""
After the let is evaluated, all of its bindings are forgotten.
"""
interpret("(define foo 1)", env)
assert_equals("2", interpret("(let ((foo 2)) foo)", env))
assert_equals("1", interpret("foo", env))
"""
Suggestion 4: `defn`
So far, to define functions we have had to write
(define my-function
(lambda (foo bar)
'function-body-here))
It is a bit ugly to have to make a lambda every time you want a named function.
Let's add some syntactic sugar, shall we:
(defn my-function (foo bar)
'function-body-here)
"""
@with_setup(prepare_env)
def test_defn_binds_the_variable_just_like_define():
"""
Like `define`, the `defn` form should bind a variable to the environment.
This variable should be a closure, just like if we had defined a new
variable using the old `define` + `lambda` syntax.
"""
interpret("(defn foo (x) (> x 10))", env)
assert_is_instance(env.lookup("foo"), Closure)
@with_setup(prepare_env)
def test_defn_results_in_the_correct_closure():
"""
The closure created should be no different than from the old syntax.
"""
interpret("(defn foo-1 (x) (> x 10))", env)
interpret("(define foo-2 (lambda (x) (> x 10)))", env)
foo1 = env.lookup("foo-1")
foo2 = env.lookup("foo-2")
assert_equals(foo1.body, foo2.body)
assert_equals(foo1.params, foo2.params)
assert_equals(foo1.env, foo2.env)
|
|
# util/langhelpers.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to help with the creation, loading and introspection of
modules, classes, hierarchies, attributes, functions, and methods.
"""
import itertools
import inspect
import operator
import re
import sys
import types
import warnings
from compat import update_wrapper, set_types, threading
from sqlalchemy import exc
def _unique_symbols(used, *bases):
used = set(used)
for base in bases:
pool = itertools.chain((base,),
itertools.imap(lambda i: base + str(i),
xrange(1000)))
for sym in pool:
if sym not in used:
used.add(sym)
yield sym
break
else:
raise NameError("exhausted namespace for symbol base %s" % base)
def decorator(target):
"""A signature-matching decorator factory."""
def decorate(fn):
if not inspect.isfunction(fn):
raise Exception("not a decoratable function")
spec = inspect.getargspec(fn)
names = tuple(spec[0]) + spec[1:3] + (fn.func_name,)
targ_name, fn_name = _unique_symbols(names, 'target', 'fn')
metadata = dict(target=targ_name, fn=fn_name)
metadata.update(format_argspec_plus(spec, grouped=False))
code = 'lambda %(args)s: %(target)s(%(fn)s, %(apply_kw)s)' % (
metadata)
decorated = eval(code, {targ_name:target, fn_name:fn})
decorated.func_defaults = getattr(fn, 'im_func', fn).func_defaults
return update_wrapper(decorated, fn)
return update_wrapper(decorate, target)
def get_cls_kwargs(cls):
"""Return the full set of inherited kwargs for the given `cls`.
Probes a class's __init__ method, collecting all named arguments. If the
__init__ defines a \**kwargs catch-all, then the constructor is presumed to
pass along unrecognized keywords to its base classes, and the collection
process is repeated recursively on each of the bases.
Uses a subset of inspect.getargspec() to cut down on method overhead.
No anonymous tuple arguments, please!
"""
for c in cls.__mro__:
if '__init__' in c.__dict__:
stack = set([c])
break
else:
return []
args = set()
while stack:
class_ = stack.pop()
ctr = class_.__dict__.get('__init__', False)
if (not ctr or
not isinstance(ctr, types.FunctionType) or
not isinstance(ctr.func_code, types.CodeType)):
stack.update(class_.__bases__)
continue
# this is shorthand for
# names, _, has_kw, _ = inspect.getargspec(ctr)
names, has_kw = inspect_func_args(ctr)
args.update(names)
if has_kw:
stack.update(class_.__bases__)
args.discard('self')
return args
try:
from inspect import CO_VARKEYWORDS
def inspect_func_args(fn):
co = fn.func_code
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
has_kw = bool(co.co_flags & CO_VARKEYWORDS)
return args, has_kw
except ImportError:
def inspect_func_args(fn):
names, _, has_kw, _ = inspect.getargspec(fn)
return names, bool(has_kw)
def get_func_kwargs(func):
"""Return the set of legal kwargs for the given `func`.
Uses getargspec so is safe to call for methods, functions,
etc.
"""
return inspect.getargspec(func)[0]
def format_argspec_plus(fn, grouped=True):
"""Returns a dictionary of formatted, introspected function arguments.
An enhanced variant of inspect.formatargspec to support code generation.
fn
An inspectable callable or tuple of inspect getargspec() results.
grouped
Defaults to True; include (parens, around, argument) lists
Returns:
args
Full inspect.formatargspec for fn
self_arg
The name of the first positional argument, varargs[0], or None
if the function defines no positional arguments.
apply_pos
args, re-written in calling rather than receiving syntax. Arguments are
passed positionally.
apply_kw
Like apply_pos, except keyword-ish args are passed as keywords.
Example::
>>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
{'args': '(self, a, b, c=3, **d)',
'self_arg': 'self',
'apply_kw': '(self, a, b, c=c, **d)',
'apply_pos': '(self, a, b, c, **d)'}
"""
spec = callable(fn) and inspect.getargspec(fn) or fn
args = inspect.formatargspec(*spec)
if spec[0]:
self_arg = spec[0][0]
elif spec[1]:
self_arg = '%s[0]' % spec[1]
else:
self_arg = None
apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2])
defaulted_vals = spec[3] is not None and spec[0][0-len(spec[3]):] or ()
apply_kw = inspect.formatargspec(spec[0], spec[1], spec[2], defaulted_vals,
formatvalue=lambda x: '=' + x)
if grouped:
return dict(args=args, self_arg=self_arg,
apply_pos=apply_pos, apply_kw=apply_kw)
else:
return dict(args=args[1:-1], self_arg=self_arg,
apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1])
def format_argspec_init(method, grouped=True):
"""format_argspec_plus with considerations for typical __init__ methods
Wraps format_argspec_plus with error handling strategies for typical
__init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
try:
return format_argspec_plus(method, grouped=grouped)
except TypeError:
self_arg = 'self'
if method is object.__init__:
args = grouped and '(self)' or 'self'
else:
args = (grouped and '(self, *args, **kwargs)'
or 'self, *args, **kwargs')
return dict(self_arg='self', args=args, apply_pos=args, apply_kw=args)
def getargspec_init(method):
"""inspect.getargspec with considerations for typical __init__ methods
Wraps inspect.getargspec with error handling for typical __init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
try:
return inspect.getargspec(method)
except TypeError:
if method is object.__init__:
return (['self'], None, None, None)
else:
return (['self'], 'args', 'kwargs', None)
def unbound_method_to_callable(func_or_cls):
"""Adjust the incoming callable such that a 'self' argument is not required."""
if isinstance(func_or_cls, types.MethodType) and not func_or_cls.im_self:
return func_or_cls.im_func
else:
return func_or_cls
class portable_instancemethod(object):
"""Turn an instancemethod into a (parent, name) pair
to produce a serializable callable.
"""
def __init__(self, meth):
self.target = meth.im_self
self.name = meth.__name__
def __call__(self, *arg, **kw):
return getattr(self.target, self.name)(*arg, **kw)
def class_hierarchy(cls):
"""Return an unordered sequence of all classes related to cls.
Traverses diamond hierarchies.
Fibs slightly: subclasses of builtin types are not returned. Thus
class_hierarchy(class A(object)) returns (A, object), not A plus every
class systemwide that derives from object.
Old-style classes are discarded and hierarchies rooted on them
will not be descended.
"""
# Py2K
if isinstance(cls, types.ClassType):
return list()
# end Py2K
hier = set([cls])
process = list(cls.__mro__)
while process:
c = process.pop()
# Py2K
if isinstance(c, types.ClassType):
continue
for b in (_ for _ in c.__bases__
if _ not in hier and not isinstance(_, types.ClassType)):
# end Py2K
# Py3K
#for b in (_ for _ in c.__bases__
# if _ not in hier):
process.append(b)
hier.add(b)
# Py3K
#if c.__module__ == 'builtins' or not hasattr(c, '__subclasses__'):
# continue
# Py2K
if c.__module__ == '__builtin__' or not hasattr(c, '__subclasses__'):
continue
# end Py2K
for s in [_ for _ in c.__subclasses__() if _ not in hier]:
process.append(s)
hier.add(s)
return list(hier)
def iterate_attributes(cls):
"""iterate all the keys and attributes associated
with a class, without using getattr().
Does not use getattr() so that class-sensitive
descriptors (i.e. property.__get__()) are not called.
"""
keys = dir(cls)
for key in keys:
for c in cls.__mro__:
if key in c.__dict__:
yield (key, c.__dict__[key])
break
def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None,
name='self.proxy', from_instance=None):
"""Automates delegation of __specials__ for a proxying type."""
if only:
dunders = only
else:
if skip is None:
skip = ('__slots__', '__del__', '__getattribute__',
'__metaclass__', '__getstate__', '__setstate__')
dunders = [m for m in dir(from_cls)
if (m.startswith('__') and m.endswith('__') and
not hasattr(into_cls, m) and m not in skip)]
for method in dunders:
try:
fn = getattr(from_cls, method)
if not hasattr(fn, '__call__'):
continue
fn = getattr(fn, 'im_func', fn)
except AttributeError:
continue
try:
spec = inspect.getargspec(fn)
fn_args = inspect.formatargspec(spec[0])
d_args = inspect.formatargspec(spec[0][1:])
except TypeError:
fn_args = '(self, *args, **kw)'
d_args = '(*args, **kw)'
py = ("def %(method)s%(fn_args)s: "
"return %(name)s.%(method)s%(d_args)s" % locals())
env = from_instance is not None and {name: from_instance} or {}
exec py in env
try:
env[method].func_defaults = fn.func_defaults
except AttributeError:
pass
setattr(into_cls, method, env[method])
def methods_equivalent(meth1, meth2):
"""Return True if the two methods are the same implementation."""
# Py3K
#return getattr(meth1, '__func__', meth1) is getattr(meth2, '__func__', meth2)
# Py2K
return getattr(meth1, 'im_func', meth1) is getattr(meth2, 'im_func', meth2)
# end Py2K
def as_interface(obj, cls=None, methods=None, required=None):
"""Ensure basic interface compliance for an instance or dict of callables.
Checks that ``obj`` implements public methods of ``cls`` or has members
listed in ``methods``. If ``required`` is not supplied, implementing at
least one interface method is sufficient. Methods present on ``obj`` that
are not in the interface are ignored.
If ``obj`` is a dict and ``dict`` does not meet the interface
requirements, the keys of the dictionary are inspected. Keys present in
``obj`` that are not in the interface will raise TypeErrors.
Raises TypeError if ``obj`` does not meet the interface criteria.
In all passing cases, an object with callable members is returned. In the
simple case, ``obj`` is returned as-is; if dict processing kicks in then
an anonymous class is returned.
obj
A type, instance, or dictionary of callables.
cls
Optional, a type. All public methods of cls are considered the
interface. An ``obj`` instance of cls will always pass, ignoring
``required``.
methods
Optional, a sequence of method names to consider as the interface.
required
Optional, a sequence of mandatory implementations. If omitted, an
``obj`` that provides at least one interface method is considered
sufficient. As a convenience, required may be a type, in which case
all public methods of the type are required.
"""
if not cls and not methods:
raise TypeError('a class or collection of method names are required')
if isinstance(cls, type) and isinstance(obj, cls):
return obj
interface = set(methods or [m for m in dir(cls) if not m.startswith('_')])
implemented = set(dir(obj))
complies = operator.ge
if isinstance(required, type):
required = interface
elif not required:
required = set()
complies = operator.gt
else:
required = set(required)
if complies(implemented.intersection(interface), required):
return obj
# No dict duck typing here.
if not type(obj) is dict:
qualifier = complies is operator.gt and 'any of' or 'all of'
raise TypeError("%r does not implement %s: %s" % (
obj, qualifier, ', '.join(interface)))
class AnonymousInterface(object):
"""A callable-holding shell."""
if cls:
AnonymousInterface.__name__ = 'Anonymous' + cls.__name__
found = set()
for method, impl in dictlike_iteritems(obj):
if method not in interface:
raise TypeError("%r: unknown in this interface" % method)
if not callable(impl):
raise TypeError("%r=%r is not callable" % (method, impl))
setattr(AnonymousInterface, method, staticmethod(impl))
found.add(method)
if complies(found, required):
return AnonymousInterface
raise TypeError("dictionary does not contain required keys %s" %
', '.join(required - found))
class memoized_property(object):
"""A read-only @property that is only evaluated once."""
def __init__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
obj.__dict__[self.__name__] = result = self.fget(obj)
return result
class memoized_instancemethod(object):
"""Decorate a method memoize its return value.
Best applied to no-arg methods: memoization is not sensitive to
argument values, and will always return the same value even when
called with different arguments.
"""
def __init__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
def oneshot(*args, **kw):
result = self.fget(obj, *args, **kw)
memo = lambda *a, **kw: result
memo.__name__ = self.__name__
memo.__doc__ = self.__doc__
obj.__dict__[self.__name__] = memo
return result
oneshot.__name__ = self.__name__
oneshot.__doc__ = self.__doc__
return oneshot
def reset_memoized(instance, name):
instance.__dict__.pop(name, None)
class group_expirable_memoized_property(object):
"""A family of @memoized_properties that can be expired in tandem."""
def __init__(self):
self.attributes = []
def expire_instance(self, instance):
"""Expire all memoized properties for *instance*."""
stash = instance.__dict__
for attribute in self.attributes:
stash.pop(attribute, None)
def __call__(self, fn):
self.attributes.append(fn.__name__)
return memoized_property(fn)
class importlater(object):
"""Deferred import object.
e.g.::
somesubmod = importlater("mypackage.somemodule", "somesubmod")
is equivalent to::
from mypackage.somemodule import somesubmod
except evaluated upon attribute access to "somesubmod".
"""
def __init__(self, path, addtl=None):
self._il_path = path
self._il_addtl = addtl
@memoized_property
def module(self):
if self._il_addtl:
m = __import__(self._il_path, globals(), locals(),
[self._il_addtl])
try:
return getattr(m, self._il_addtl)
except AttributeError:
raise ImportError(
"Module %s has no attribute '%s'" %
(self._il_path, self._il_addtl)
)
else:
m = __import__(self._il_path)
for token in self._il_path.split(".")[1:]:
m = getattr(m, token)
return m
def __getattr__(self, key):
try:
attr = getattr(self.module, key)
except AttributeError:
raise AttributeError(
"Module %s has no attribute '%s'" %
(self._il_path, key)
)
self.__dict__[key] = attr
return attr
# from paste.deploy.converters
def asbool(obj):
if isinstance(obj, (str, unicode)):
obj = obj.strip().lower()
if obj in ['true', 'yes', 'on', 'y', 't', '1']:
return True
elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
return False
else:
raise ValueError("String is not true/false: %r" % obj)
return bool(obj)
def bool_or_str(*text):
"""Return a callable that will evaulate a string as
boolean, or one of a set of "alternate" string values.
"""
def bool_or_value(obj):
if obj in text:
return obj
else:
return asbool(obj)
return bool_or_value
def asint(value):
"""Coerce to integer."""
if value is None:
return value
return int(value)
def coerce_kw_type(kw, key, type_, flexi_bool=True):
"""If 'key' is present in dict 'kw', coerce its value to type 'type\_' if
necessary. If 'flexi_bool' is True, the string '0' is considered false
when coercing to boolean.
"""
if key in kw and type(kw[key]) is not type_ and kw[key] is not None:
if type_ is bool and flexi_bool:
kw[key] = asbool(kw[key])
else:
kw[key] = type_(kw[key])
def constructor_copy(obj, cls, **kw):
"""Instantiate cls using the __dict__ of obj as constructor arguments.
Uses inspect to match the named arguments of ``cls``.
"""
names = get_cls_kwargs(cls)
kw.update((k, obj.__dict__[k]) for k in names if k in obj.__dict__)
return cls(**kw)
def duck_type_collection(specimen, default=None):
"""Given an instance or class, guess if it is or is acting as one of
the basic collection types: list, set and dict. If the __emulates__
property is present, return that preferentially.
"""
if hasattr(specimen, '__emulates__'):
# canonicalize set vs sets.Set to a standard: the builtin set
if (specimen.__emulates__ is not None and
issubclass(specimen.__emulates__, set_types)):
return set
else:
return specimen.__emulates__
isa = isinstance(specimen, type) and issubclass or isinstance
if isa(specimen, list):
return list
elif isa(specimen, set_types):
return set
elif isa(specimen, dict):
return dict
if hasattr(specimen, 'append'):
return list
elif hasattr(specimen, 'add'):
return set
elif hasattr(specimen, 'set'):
return dict
else:
return default
def assert_arg_type(arg, argtype, name):
if isinstance(arg, argtype):
return arg
else:
if isinstance(argtype, tuple):
raise exc.ArgumentError(
"Argument '%s' is expected to be one of type %s, got '%s'" %
(name, ' or '.join("'%s'" % a for a in argtype), type(arg)))
else:
raise exc.ArgumentError(
"Argument '%s' is expected to be of type '%s', got '%s'" %
(name, argtype, type(arg)))
def dictlike_iteritems(dictlike):
"""Return a (key, value) iterator for almost any dict-like object."""
# Py3K
#if hasattr(dictlike, 'items'):
# return dictlike.items()
# Py2K
if hasattr(dictlike, 'iteritems'):
return dictlike.iteritems()
elif hasattr(dictlike, 'items'):
return iter(dictlike.items())
# end Py2K
getter = getattr(dictlike, '__getitem__', getattr(dictlike, 'get', None))
if getter is None:
raise TypeError(
"Object '%r' is not dict-like" % dictlike)
if hasattr(dictlike, 'iterkeys'):
def iterator():
for key in dictlike.iterkeys():
yield key, getter(key)
return iterator()
elif hasattr(dictlike, 'keys'):
return iter((key, getter(key)) for key in dictlike.keys())
else:
raise TypeError(
"Object '%r' is not dict-like" % dictlike)
class classproperty(property):
"""A decorator that behaves like @property except that operates
on classes rather than instances.
The decorator is currently special when using the declarative
module, but note that the
:class:`~.sqlalchemy.ext.declarative.declared_attr`
decorator should be used for this purpose with declarative.
"""
def __init__(self, fget, *arg, **kw):
super(classproperty, self).__init__(fget, *arg, **kw)
self.__doc__ = fget.__doc__
def __get__(desc, self, cls):
return desc.fget(cls)
class _symbol(object):
def __init__(self, name, doc=None):
"""Construct a new named symbol."""
assert isinstance(name, str)
self.name = name
if doc:
self.__doc__ = doc
def __reduce__(self):
return symbol, (self.name,)
def __repr__(self):
return "<symbol '%s>" % self.name
_symbol.__name__ = 'symbol'
class symbol(object):
"""A constant symbol.
>>> symbol('foo') is symbol('foo')
True
>>> symbol('foo')
<symbol 'foo>
A slight refinement of the MAGICCOOKIE=object() pattern. The primary
advantage of symbol() is its repr(). They are also singletons.
Repeated calls of symbol('name') will all return the same instance.
The optional ``doc`` argument assigns to ``__doc__``. This
is strictly so that Sphinx autoattr picks up the docstring we want
(it doesn't appear to pick up the in-module docstring if the datamember
is in a different module - autoattribute also blows up completely).
If Sphinx fixes/improves this then we would no longer need
``doc`` here.
"""
symbols = {}
_lock = threading.Lock()
def __new__(cls, name, doc=None):
cls._lock.acquire()
try:
sym = cls.symbols.get(name)
if sym is None:
cls.symbols[name] = sym = _symbol(name, doc)
return sym
finally:
symbol._lock.release()
_creation_order = 1
def set_creation_order(instance):
"""Assign a '_creation_order' sequence to the given instance.
This allows multiple instances to be sorted in order of creation
(typically within a single thread; the counter is not particularly
threadsafe).
"""
global _creation_order
instance._creation_order = _creation_order
_creation_order += 1
def warn_exception(func, *args, **kwargs):
"""executes the given function, catches all exceptions and converts to a warning."""
try:
return func(*args, **kwargs)
except:
warn("%s('%s') ignored" % sys.exc_info()[0:2])
def warn(msg, stacklevel=3):
"""Issue a warning.
If msg is a string, :class:`.exc.SAWarning` is used as
the category.
.. note:: This function is swapped out when the test suite
runs, with a compatible version that uses
warnings.warn_explicit, so that the warnings registry can
be controlled.
"""
if isinstance(msg, basestring):
warnings.warn(msg, exc.SAWarning, stacklevel=stacklevel)
else:
warnings.warn(msg, stacklevel=stacklevel)
_SQLA_RE = re.compile(r'sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py')
_UNITTEST_RE = re.compile(r'unit(?:2|test2?/)')
def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE):
"""Chop extraneous lines off beginning and end of a traceback.
:param tb:
a list of traceback lines as returned by ``traceback.format_stack()``
:param exclude_prefix:
a regular expression object matching lines to skip at beginning of ``tb``
:param exclude_suffix:
a regular expression object matching lines to skip at end of ``tb``
"""
start = 0
end = len(tb) - 1
while start <= end and exclude_prefix.search(tb[start]):
start += 1
while start <= end and exclude_suffix.search(tb[end]):
end -= 1
return tb[start:end+1]
NoneType = type(None)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServiceEndpointPolicyDefinitionsOperations:
"""ServiceEndpointPolicyDefinitionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
service_endpoint_policy_definition_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
service_endpoint_policy_definition_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified ServiceEndpoint policy definitions.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the Service Endpoint Policy.
:type service_endpoint_policy_name: str
:param service_endpoint_policy_definition_name: The name of the service endpoint policy
definition.
:type service_endpoint_policy_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
service_endpoint_policy_definition_name=service_endpoint_policy_definition_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
service_endpoint_policy_definition_name: str,
**kwargs: Any
) -> "_models.ServiceEndpointPolicyDefinition":
"""Get the specified service endpoint policy definitions from service endpoint policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param service_endpoint_policy_definition_name: The name of the service endpoint policy
definition.
:type service_endpoint_policy_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceEndpointPolicyDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.ServiceEndpointPolicyDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
service_endpoint_policy_definition_name: str,
service_endpoint_policy_definitions: "_models.ServiceEndpointPolicyDefinition",
**kwargs: Any
) -> "_models.ServiceEndpointPolicyDefinition":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(service_endpoint_policy_definitions, 'ServiceEndpointPolicyDefinition')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
service_endpoint_policy_definition_name: str,
service_endpoint_policy_definitions: "_models.ServiceEndpointPolicyDefinition",
**kwargs: Any
) -> AsyncLROPoller["_models.ServiceEndpointPolicyDefinition"]:
"""Creates or updates a service endpoint policy definition in the specified service endpoint
policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param service_endpoint_policy_definition_name: The name of the service endpoint policy
definition.
:type service_endpoint_policy_definition_name: str
:param service_endpoint_policy_definitions: Parameters supplied to the create or update service
endpoint policy definition operation.
:type service_endpoint_policy_definitions: ~azure.mgmt.network.v2019_11_01.models.ServiceEndpointPolicyDefinition
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ServiceEndpointPolicyDefinition or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_11_01.models.ServiceEndpointPolicyDefinition]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinition"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
service_endpoint_policy_definition_name=service_endpoint_policy_definition_name,
service_endpoint_policy_definitions=service_endpoint_policy_definitions,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'serviceEndpointPolicyDefinitionName': self._serialize.url("service_endpoint_policy_definition_name", service_endpoint_policy_definition_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ServiceEndpointPolicyDefinitionListResult"]:
"""Gets all service endpoint policy definitions in a service end point policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceEndpointPolicyDefinitionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_11_01.models.ServiceEndpointPolicyDefinitionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyDefinitionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyDefinitionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions'} # type: ignore
|
|
from django.core.urlresolvers import reverse_lazy
import os,sys
from os.path import dirname, join, exists
import logging.config
LOGFILE_BASE = 'wsgi'
#print os.environ
if 'django_mode' in os.environ:
#if 'mod_wsgi.process_group' in os.environ:
LOGFILE_BASE = 'wsgi'
else:
print "running debug server"
LOGFILE_BASE = 'dbg'
# Build paths inside the project like this: join(BASE_DIR, "directory")
BASE_DIR = dirname(dirname(dirname(__file__)))
STATIC_ROOT = join(BASE_DIR, '..', 'site', 'static')
STATICFILES_DIRS = [
join(BASE_DIR, 'static')
]
MEDIA_ROOT = join(BASE_DIR, 'media')
MEDIA_URL = "/media/"
STATIC_URL = '/static/'
MENU_SELECT_PARENTS = True
MENU_HIDE_EMPTY = False
# Log everything to the logs directory at the top
LOGFILE_ROOT = join(dirname(BASE_DIR), 'logs')
# Use Django templates using the new Django 1.8 TEMPLATES settings
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
join(BASE_DIR, 'templates'),
# insert more TEMPLATE_DIRS here
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Use 12factor inspired environment variables or from a file
import environ
env = environ.Env()
# Ideally the env file should live outside the git repo,
# i.e. BASE_DIR.parent.parent
env_file = join(dirname(__file__), 'local.env')
if exists(env_file):
environ.Env.read_env(str(env_file))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Raises ImproperlyConfigured exception if SECRET_KEY not in os.environ
SECRET_KEY = env('SECRET_KEY')
ALLOWED_HOSTS = [
u'127.0.0.1',
u'localhost',
'*',  # the wildcard makes the entries above redundant; tighten for production
]
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
#'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#'authtools',
'menu',
'crispy_forms',
'easy_thumbnails',
'django_tables2',
'tinymce',
'profiles',
'accounts',
'zs',
'import_export',
'track',
'bootstrap3'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'godiva_web.urls'
WSGI_APPLICATION = 'godiva_web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES_ORIG = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in
# os.environ
'default': env.db(),
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
# Crispy Form Theme - Bootstrap 3
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# For Bootstrap 3, change error alert to 'danger'
from django.contrib import messages
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
# Authentication Settings
#AUTH_USER_MODEL = auth.User
LOGIN_REDIRECT_URL = reverse_lazy("profiles:show_self")
LOGIN_URL = reverse_lazy("accounts:login")
THUMBNAIL_EXTENSION = 'png' # Or any extn for your thumbnails
# Reset logging
# (see http://www.caktusgroup.com/blog/2015/01/27/Django-Logging-Configuration-logging_config-default-settings-logger/)
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(pathname)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'django_log_file': {
'level': 'WARNING',
'class': 'logging.FileHandler',
'filename': join(LOGFILE_ROOT, LOGFILE_BASE+'.django.log'),
'formatter': 'verbose'
},
'proj_log_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': join(LOGFILE_ROOT, LOGFILE_BASE+'.project.log'),
'formatter': 'verbose'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'loggers': {
'django': {
'handlers': ['django_log_file'],
'propagate': True,
'level': 'WARNING',
},
'project': {
'handlers': ['proj_log_file'],
'level': 'DEBUG',
},
}
}
logging.config.dictConfig(LOGGING)
|
|
# stdlib
from datetime import datetime
import os.path
import re
import socket
import ssl
import time
import warnings
from urlparse import urlparse
# 3rd party
import requests
import tornado
from requests.adapters import HTTPAdapter
from requests.packages import urllib3
from requests.packages.urllib3.util import ssl_
from requests.packages.urllib3.exceptions import (
SecurityWarning,
)
from requests.packages.urllib3.packages.ssl_match_hostname import \
match_hostname
# project
from checks.network_checks import EventType, NetworkCheck, Status
from config import _is_affirmative
from util import headers as agent_headers
class WeakCiphersHTTPSConnection(urllib3.connection.VerifiedHTTPSConnection):
SUPPORTED_CIPHERS = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:'
'ECDH+HIGH:DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:'
'RSA+3DES:ECDH+RC4:DH+RC4:RSA+RC4:!aNULL:!eNULL:!EXP:-MD5:RSA+RC4+MD5'
)
def __init__(self, host, port, ciphers=None, **kwargs):
self.ciphers = ciphers if ciphers is not None else self.SUPPORTED_CIPHERS
super(WeakCiphersHTTPSConnection, self).__init__(host, port, **kwargs)
def connect(self):
# Add certificate verification
conn = self._new_conn()
resolved_cert_reqs = ssl_.resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = ssl_.resolve_ssl_version(self.ssl_version)
hostname = self.host
self.sock = conn
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
# Override the host with the one we're requesting data from.
hostname = self._tunnel_host
# Wrap socket using verification with the root certs in trusted_root_certs
self.sock = ssl_.ssl_wrap_socket(conn, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
server_hostname=hostname,
ssl_version=resolved_ssl_version,
ciphers=self.ciphers)
if self.assert_fingerprint:
ssl_.assert_fingerprint(self.sock.getpeercert(binary_form=True), self.assert_fingerprint)
elif resolved_cert_reqs != ssl.CERT_NONE \
and self.assert_hostname is not False:
cert = self.sock.getpeercert()
if not cert.get('subjectAltName', ()):
warnings.warn((
'Certificate has no `subjectAltName`, falling back to check for a `commonName` for now. '
'This feature is being removed by major browsers and deprecated by RFC 2818. '
'(See https://github.com/shazow/urllib3/issues/497 for details.)'),
SecurityWarning
)
match_hostname(cert, self.assert_hostname or hostname)
self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED
or self.assert_fingerprint is not None)
class WeakCiphersHTTPSConnectionPool(urllib3.connectionpool.HTTPSConnectionPool):
ConnectionCls = WeakCiphersHTTPSConnection
class WeakCiphersPoolManager(urllib3.poolmanager.PoolManager):
def _new_pool(self, scheme, host, port):
if scheme == 'https':
return WeakCiphersHTTPSConnectionPool(host, port, **(self.connection_pool_kw))
return super(WeakCiphersPoolManager, self)._new_pool(scheme, host, port)
class WeakCiphersAdapter(HTTPAdapter):
""""Transport adapter" that allows us to use TLS_RSA_WITH_RC4_128_MD5."""
def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs):
# Rewrite of the
# requests.adapters.HTTPAdapter.init_poolmanager method
# to use WeakCiphersPoolManager instead of
# urllib3's PoolManager
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = WeakCiphersPoolManager(num_pools=connections,
maxsize=maxsize, block=block, strict=True, **pool_kwargs)
def get_ca_certs_path():
"""
Get a path to the trusted certificates of the system
"""
CA_CERTS = [
'/opt/datadog-agent/embedded/ssl/certs/cacert.pem',
os.path.join(os.path.dirname(tornado.__file__), 'ca-certificates.crt'),
'/etc/ssl/certs/ca-certificates.crt',
]
for f in CA_CERTS:
if os.path.exists(f):
return f
return None
class HTTPCheck(NetworkCheck):
SOURCE_TYPE_NAME = 'system'
SC_STATUS = 'http.can_connect'
SC_SSL_CERT = 'http.ssl_cert'
def __init__(self, name, init_config, agentConfig, instances):
self.ca_certs = init_config.get('ca_certs', get_ca_certs_path())
NetworkCheck.__init__(self, name, init_config, agentConfig, instances)
def _load_conf(self, instance):
# Fetches the conf
tags = instance.get('tags', [])
username = instance.get('username')
password = instance.get('password')
http_response_status_code = str(instance.get('http_response_status_code', "(1|2|3)\d\d"))
timeout = int(instance.get('timeout', 10))
config_headers = instance.get('headers', {})
headers = agent_headers(self.agentConfig)
headers.update(config_headers)
url = instance.get('url')
content_match = instance.get('content_match')
response_time = _is_affirmative(instance.get('collect_response_time', True))
if not url:
raise Exception("Bad configuration. You must specify a url")
include_content = _is_affirmative(instance.get('include_content', False))
disable_ssl_validation = _is_affirmative(instance.get('disable_ssl_validation', True))  # renamed to avoid shadowing the ssl module
ssl_expire = _is_affirmative(instance.get('check_certificate_expiration', True))
instance_ca_certs = instance.get('ca_certs', self.ca_certs)
weakcipher = _is_affirmative(instance.get('weakciphers', False))
return url, username, password, http_response_status_code, timeout, include_content,\
headers, response_time, content_match, tags, disable_ssl_validation, ssl_expire,\
instance_ca_certs, weakcipher
def _check(self, instance):
addr, username, password, http_response_status_code, timeout, include_content, headers,\
response_time, content_match, tags, disable_ssl_validation,\
ssl_expire, instance_ca_certs, weakcipher = self._load_conf(instance)
start = time.time()
service_checks = []
try:
parsed_uri = urlparse(addr)
self.log.debug("Connecting to %s" % addr)
if disable_ssl_validation and parsed_uri.scheme == "https":
self.warning("Skipping SSL certificate validation for %s based on configuration"
% addr)
auth = None
if username is not None and password is not None:
auth = (username, password)
sess = requests.Session()
if weakcipher:
base_addr = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
sess.mount(base_addr, WeakCiphersAdapter())
self.log.debug("Weak Ciphers will be used for {0}. Suppoted Cipherlist: {1}".format(
base_addr, WeakCiphersHTTPSConnection.SUPPORTED_CIPHERS))
r = sess.request('GET', addr, auth=auth, timeout=timeout, headers=headers,
verify=False if disable_ssl_validation else instance_ca_certs)
except (socket.timeout, requests.exceptions.ConnectionError, requests.exceptions.Timeout) as e:
length = int((time.time() - start) * 1000)
self.log.info("%s is DOWN, error: %s. Connection failed after %s ms"
% (addr, str(e), length))
service_checks.append((
self.SC_STATUS,
Status.DOWN,
"%s. Connection failed after %s ms" % (str(e), length)
))
except socket.error as e:
length = int((time.time() - start) * 1000)
self.log.info("%s is DOWN, error: %s. Connection failed after %s ms"
% (addr, repr(e), length))
service_checks.append((
self.SC_STATUS,
Status.DOWN,
"Socket error: %s. Connection failed after %s ms" % (repr(e), length)
))
except Exception as e:
length = int((time.time() - start) * 1000)
self.log.error("Unhandled exception %s. Connection failed after %s ms"
% (str(e), length))
raise
# Only report this metric if the site is not down
if response_time and not service_checks:
# Stop the timer as early as possible
running_time = time.time() - start
# Store tags in a temporary list so that we don't modify the global tags data structure
tags_list = list(tags)
tags_list.append('url:%s' % addr)
self.gauge('network.http.response_time', running_time, tags=tags_list)
# Check HTTP response status code
if not (service_checks or re.match(http_response_status_code, str(r.status_code))):
self.log.info("Incorrect HTTP return code. Expected %s, got %s"
% (http_response_status_code, str(r.status_code)))
service_checks.append((
self.SC_STATUS,
Status.DOWN,
"Incorrect HTTP return code. Expected %s, got %s"
% (http_response_status_code, str(r.status_code))
))
if not service_checks:
# Host is UP
# Check whether content matching is configured
if content_match:
content = r.content
if re.search(content_match, content):
self.log.debug("%s is found in return content" % content_match)
service_checks.append((
self.SC_STATUS, Status.UP, "UP"
))
else:
self.log.info("%s not found in content" % content_match)
self.log.debug("Content returned:\n%s" % content)
service_checks.append((
self.SC_STATUS,
Status.DOWN,
'Content "%s" not found in response' % content_match
))
else:
self.log.debug("%s is UP" % addr)
service_checks.append((
self.SC_STATUS, Status.UP, "UP"
))
if ssl_expire and parsed_uri.scheme == "https":
status, msg = self.check_cert_expiration(instance, timeout, instance_ca_certs)
service_checks.append((
self.SC_SSL_CERT, status, msg
))
return service_checks
# FIXME: 5.3 drop this function
def _create_status_event(self, sc_name, status, msg, instance):
# Create only this deprecated event for old check
if sc_name != self.SC_STATUS:
return
# Get the instance settings
url = instance.get('url', None)
name = instance.get('name', None)
nb_failures = self.statuses[name][sc_name].count(Status.DOWN)
nb_tries = len(self.statuses[name][sc_name])
tags = instance.get('tags', [])
tags_list = []
tags_list.extend(tags)
tags_list.append('url:%s' % url)
# Get a custom message that will be displayed in the event
custom_message = instance.get('message', "")
if custom_message:
custom_message += " \n"
# Let the possibility to override the source type name
instance_source_type_name = instance.get('source_type', None)
if instance_source_type_name is None:
source_type = "%s.%s" % (NetworkCheck.SOURCE_TYPE_NAME, name)
else:
source_type = "%s.%s" % (NetworkCheck.SOURCE_TYPE_NAME, instance_source_type_name)
# Get the handles you want to notify
notify = instance.get('notify', self.init_config.get('notify', []))
notify_message = ""
if notify:
notify_list = []
for handle in notify:
notify_list.append("@%s" % handle.strip())
notify_message = " ".join(notify_list) + " \n"
if status == Status.DOWN:
# format the HTTP response body into the event
if isinstance(msg, tuple):
code, reason, content = msg
# truncate the content to at most 200 characters
if len(content) > 200:
content = content[:197] + '...'
msg = "%d %s\n\n%s" % (code, reason, content)
msg = msg.rstrip()
title = "[Alert] %s reported that %s is down" % (self.hostname, name)
alert_type = "error"
msg = "%s %s %s reported that %s (%s) failed %s time(s) within %s last attempt(s)."\
" Last error: %s" % (notify_message, custom_message, self.hostname,
name, url, nb_failures, nb_tries, msg)
event_type = EventType.DOWN
else: # Status is UP
title = "[Recovered] %s reported that %s is up" % (self.hostname, name)
alert_type = "success"
msg = "%s %s %s reported that %s (%s) recovered" \
% (notify_message, custom_message, self.hostname, name, url)
event_type = EventType.UP
return {
'timestamp': int(time.time()),
'event_type': event_type,
'host': self.hostname,
'msg_text': msg,
'msg_title': title,
'alert_type': alert_type,
"source_type_name": source_type,
"event_object": name,
"tags": tags_list
}
def report_as_service_check(self, sc_name, status, instance, msg=None):
instance_name = self.normalize(instance['name'])
url = instance.get('url', None)
sc_tags = ['url:{0}'.format(url), "instance:{0}".format(instance_name)]
custom_tags = instance.get('tags', [])
tags = sc_tags + custom_tags
if sc_name == self.SC_STATUS:
# format the HTTP response body into the event
if isinstance(msg, tuple):
code, reason, content = msg
# truncate the content to at most 200 characters
if len(content) > 200:
content = content[:197] + '...'
msg = "%d %s\n\n%s" % (code, reason, content)
msg = msg.rstrip()
self.service_check(sc_name,
NetworkCheck.STATUS_TO_SERVICE_CHECK[status],
tags=tags,
message=msg
)
def check_cert_expiration(self, instance, timeout, instance_ca_certs):
warning_days = int(instance.get('days_warning', 14))
url = instance.get('url')
o = urlparse(url)
host = o.hostname
port = o.port or 443
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(float(timeout))
sock.connect((host, port))
ssl_sock = ssl.wrap_socket(sock, cert_reqs=ssl.CERT_REQUIRED,
ca_certs=instance_ca_certs)
cert = ssl_sock.getpeercert()
except Exception as e:
return Status.DOWN, "%s" % (str(e))
exp_date = datetime.strptime(cert['notAfter'], "%b %d %H:%M:%S %Y %Z")
days_left = exp_date - datetime.utcnow()
if days_left.days < 0:
return Status.DOWN, "Expired by {0} days".format(days_left.days)
elif days_left.days < warning_days:
return Status.WARNING, "This cert is almost expired, only {0} days left"\
.format(days_left.days)
else:
return Status.UP, "Days left: {0}".format(days_left.days)
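# Illustrative note on the date handling above: ssl.getpeercert() returns
# 'notAfter' as a string such as 'May  5 12:00:00 2030 GMT' (date made up),
# which strptime(..., "%b %d %H:%M:%S %Y %Z") parses into a datetime;
# subtracting datetime.utcnow() gives the timedelta whose .days field drives
# the DOWN / WARNING / UP decision.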
|
|
import sys
from numpy.testing import *
from numpy.core import *
# Guess the UCS length for this python interpreter
if len(buffer(u'u')) == 4:
ucs4 = True
else:
ucs4 = False
# Value that can be represented in UCS2 interpreters
ucs2_value = u'\uFFFF'
# Value that cannot be represented in UCS2 interpreters (but can in UCS4)
ucs4_value = u'\U0010FFFF'
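# Illustrative consequence for buffer lengths (these values are what the
# assertions below check): on a UCS4 build each code point occupies 4 bytes,
# so len(buffer(ucs4_value)) == 4; on a UCS2 build ucs4_value is stored as a
# surrogate *pair* (2 code units * 2 bytes = 4 bytes), while ucs2_value fits
# in a single 2-byte word.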
############################################################
# Creation tests
############################################################
class create_zeros(NumpyTestCase):
"""Check the creation of zero-valued arrays"""
def content_test(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
self.assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
self.assert_(len(ua.data) == nbytes)
# Small check that data in array element is ok
self.assert_(ua_scalar == u'')
# Encode to ascii and double check
self.assert_(ua_scalar.encode('ascii') == '')
# Check buffer lengths for scalars (an empty scalar has a zero-length
# buffer on both UCS2 and UCS4 builds)
self.assert_(len(buffer(ua_scalar)) == 0)
def check_zeros0D(self):
"""Check creation of 0-dimensional objects"""
ua = zeros((), dtype='U%s' % self.ulen)
self.content_test(ua, ua[()], 4*self.ulen)
def check_zerosSD(self):
"""Check creation of single-dimensional objects"""
ua = zeros((2,), dtype='U%s' % self.ulen)
self.content_test(ua, ua[0], 4*self.ulen*2)
self.content_test(ua, ua[1], 4*self.ulen*2)
def check_zerosMD(self):
"""Check creation of multi-dimensional objects"""
ua = zeros((2,3,4), dtype='U%s' % self.ulen)
self.content_test(ua, ua[0,0,0], 4*self.ulen*2*3*4)
self.content_test(ua, ua[-1,-1,-1], 4*self.ulen*2*3*4)
class test_create_zeros_1(create_zeros):
"""Check the creation of zero-valued arrays (size 1)"""
ulen = 1
class test_create_zeros_2(create_zeros):
"""Check the creation of zero-valued arrays (size 2)"""
ulen = 2
class test_create_zeros_1009(create_zeros):
"""Check the creation of zero-valued arrays (size 1009)"""
ulen = 1009
class create_values(NumpyTestCase):
"""Check the creation of unicode arrays with values"""
def content_test(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
self.assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
self.assert_(len(ua.data) == nbytes)
# Small check that data in array element is ok
self.assert_(ua_scalar == self.ucs_value*self.ulen)
# Encode to UTF-8 and double check
self.assert_(ua_scalar.encode('utf-8') == \
(self.ucs_value*self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
if ucs4:
self.assert_(len(buffer(ua_scalar)) == 4*self.ulen)
else:
if self.ucs_value == ucs4_value:
# In UCS2, the \U0010FFFF will be represented using a
# surrogate *pair*
self.assert_(len(buffer(ua_scalar)) == 2*2*self.ulen)
else:
# In UCS2, the \uFFFF will be represented using a
# regular 2-byte word
self.assert_(len(buffer(ua_scalar)) == 2*self.ulen)
def check_values0D(self):
"""Check creation of 0-dimensional objects with values"""
ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen)
self.content_test(ua, ua[()], 4*self.ulen)
def check_valuesSD(self):
"""Check creation of single-dimensional objects with values"""
ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
self.content_test(ua, ua[0], 4*self.ulen*2)
self.content_test(ua, ua[1], 4*self.ulen*2)
def check_valuesMD(self):
"""Check creation of multi-dimensional objects with values"""
ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen)
self.content_test(ua, ua[0,0,0], 4*self.ulen*2*3*4)
self.content_test(ua, ua[-1,-1,-1], 4*self.ulen*2*3*4)
class test_create_values_1_ucs2(create_values):
"""Check the creation of valued arrays (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
class test_create_values_1_ucs4(create_values):
"""Check the creation of valued arrays (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
class test_create_values_2_ucs2(create_values):
"""Check the creation of valued arrays (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
class test_create_values_2_ucs4(create_values):
"""Check the creation of valued arrays (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
class test_create_values_1009_ucs2(create_values):
"""Check the creation of valued arrays (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
class test_create_values_1009_ucs4(create_values):
"""Check the creation of valued arrays (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
############################################################
# Assignment tests
############################################################
class assign_values(NumpyTestCase):
"""Check the assignment of unicode arrays with values"""
def content_test(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
self.assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
self.assert_(len(ua.data) == nbytes)
# Small check that data in array element is ok
self.assert_(ua_scalar == self.ucs_value*self.ulen)
# Encode to UTF-8 and double check
self.assert_(ua_scalar.encode('utf-8') == \
(self.ucs_value*self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
if ucs4:
self.assert_(len(buffer(ua_scalar)) == 4*self.ulen)
else:
if self.ucs_value == ucs4_value:
# In UCS2, the \U0010FFFF will be represented using a
# surrogate *pair*
self.assert_(len(buffer(ua_scalar)) == 2*2*self.ulen)
else:
# In UCS2, the \uFFFF will be represented using a
# regular 2-byte word
self.assert_(len(buffer(ua_scalar)) == 2*self.ulen)
def check_values0D(self):
"""Check assignment of 0-dimensional objects with values"""
ua = zeros((), dtype='U%s' % self.ulen)
ua[()] = self.ucs_value*self.ulen
self.content_test(ua, ua[()], 4*self.ulen)
def check_valuesSD(self):
"""Check assignment of single-dimensional objects with values"""
ua = zeros((2,), dtype='U%s' % self.ulen)
ua[0] = self.ucs_value*self.ulen
self.content_test(ua, ua[0], 4*self.ulen*2)
ua[1] = self.ucs_value*self.ulen
self.content_test(ua, ua[1], 4*self.ulen*2)
def check_valuesMD(self):
"""Check assignment of multi-dimensional objects with values"""
ua = zeros((2,3,4), dtype='U%s' % self.ulen)
ua[0,0,0] = self.ucs_value*self.ulen
self.content_test(ua, ua[0,0,0], 4*self.ulen*2*3*4)
ua[-1,-1,-1] = self.ucs_value*self.ulen
self.content_test(ua, ua[-1,-1,-1], 4*self.ulen*2*3*4)
class test_assign_values_1_ucs2(assign_values):
"""Check the assignment of valued arrays (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
class test_assign_values_1_ucs4(assign_values):
"""Check the assignment of valued arrays (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
class test_assign_values_2_ucs2(assign_values):
"""Check the assignment of valued arrays (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
class test_assign_values_2_ucs4(assign_values):
"""Check the assignment of valued arrays (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
class test_assign_values_1009_ucs2(assign_values):
"""Check the assignment of valued arrays (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
class test_assign_values_1009_ucs4(assign_values):
"""Check the assignment of valued arrays (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
############################################################
# Byteorder tests
############################################################
class byteorder_values(NumpyTestCase):
"""Check the byteorder of unicode arrays in round-trip conversions"""
def check_values0D(self):
"""Check byteorder of 0-dimensional objects"""
ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
# This changes the interpretation of the data region (but not the
# actual data), therefore the returned scalars are not
# the same (they are byte-swapped versions of each other).
self.assert_(ua[()] != ua2[()])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
def check_valuesSD(self):
"""Check byteorder of single-dimensional objects"""
ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
self.assert_(ua[0] != ua2[0])
self.assert_(ua[-1] != ua2[-1])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
def check_valuesMD(self):
"""Check byteorder of multi-dimensional objects"""
ua = array([[[self.ucs_value*self.ulen]*2]*3]*4,
dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
self.assert_(ua[0,0,0] != ua2[0,0,0])
self.assert_(ua[-1,-1,-1] != ua2[-1,-1,-1])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
class test_byteorder_1_ucs2(byteorder_values):
"""Check the byteorder in unicode (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
class test_byteorder_1_ucs4(byteorder_values):
"""Check the byteorder in unicode (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
class test_byteorder_2_ucs2(byteorder_values):
"""Check the byteorder in unicode (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
class test_byteorder_2_ucs4(byteorder_values):
"""Check the byteorder in unicode (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
class test_byteorder_1009_ucs2(byteorder_values):
"""Check the byteorder in unicode (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
class test_byteorder_1009_ucs4(byteorder_values):
"""Check the byteorder in unicode (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
if __name__ == "__main__":
NumpyTest().run()
|
|
# Copyright (C) 2014 - The MITRE Corporation
# For license information, see the LICENSE.txt file
#: Namespace map of namespaces libtaxii knows about
NS_MAP = {
'taxii': 'http://taxii.mitre.org/messages/taxii_xml_binding-1',
'taxii_11': 'http://taxii.mitre.org/messages/taxii_xml_binding-1.1',
'tdq': 'http://taxii.mitre.org/query/taxii_default_query-1',
}
#: alias for NS_MAP for backward compatibility
ns_map = NS_MAP
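# Hedged usage sketch: the map above can be passed to namespace-aware XPath
# lookups in lxml/ElementTree; the element variable and path here are
# illustrative only:
#   root.findall('.//taxii_11:Status_Message', namespaces=NS_MAP)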
#: Constant identifying a Status Message
MSG_STATUS_MESSAGE = 'Status_Message'
#: Constant identifying a Discovery Request Message
MSG_DISCOVERY_REQUEST = 'Discovery_Request'
#: Constant identifying a Discovery Response Message
MSG_DISCOVERY_RESPONSE = 'Discovery_Response'
#: Constant identifying a Feed Information Request Message
MSG_FEED_INFORMATION_REQUEST = 'Feed_Information_Request'
#: Constant identifying a Feed Information Response Message
MSG_FEED_INFORMATION_RESPONSE = 'Feed_Information_Response'
#: Constant identifying a Subscription Management Request Message
MSG_MANAGE_FEED_SUBSCRIPTION_REQUEST = 'Subscription_Management_Request'
#: Constant identifying a Subscription Management Response Message
MSG_MANAGE_FEED_SUBSCRIPTION_RESPONSE = 'Subscription_Management_Response'
#: Constant identifying a Poll Request Message
MSG_POLL_REQUEST = 'Poll_Request'
#: Constant identifying a Poll Response Message
MSG_POLL_RESPONSE = 'Poll_Response'
#: Constant identifying a Inbox Message
MSG_INBOX_MESSAGE = 'Inbox_Message'
#: TAXII 1.0 Message Types
MSG_TYPES_10 = (MSG_STATUS_MESSAGE, MSG_DISCOVERY_REQUEST, MSG_DISCOVERY_RESPONSE,
MSG_FEED_INFORMATION_REQUEST, MSG_FEED_INFORMATION_RESPONSE,
MSG_MANAGE_FEED_SUBSCRIPTION_REQUEST,
MSG_MANAGE_FEED_SUBSCRIPTION_RESPONSE, MSG_POLL_REQUEST,
MSG_POLL_RESPONSE, MSG_INBOX_MESSAGE)
# New Message Types in TAXII 1.1
#: Constant identifying a Poll Fulfillment Request Message
MSG_POLL_FULFILLMENT_REQUEST = 'Poll_Fulfillment'
#: Constant identifying a Collection Information Request
MSG_COLLECTION_INFORMATION_REQUEST = 'Collection_Information_Request'
#: Constant identifying a Collection Information Response
MSG_COLLECTION_INFORMATION_RESPONSE = 'Collection_Information_Response'
#: Constant identifying a Subscription Request
MSG_MANAGE_COLLECTION_SUBSCRIPTION_REQUEST = 'Subscription_Management_Request'
#: Constant identifying a Subscription Response
MSG_MANAGE_COLLECTION_SUBSCRIPTION_RESPONSE = 'Subscription_Management_Response'
#: Tuple of all TAXII 1.1 Message Types
MSG_TYPES_11 = (MSG_STATUS_MESSAGE, MSG_DISCOVERY_REQUEST, MSG_DISCOVERY_RESPONSE,
MSG_COLLECTION_INFORMATION_REQUEST, MSG_COLLECTION_INFORMATION_RESPONSE,
MSG_MANAGE_COLLECTION_SUBSCRIPTION_REQUEST,
MSG_MANAGE_COLLECTION_SUBSCRIPTION_RESPONSE, MSG_POLL_REQUEST,
MSG_POLL_RESPONSE, MSG_INBOX_MESSAGE, MSG_POLL_FULFILLMENT_REQUEST)
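# Hedged helper sketch (illustrative, not part of libtaxii): message-type
# strings received on the wire can be checked against the tuples above
# before dispatch.
def _is_known_message_type(message_type, version='1.1'):
    valid = MSG_TYPES_11 if version == '1.1' else MSG_TYPES_10
    return message_type in valid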
# TAXII 1.0 Status Types
#: Constant identifying a Status Type of Bad Message
ST_BAD_MESSAGE = 'BAD_MESSAGE'
#: Constant identifying a Status Type of Denied
ST_DENIED = 'DENIED'
#: Constant identifying a Status Type of Failure
ST_FAILURE = 'FAILURE'
#: Constant identifying a Status Type of Not Found
ST_NOT_FOUND = 'NOT_FOUND'
#: Constant identifying a Status Type of Polling Unsupported
ST_POLLING_UNSUPPORTED = 'POLLING_UNSUPPORTED'
#: Constant identifying a Status Type of Retry
ST_RETRY = 'RETRY'
#: Constant identifying a Status Type of Success
ST_SUCCESS = 'SUCCESS'
#: Constant identifying a Status Type of Unauthorized
ST_UNAUTHORIZED = 'UNAUTHORIZED'
#: Constant identifying a Status Type of Unsupported Message Binding
ST_UNSUPPORTED_MESSAGE_BINDING = 'UNSUPPORTED_MESSAGE'
#: Constant identifying a Status Type of Unsupported Content Binding
ST_UNSUPPORTED_CONTENT_BINDING = 'UNSUPPORTED_CONTENT'
#: Constant identifying a Status Type of Unsupported Protocol Binding
ST_UNSUPPORTED_PROTOCOL = 'UNSUPPORTED_PROTOCOL'
#: Tuple of all TAXII 1.0 Status Types
ST_TYPES_10 = (ST_BAD_MESSAGE, ST_DENIED, ST_FAILURE, ST_NOT_FOUND,
ST_POLLING_UNSUPPORTED, ST_RETRY, ST_SUCCESS, ST_UNAUTHORIZED,
ST_UNSUPPORTED_MESSAGE_BINDING, ST_UNSUPPORTED_CONTENT_BINDING,
ST_UNSUPPORTED_PROTOCOL)
# New Status Types in TAXII 1.1
#: Constant identifying a Status Type of Asynchronous Poll Error
ST_ASYNCHRONOUS_POLL_ERROR = 'ASYNCHRONOUS_POLL_ERROR'
#: Constant identifying a Status Type of Destination Collection Error
ST_DESTINATION_COLLECTION_ERROR = 'DESTINATION_COLLECTION_ERROR'
#: Constant identifying a Status Type of Invalid Response Part
ST_INVALID_RESPONSE_PART = 'INVALID_RESPONSE_PART'
#: Constant identifying a Status Type of Network Error
ST_NETWORK_ERROR = 'NETWORK_ERROR'
#: Constant identifying a Status Type of Pending
ST_PENDING = 'PENDING'
#: Constant identifying a Status Type of Unsupported Query Format
ST_UNSUPPORTED_QUERY = 'UNSUPPORTED_QUERY'
#: Tuple of all TAXII 1.1 Status types
ST_TYPES_11 = (ST_ASYNCHRONOUS_POLL_ERROR, ST_BAD_MESSAGE, ST_DENIED,
ST_DESTINATION_COLLECTION_ERROR, ST_FAILURE, ST_INVALID_RESPONSE_PART,
ST_NETWORK_ERROR, ST_NOT_FOUND, ST_PENDING, ST_POLLING_UNSUPPORTED,
ST_RETRY, ST_SUCCESS, ST_UNAUTHORIZED, ST_UNSUPPORTED_MESSAGE_BINDING,
ST_UNSUPPORTED_CONTENT_BINDING, ST_UNSUPPORTED_PROTOCOL,
ST_UNSUPPORTED_QUERY)
# TAXII 1.0 Action Types
#: Constant identifying an Action of Subscribe
ACT_SUBSCRIBE = 'SUBSCRIBE'
#: Constant identifying an Action of Unsubscribe
ACT_UNSUBSCRIBE = 'UNSUBSCRIBE'
#: Constant identifying an Action of Status
ACT_STATUS = 'STATUS'
#: Tuple of all TAXII 1.0 Action Types
ACT_TYPES_10 = (ACT_SUBSCRIBE, ACT_UNSUBSCRIBE, ACT_STATUS)
#: Constant identifying an Action of Pause
ACT_PAUSE = 'PAUSE'
#: Constant identifying an Action of Resume
ACT_RESUME = 'RESUME'
#: Tuple of all TAXII 1.1 Action types
ACT_TYPES_11 = (ACT_SUBSCRIBE, ACT_PAUSE, ACT_RESUME, ACT_UNSUBSCRIBE, ACT_STATUS)
# TAXII 1.0 Service Types
#: Constant identifying a Service Type of Inbox
SVC_INBOX = 'INBOX'
#: Constant identifying a Service Type of Poll
SVC_POLL = 'POLL'
#: Constant identifying a Service Type of Feed Management
SVC_FEED_MANAGEMENT = 'FEED_MANAGEMENT'
#: Constant identifying a Service Type of Discovery
SVC_DISCOVERY = 'DISCOVERY'
#: Tuple of all TAXII 1.0 Service Types
SVC_TYPES_10 = (SVC_INBOX, SVC_POLL, SVC_FEED_MANAGEMENT, SVC_DISCOVERY)
# Renamed Service Types in TAXII 1.1
#: Constant identifying a Service Type of Collection Management.
#: "Feed Management" was renamed to "Collection Management" in TAXII 1.1.
SVC_COLLECTION_MANAGEMENT = 'COLLECTION_MANAGEMENT'
#: Tuple of all TAXII 1.1 Service Types
SVC_TYPES_11 = (SVC_INBOX, SVC_POLL, SVC_COLLECTION_MANAGEMENT, SVC_DISCOVERY)
# TAXII 1.1 Subscription Statuses
#: Subscription Status of Active
SS_ACTIVE = 'ACTIVE'
#: Subscription Status of Paused
SS_PAUSED = 'PAUSED'
#: Subscription Status of Unsubscribed
SS_UNSUBSCRIBED = 'UNSUBSCRIBED'
#: Tuple of all TAXII 1.1 Subscription Statuses
SS_TYPES_11 = (SS_ACTIVE, SS_PAUSED, SS_UNSUBSCRIBED)
# TAXII 1.1 Response Types
#: Constant identifying a response type of Full
RT_FULL = 'FULL'
#: Constant identifying a response type of Count only
RT_COUNT_ONLY = 'COUNT_ONLY'
#: Tuple of all TAXII 1.1 Response Types
RT_TYPES_11 = (RT_FULL, RT_COUNT_ONLY)
# TAXII 1.1 Collection Types
#: Constant identifying a collection type of Data Feed
CT_DATA_FEED = 'DATA_FEED'
#: Constant identifying a collection type of Data Set
CT_DATA_SET = 'DATA_SET'
#: Tuple of all TAXII 1.1 Collection Types
CT_TYPES_11 = (CT_DATA_FEED, CT_DATA_SET)
# TAXII 1.1 Status Detail Keys
#: Constant Identifying the Acceptable Destination Status Detail
SD_ACCEPTABLE_DESTINATION = 'ACCEPTABLE_DESTINATION'
#: Constant Identifying the Max Part Number Status Detail
SD_MAX_PART_NUMBER = 'MAX_PART_NUMBER'
#: Constant Identifying the Item Status Detail
SD_ITEM = 'ITEM'
#: Constant Identifying the Estimated Wait Status Detail
SD_ESTIMATED_WAIT = 'ESTIMATED_WAIT'
#: Constant Identifying the Result ID Status Detail
SD_RESULT_ID = 'RESULT_ID'
#: Constant Identifying the Will Push Status Detail
SD_WILL_PUSH = 'WILL_PUSH'
#: Constant Identifying the Supported Binding Status Detail
SD_SUPPORTED_BINDING = 'SUPPORTED_BINDING'
#: Constant Identifying the Supported Content Status Detail
SD_SUPPORTED_CONTENT = 'SUPPORTED_CONTENT'
#: Constant Identifying the Supported Protocol Status Detail
SD_SUPPORTED_PROTOCOL = 'SUPPORTED_PROTOCOL'
#: Constant Identifying the Supported Query Status Detail
SD_SUPPORTED_QUERY = 'SUPPORTED_QUERY'
#: Tuple of all TAXII 1.1 Status Detail Keys
SD_TYPES_11 = (SD_ACCEPTABLE_DESTINATION, SD_MAX_PART_NUMBER, SD_ITEM,
SD_ESTIMATED_WAIT, SD_RESULT_ID, SD_WILL_PUSH,
SD_SUPPORTED_BINDING, SD_SUPPORTED_CONTENT, SD_SUPPORTED_PROTOCOL,
SD_SUPPORTED_QUERY)
#: (For TAXII Default Query) Constant identifying supported Capability Modules
SD_CAPABILITY_MODULE = 'CAPABILITY_MODULE'
#: (For TAXII Default Query) Constant identifying Preferred Scopes
SD_PREFERRED_SCOPE = 'PREFERRED_SCOPE'
#: (For TAXII Default Query) Constant identifying Allowed Scopes
SD_ALLOWED_SCOPE = 'ALLOWED_SCOPE'
#: (For TAXII Default Query) Constant identifying supported Targeting Expression IDs
SD_TARGETING_EXPRESSION_ID = 'TARGETING_EXPRESSION_ID'
#: Format ID for this version of TAXII Default Query
FID_TAXII_DEFAULT_QUERY_10 = 'urn:taxii.mitre.org:query:default:1.0'
# Capability Module IDs
#: Capability Module ID for Core
CM_CORE = 'urn:taxii.mitre.org:query:capability:core-1'
#: Capability Module ID for Regex
CM_REGEX = 'urn:taxii.mitre.org:query:capability:regex-1'
#: Capability Module ID for Timestamp
CM_TIMESTAMP = 'urn:taxii.mitre.org:query:capability:timestamp-1'
#: Tuple of all capability modules defined in TAXII Default Query 1.0
CM_IDS = (CM_CORE, CM_REGEX, CM_TIMESTAMP)
# Operators
#: Operator OR
OP_OR = 'OR'
#: Operator AND
OP_AND = 'AND'
#: Tuple of all operators
OP_TYPES = (OP_OR, OP_AND)
#: Status Type indicating an unsupported capability module
ST_UNSUPPORTED_CAPABILITY_MODULE = 'UNSUPPORTED_CAPABILITY_MODULE'
#: Status Type indicating an unsupported targeting expression
ST_UNSUPPORTED_TARGETING_EXPRESSION = 'UNSUPPORTED_TARGETING_EXPRESSION'
#: Status Type indicating an unsupported targeting expression id
ST_UNSUPPORTED_TARGETING_EXPRESSION_ID = 'UNSUPPORTED_TARGETING_EXPRESSION_ID'
#: Parameter name: value
P_VALUE = 'value'
#: Parameter name: match_type
P_MATCH_TYPE = 'match_type'
#: Parameter name: case_sensitive
P_CASE_SENSITIVE = 'case_sensitive'
#: Tuple of all parameter names
P_NAMES = (P_VALUE, P_MATCH_TYPE, P_CASE_SENSITIVE)
#: Relationship name: equals
R_EQUALS = 'equals'
#: Relationship name: not_equals
R_NOT_EQUALS = 'not_equals'
#: Relationship name: greater_than
R_GREATER_THAN = 'greater_than'
#: Relationship name: greater_than_or_equal
R_GREATER_THAN_OR_EQUAL = 'greater_than_or_equal'
#: Relationship name: less_than
R_LESS_THAN = 'less_than'
#: Relationship name: less_than_or_equal
R_LESS_THAN_OR_EQUAL = 'less_than_or_equal'
#: Relationship name: does_not_exist
R_DOES_NOT_EXIST = 'does_not_exist'
#: Relationship name: exists
R_EXISTS = 'exists'
#: Relationship name: begins_with
R_BEGINS_WITH = 'begins_with'
#: Relationship name: ends_with
R_ENDS_WITH = 'ends_with'
#: Relationship name: contains
R_CONTAINS = 'contains'
#: Relationship name: matches
R_MATCHES = 'matches'
#: Tuple of all relationship names
R_NAMES = (R_EQUALS, R_NOT_EQUALS, R_GREATER_THAN,
R_GREATER_THAN_OR_EQUAL, R_LESS_THAN,
R_LESS_THAN_OR_EQUAL, R_DOES_NOT_EXIST,
R_EXISTS, R_BEGINS_WITH, R_ENDS_WITH,
R_CONTAINS, R_MATCHES)
# TAXII Version IDs #
#: Version ID for the TAXII Services Specification 1.0
VID_TAXII_SERVICES_10 = 'urn:taxii.mitre.org:services:1.0'
#: Version ID for the TAXII Services Specification 1.1
VID_TAXII_SERVICES_11 = 'urn:taxii.mitre.org:services:1.1'
#: Version ID for the TAXII XML Message Binding Specification 1.0
VID_TAXII_XML_10 = 'urn:taxii.mitre.org:message:xml:1.0'
#: Version ID for the TAXII XML Message Binding Specification 1.1
VID_TAXII_XML_11 = 'urn:taxii.mitre.org:message:xml:1.1'
#: Version ID for the TAXII HTTP Protocol Binding Specification 1.0
VID_TAXII_HTTP_10 = 'urn:taxii.mitre.org:protocol:http:1.0'
#: Version ID for the TAXII HTTPS Protocol Binding Specification 1.0
VID_TAXII_HTTPS_10 = 'urn:taxii.mitre.org:protocol:https:1.0'
# Third Party Version IDs
#: Version ID for the CERT EU JSON Message Binding
VID_CERT_EU_JSON_10 = 'urn:cert.europa.eu:message:json:1.0'
# TAXII Content Bindings #
#: Content Binding ID for STIX XML 1.0
CB_STIX_XML_10 = 'urn:stix.mitre.org:xml:1.0'
#: Content Binding ID for STIX XML 1.0.1
CB_STIX_XML_101 = 'urn:stix.mitre.org:xml:1.0.1'
#: Content Binding ID for STIX XML 1.1
CB_STIX_XML_11 = 'urn:stix.mitre.org:xml:1.1'
#: Content Binding ID for STIX XML 1.1.1
CB_STIX_XML_111 = 'urn:stix.mitre.org:xml:1.1.1'
#: Content Binding ID for CAP 1.1
CB_CAP_11 = 'urn:oasis:names:tc:emergency:cap:1.1'
#: Content Binding ID for XML Encryption
CB_XENC_122002 = 'http://www.w3.org/2001/04/xmlenc#'
#: Content Binding ID for SMIME
CB_SMIME = 'application/x-pks7-mime'
STD_INDENT = ' ' # A "Standard Indent" to use for to_text() methods
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Recurrent layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python.keras import activations
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras import constraints
from tensorflow.contrib.keras.python.keras import initializers
from tensorflow.contrib.keras.python.keras import regularizers
from tensorflow.contrib.keras.python.keras.engine import InputSpec
from tensorflow.contrib.keras.python.keras.engine import Layer
from tensorflow.python.framework import tensor_shape
# pylint: disable=access-member-before-definition
def _time_distributed_dense(x,
w,
b=None,
dropout=None,
input_dim=None,
output_dim=None,
timesteps=None,
training=None):
"""Apply `y . w + b` for every temporal slice y of x.
Arguments:
x: input tensor.
w: weight matrix.
b: optional bias vector.
dropout: optional dropout rate (the same dropout mask is applied
for every temporal slice of the input).
input_dim: integer; optional dimensionality of the input.
output_dim: integer; optional dimensionality of the output.
timesteps: integer; optional number of timesteps.
training: training phase tensor or boolean.
Returns:
Output tensor.
"""
if not input_dim:
input_dim = K.shape(x)[2]
if not timesteps:
timesteps = K.shape(x)[1]
if not output_dim:
output_dim = K.shape(w)[1]
if dropout is not None and 0. < dropout < 1.:
# apply the same dropout pattern at every timestep
ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
dropout_matrix = K.dropout(ones, dropout)
expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
x = K.in_train_phase(x * expanded_dropout_matrix, x, training=training)
# collapse time dimension and batch dimension together
x = K.reshape(x, (-1, input_dim))
x = K.dot(x, w)
if b is not None:
x = K.bias_add(x, b)
# reshape to 3D tensor
if K.backend() == 'tensorflow':
x = K.reshape(x, K.stack([-1, timesteps, output_dim]))
x.set_shape([None, None, output_dim])
else:
x = K.reshape(x, (-1, timesteps, output_dim))
return x
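# Shape sketch for the helper above (descriptive only): with x of shape
# (batch, timesteps, input_dim) and w of shape (input_dim, output_dim), the
# collapse / dot / reshape sequence yields (batch, timesteps, output_dim).
# The optional dropout mask is built once from the first timestep and
# repeated over time, so the same units are dropped at every step.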
class Recurrent(Layer):
"""Abstract base class for recurrent layers.
Do not use in a model -- it's not a valid layer!
Use its child classes `LSTM`, `GRU` and `SimpleRNN` instead.
All recurrent layers (`LSTM`, `GRU`, `SimpleRNN`) also
follow the specifications of this class and accept
the keyword arguments listed below.
Example:
```python
# as the first layer in a Sequential model
model = Sequential()
model.add(LSTM(32, input_shape=(10, 64)))
# now model.output_shape == (None, 32)
# note: `None` is the batch dimension.
# for subsequent layers, no need to specify the input size:
model.add(LSTM(16))
# to stack recurrent layers, you must use return_sequences=True
# on any recurrent layer that feeds into another recurrent layer.
# note that you only need to specify the input size on the first layer.
model = Sequential()
model.add(LSTM(64, input_dim=64, input_length=10, return_sequences=True))
model.add(LSTM(32, return_sequences=True))
model.add(LSTM(10))
```
Arguments:
weights: list of Numpy arrays to set as initial weights.
The list should have 3 elements, of shapes:
`[(input_dim, output_dim), (output_dim, output_dim), (output_dim,)]`.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
implementation: one of {0, 1, or 2}.
If set to 0, the RNN will use
an implementation that uses fewer, larger matrix products,
thus running faster on CPU but consuming more memory.
If set to 1, the RNN will use more matrix products,
but smaller ones, thus running slower
(may actually be faster on GPU) while consuming less memory.
If set to 2 (LSTM/GRU only),
the RNN will combine the input gate,
the forget gate and the output gate into a single matrix,
enabling more time-efficient parallelization on the GPU.
Note: RNN dropout must be shared for all gates,
resulting in a slightly reduced regularization.
input_dim: dimensionality of the input (integer).
This argument (or alternatively, the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
input_length: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
Input shape:
3D tensor with shape `(batch_size, timesteps, input_dim)`,
(Optional) 2D tensors with shape `(batch_size, output_dim)`.
Output shape:
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, units)`.
- if `return_sequences`: 3D tensor with shape
`(batch_size, timesteps, units)`.
- else, 2D tensor with shape `(batch_size, units)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an `Embedding` layer with the `mask_zero` parameter
set to `True`.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model:
if using a Sequential model, pass
`batch_input_shape=(...)` to the first layer in your model;
if using the functional API with 1 or more Input layers, pass
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
# Note on specifying the initial state of RNNs
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
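Example of statefulness and a symbolic initial state (a minimal sketch;
shapes and data are illustrative):
```python
model = Sequential()
model.add(LSTM(32, stateful=True, batch_input_shape=(32, 10, 100)))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(x, y, batch_size=32, shuffle=False)  # x: (32 * k, 10, 100)
model.reset_states()
# or, in the functional API, pass the state symbolically:
# outputs = LSTM(32)(inputs, initial_state=[h0, c0])
```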
"""
def __init__(self,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
implementation=0,
**kwargs):
super(Recurrent, self).__init__(**kwargs)
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
self.stateful = stateful
self.unroll = unroll
self.implementation = implementation
self.supports_masking = True
self.input_spec = [InputSpec(ndim=3)]
self.state_spec = None
self.dropout = 0
self.recurrent_dropout = 0
def _compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.return_sequences:
output_shape = (input_shape[0], input_shape[1], self.units)
else:
output_shape = (input_shape[0], self.units)
if self.return_state:
state_shape = [tensor_shape.TensorShape(
(input_shape[0], self.units)) for _ in self.states]
return [tensor_shape.TensorShape(output_shape)] + state_shape
return tensor_shape.TensorShape(output_shape)
def compute_mask(self, inputs, mask):
if isinstance(mask, list):
mask = mask[0]
output_mask = mask if self.return_sequences else None
if self.return_state:
state_mask = [None for _ in self.states]
return [output_mask] + state_mask
return output_mask
def step(self, inputs, states):
raise NotImplementedError
def get_constants(self, inputs, training=None):
return []
def get_initial_state(self, inputs):
# build an all-zero tensor of shape (samples, output_dim)
initial_state = K.zeros_like(inputs) # (samples, timesteps, input_dim)
initial_state = K.sum(initial_state, axis=(1, 2)) # (samples,)
initial_state = K.expand_dims(initial_state) # (samples, 1)
initial_state = K.tile(initial_state, [1,
self.units]) # (samples, output_dim)
initial_state = [initial_state for _ in range(len(self.states))]
return initial_state
def preprocess_input(self, inputs, training=None):
return inputs
def __call__(self, inputs, initial_state=None, **kwargs):
# If `initial_state` is specified,
# and if it is a Keras tensor,
# then add it to the inputs and temporarily
# modify the input spec to include the state.
if initial_state is None:
return super(Recurrent, self).__call__(inputs, **kwargs)
if not isinstance(initial_state, (list, tuple)):
initial_state = [initial_state]
is_keras_tensor = hasattr(initial_state[0], '_keras_history')
for tensor in initial_state:
if hasattr(tensor, '_keras_history') != is_keras_tensor:
raise ValueError('The initial state of an RNN layer cannot be'
' specified with a mix of Keras tensors and'
' non-Keras tensors')
if is_keras_tensor:
# Compute the full input spec, including state
input_spec = self.input_spec
state_spec = self.state_spec
if not isinstance(input_spec, list):
input_spec = [input_spec]
if not isinstance(state_spec, list):
state_spec = [state_spec]
self.input_spec = input_spec + state_spec
# Compute the full inputs, including state
inputs = [inputs] + list(initial_state)
# Perform the call
output = super(Recurrent, self).__call__(inputs, **kwargs)
# Restore original input spec
self.input_spec = input_spec
return output
else:
kwargs['initial_state'] = initial_state
return super(Recurrent, self).__call__(inputs, **kwargs)
def call(self, inputs, mask=None, training=None, initial_state=None):
# input shape: `(samples, time (padded with zeros), input_dim)`
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
if isinstance(inputs, list):
initial_state = inputs[1:]
inputs = inputs[0]
elif initial_state is not None:
pass
elif self.stateful:
initial_state = self.states
else:
initial_state = self.get_initial_state(inputs)
if isinstance(mask, list):
mask = mask[0]
if len(initial_state) != len(self.states):
raise ValueError('Layer has ' + str(len(self.states)) +
' states but was passed ' + str(len(initial_state)) +
' initial states.')
input_shape = K.int_shape(inputs)
if self.unroll and input_shape[1] is None:
raise ValueError('Cannot unroll an RNN if the '
'time dimension is undefined. \n'
'- If using a Sequential model, '
'specify the time dimension by passing '
'an `input_shape` or `batch_input_shape` '
'argument to your first layer. If your '
'first layer is an Embedding, you can '
'also use the `input_length` argument.\n'
'- If using the functional API, specify '
'the time dimension by passing a `shape` '
'or `batch_shape` argument to your Input layer.')
constants = self.get_constants(inputs, training=None)
preprocessed_input = self.preprocess_input(inputs, training=None)
last_output, outputs, states = K.rnn(
self.step,
preprocessed_input,
initial_state,
go_backwards=self.go_backwards,
mask=mask,
constants=constants,
unroll=self.unroll)
if self.stateful:
updates = []
for i in range(len(states)):
updates.append((self.states[i], states[i]))
self.add_update(updates, inputs)
# Properly set learning phase
if 0 < self.dropout + self.recurrent_dropout:
last_output._uses_learning_phase = True
outputs._uses_learning_phase = True
if not self.return_sequences:
outputs = last_output
if self.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return [outputs] + states
return outputs
def reset_states(self, states=None):
if not self.stateful:
raise AttributeError('Layer must be stateful.')
batch_size = self.input_spec[0].shape[0]
if not batch_size:
raise ValueError('If an RNN is stateful, it needs to know '
'its batch size. Specify the batch size '
'of your input tensors: \n'
'- If using a Sequential model, '
'specify the batch size by passing '
'a `batch_input_shape` '
'argument to your first layer.\n'
'- If using the functional API, specify '
'the batch size by passing a '
'`batch_shape` argument to your Input layer.')
# initialize state if None
if self.states[0] is None:
self.states = [K.zeros((batch_size, self.units)) for _ in self.states]
elif states is None:
for state in self.states:
K.set_value(state, np.zeros((batch_size, self.units)))
else:
if not isinstance(states, (list, tuple)):
states = [states]
if len(states) != len(self.states):
raise ValueError('Layer ' + self.name + ' expects ' +
str(len(self.states)) + ' states, '
'but it received ' + str(len(states)) +
' state values. Input received: ' + str(states))
for index, (value, state) in enumerate(zip(states, self.states)):
if value.shape != (batch_size, self.units):
raise ValueError('State ' + str(index) +
' is incompatible with layer ' + self.name +
': expected shape=' + str((batch_size, self.units)) +
', found shape=' + str(value.shape))
K.set_value(state, value)
def get_config(self):
config = {
'return_sequences': self.return_sequences,
'return_state': self.return_state,
'go_backwards': self.go_backwards,
'stateful': self.stateful,
'unroll': self.unroll,
'implementation': self.implementation
}
base_config = super(Recurrent, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class SimpleRNN(Recurrent):
"""Fully-connected RNN where the output is to be fed back to input.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use
(default: hyperbolic tangent, `tanh`).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
References:
- [A Theoretically Grounded Application of Dropout in Recurrent Neural
Networks](http://arxiv.org/abs/1512.05287)
"""
def __init__(self,
units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(SimpleRNN, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_spec = InputSpec(shape=(None, self.units))
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_shape = tensor_shape.TensorShape(input_shape).as_list()
batch_size = input_shape[0] if self.stateful else None
self.input_dim = input_shape[2]
self.input_spec[0] = InputSpec(shape=(batch_size, None, self.input_dim))
self.states = [None]
if self.stateful:
self.reset_states()
self.kernel = self.add_weight(
shape=(self.input_dim, self.units),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.units,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def preprocess_input(self, inputs, training=None):
if self.implementation > 0:
return inputs
else:
input_shape = inputs.get_shape().as_list()
input_dim = input_shape[2]
timesteps = input_shape[1]
return _time_distributed_dense(
inputs,
self.kernel,
self.bias,
self.dropout,
input_dim,
self.units,
timesteps,
training=training)
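# Descriptive sketch of the recurrence computed by step() below:
#   output_t = activation(W . x_t + b + U . output_{t-1})
# where W is self.kernel, U is self.recurrent_kernel, and the dropout masks
# in states[1:] (see get_constants) rescale x_t and output_{t-1} during
# training.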
def step(self, inputs, states):
if self.implementation == 0:
h = inputs
else:
if 0 < self.dropout < 1:
h = K.dot(inputs * states[1], self.kernel)
else:
h = K.dot(inputs, self.kernel)
if self.bias is not None:
h = K.bias_add(h, self.bias)
prev_output = states[0]
if 0 < self.recurrent_dropout < 1:
prev_output *= states[2]
output = h + K.dot(prev_output, self.recurrent_kernel)
if self.activation is not None:
output = self.activation(output)
# Properly set learning phase on output tensor.
if 0 < self.dropout + self.recurrent_dropout:
output._uses_learning_phase = True
return output, [output]
def get_constants(self, inputs, training=None):
constants = []
if self.implementation != 0 and 0 < self.dropout < 1:
input_shape = K.int_shape(inputs)
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, int(input_dim)))
def dropped_inputs():
return K.dropout(ones, self.dropout)
dp_mask = K.in_train_phase(dropped_inputs, ones, training=training)
constants.append(dp_mask)
else:
constants.append(K.cast_to_floatx(1.))
if 0 < self.recurrent_dropout < 1:
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.units))
def dropped_inputs(): # pylint: disable=function-redefined
return K.dropout(ones, self.recurrent_dropout)
rec_dp_mask = K.in_train_phase(dropped_inputs, ones, training=training)
constants.append(rec_dp_mask)
else:
constants.append(K.cast_to_floatx(1.))
return constants
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout
}
base_config = super(SimpleRNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class GRU(Recurrent):
"""Gated Recurrent Unit - Cho et al.
2014.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
References:
- [On the Properties of Neural Machine Translation: Encoder-Decoder
Approaches](https://arxiv.org/abs/1409.1259)
- [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence
Modeling](http://arxiv.org/abs/1412.3555v1)
- [A Theoretically Grounded Application of Dropout in Recurrent Neural
Networks](http://arxiv.org/abs/1512.05287)
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(GRU, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_spec = InputSpec(shape=(None, self.units))
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_shape = tensor_shape.TensorShape(input_shape).as_list()
batch_size = input_shape[0] if self.stateful else None
self.input_dim = input_shape[2]
self.input_spec[0] = InputSpec(shape=(batch_size, None, self.input_dim))
self.states = [None]
if self.stateful:
self.reset_states()
self.kernel = self.add_weight(
shape=(self.input_dim, self.units * 3),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.units * 3,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.kernel_z = self.kernel[:, :self.units]
self.recurrent_kernel_z = self.recurrent_kernel[:, :self.units]
self.kernel_r = self.kernel[:, self.units:self.units * 2]
self.recurrent_kernel_r = self.recurrent_kernel[:, self.units:
self.units * 2]
self.kernel_h = self.kernel[:, self.units * 2:]
self.recurrent_kernel_h = self.recurrent_kernel[:, self.units * 2:]
if self.use_bias:
self.bias_z = self.bias[:self.units]
self.bias_r = self.bias[self.units:self.units * 2]
self.bias_h = self.bias[self.units * 2:]
else:
self.bias_z = None
self.bias_r = None
self.bias_h = None
self.built = True
def preprocess_input(self, inputs, training=None):
if self.implementation == 0:
input_shape = inputs.get_shape().as_list()
input_dim = input_shape[2]
timesteps = input_shape[1]
x_z = _time_distributed_dense(
inputs,
self.kernel_z,
self.bias_z,
self.dropout,
input_dim,
self.units,
timesteps,
training=training)
x_r = _time_distributed_dense(
inputs,
self.kernel_r,
self.bias_r,
self.dropout,
input_dim,
self.units,
timesteps,
training=training)
x_h = _time_distributed_dense(
inputs,
self.kernel_h,
self.bias_h,
self.dropout,
input_dim,
self.units,
timesteps,
training=training)
return K.concatenate([x_z, x_r, x_h], axis=2)
else:
return inputs
def get_constants(self, inputs, training=None):
constants = []
if self.implementation != 0 and 0 < self.dropout < 1:
input_shape = K.int_shape(inputs)
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, int(input_dim)))
def dropped_inputs():
return K.dropout(ones, self.dropout)
dp_mask = [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(3)
]
constants.append(dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
if 0 < self.recurrent_dropout < 1:
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.units))
def dropped_inputs(): # pylint: disable=function-redefined
return K.dropout(ones, self.recurrent_dropout)
rec_dp_mask = [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(3)
]
constants.append(rec_dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
return constants
def step(self, inputs, states):
h_tm1 = states[0] # previous memory
dp_mask = states[1] # dropout matrices for recurrent units
rec_dp_mask = states[2]
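    # GRU update: gates z (update) and r (reset) use recurrent_activation,
    # the candidate hh uses activation, and h = z * h_tm1 + (1 - z) * hh.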
if self.implementation == 2:
matrix_x = K.dot(inputs * dp_mask[0], self.kernel)
if self.use_bias:
matrix_x = K.bias_add(matrix_x, self.bias)
matrix_inner = K.dot(h_tm1 * rec_dp_mask[0],
self.recurrent_kernel[:, :2 * self.units])
x_z = matrix_x[:, :self.units]
x_r = matrix_x[:, self.units:2 * self.units]
recurrent_z = matrix_inner[:, :self.units]
recurrent_r = matrix_inner[:, self.units:2 * self.units]
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
x_h = matrix_x[:, 2 * self.units:]
recurrent_h = K.dot(r * h_tm1 * rec_dp_mask[0],
self.recurrent_kernel[:, 2 * self.units:])
hh = self.activation(x_h + recurrent_h)
else:
if self.implementation == 0:
x_z = inputs[:, :self.units]
x_r = inputs[:, self.units:2 * self.units]
x_h = inputs[:, 2 * self.units:]
elif self.implementation == 1:
x_z = K.dot(inputs * dp_mask[0], self.kernel_z)
x_r = K.dot(inputs * dp_mask[1], self.kernel_r)
x_h = K.dot(inputs * dp_mask[2], self.kernel_h)
if self.use_bias:
x_z = K.bias_add(x_z, self.bias_z)
x_r = K.bias_add(x_r, self.bias_r)
x_h = K.bias_add(x_h, self.bias_h)
else:
raise ValueError('Unknown `implementation` mode.')
z = self.recurrent_activation(x_z + K.dot(h_tm1 * rec_dp_mask[0],
self.recurrent_kernel_z))
r = self.recurrent_activation(x_r + K.dot(h_tm1 * rec_dp_mask[1],
self.recurrent_kernel_r))
hh = self.activation(x_h + K.dot(r * h_tm1 * rec_dp_mask[2],
self.recurrent_kernel_h))
h = z * h_tm1 + (1 - z) * hh
if 0 < self.dropout + self.recurrent_dropout:
h._uses_learning_phase = True
return h, [h]
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout
}
base_config = super(GRU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class LSTM(Recurrent):
"""Long-Short Term Memory unit - Hochreiter 1997.
For a step-by-step description of the algorithm, see
[this tutorial](http://deeplearning.net/tutorial/lstm.html).
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs.
    recurrent_initializer: Initializer for the `recurrent_kernel`
      weights matrix, used for the linear transformation of the
      recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
References:
- [Long short-term
      memory](http://www.bioinf.jku.at/publications/older/2604.pdf)
(original 1997 paper)
- [Supervised sequence labeling with recurrent neural
networks](http://www.cs.toronto.edu/~graves/preprint.pdf)
- [A Theoretically Grounded Application of Dropout in Recurrent Neural
Networks](http://arxiv.org/abs/1512.05287)
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(LSTM, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_spec = [
InputSpec(shape=(None, self.units)),
InputSpec(shape=(None, self.units))
]
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_shape = tensor_shape.TensorShape(input_shape).as_list()
batch_size = input_shape[0] if self.stateful else None
self.input_dim = input_shape[2]
self.input_spec[0] = InputSpec(shape=(batch_size, None, self.input_dim))
self.states = [None, None]
if self.stateful:
self.reset_states()
self.kernel = self.add_weight(
shape=(self.input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if self.unit_forget_bias:
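        # With unit_forget_bias, initialize the forget-gate slice to ones and
        # the input/cell/output slices with the user-supplied initializer
        # (gate order is [i, f, c, o], matching the slices below).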
def bias_initializer(_, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.units,), *args, **kwargs),
initializers.Ones()((self.units,), *args, **kwargs),
self.bias_initializer((self.units * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.units * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.kernel_i = self.kernel[:, :self.units]
self.kernel_f = self.kernel[:, self.units:self.units * 2]
self.kernel_c = self.kernel[:, self.units * 2:self.units * 3]
self.kernel_o = self.kernel[:, self.units * 3:]
self.recurrent_kernel_i = self.recurrent_kernel[:, :self.units]
self.recurrent_kernel_f = self.recurrent_kernel[:, self.units:
self.units * 2]
self.recurrent_kernel_c = self.recurrent_kernel[:, self.units * 2:
self.units * 3]
self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3:]
if self.use_bias:
self.bias_i = self.bias[:self.units]
self.bias_f = self.bias[self.units:self.units * 2]
self.bias_c = self.bias[self.units * 2:self.units * 3]
self.bias_o = self.bias[self.units * 3:]
else:
self.bias_i = None
self.bias_f = None
self.bias_c = None
self.bias_o = None
self.built = True
def preprocess_input(self, inputs, training=None):
if self.implementation == 0:
input_shape = inputs.get_shape().as_list()
input_dim = input_shape[2]
timesteps = input_shape[1]
x_i = _time_distributed_dense(
inputs,
self.kernel_i,
self.bias_i,
self.dropout,
input_dim,
self.units,
timesteps,
training=training)
x_f = _time_distributed_dense(
inputs,
self.kernel_f,
self.bias_f,
self.dropout,
input_dim,
self.units,
timesteps,
training=training)
x_c = _time_distributed_dense(
inputs,
self.kernel_c,
self.bias_c,
self.dropout,
input_dim,
self.units,
timesteps,
training=training)
x_o = _time_distributed_dense(
inputs,
self.kernel_o,
self.bias_o,
self.dropout,
input_dim,
self.units,
timesteps,
training=training)
return K.concatenate([x_i, x_f, x_c, x_o], axis=2)
else:
return inputs
def get_constants(self, inputs, training=None):
constants = []
if self.implementation != 0 and 0 < self.dropout < 1:
input_shape = K.int_shape(inputs)
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, int(input_dim)))
def dropped_inputs():
return K.dropout(ones, self.dropout)
dp_mask = [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(4)
]
constants.append(dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
if 0 < self.recurrent_dropout < 1:
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.units))
def dropped_inputs(): # pylint: disable=function-redefined
return K.dropout(ones, self.recurrent_dropout)
rec_dp_mask = [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(4)
]
constants.append(rec_dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
return constants
def step(self, inputs, states):
h_tm1 = states[0]
c_tm1 = states[1]
dp_mask = states[2]
rec_dp_mask = states[3]
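    # LSTM update: gates i, f, o use recurrent_activation, the candidate uses
    # activation, then c = f * c_tm1 + i * candidate and h = o * activation(c).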
if self.implementation == 2:
z = K.dot(inputs * dp_mask[0], self.kernel)
z += K.dot(h_tm1 * rec_dp_mask[0], self.recurrent_kernel)
if self.use_bias:
z = K.bias_add(z, self.bias)
z0 = z[:, :self.units]
z1 = z[:, self.units:2 * self.units]
z2 = z[:, 2 * self.units:3 * self.units]
z3 = z[:, 3 * self.units:]
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
else:
if self.implementation == 0:
x_i = inputs[:, :self.units]
x_f = inputs[:, self.units:2 * self.units]
x_c = inputs[:, 2 * self.units:3 * self.units]
x_o = inputs[:, 3 * self.units:]
elif self.implementation == 1:
x_i = K.dot(inputs * dp_mask[0], self.kernel_i) + self.bias_i
x_f = K.dot(inputs * dp_mask[1], self.kernel_f) + self.bias_f
x_c = K.dot(inputs * dp_mask[2], self.kernel_c) + self.bias_c
x_o = K.dot(inputs * dp_mask[3], self.kernel_o) + self.bias_o
else:
raise ValueError('Unknown `implementation` mode.')
i = self.recurrent_activation(x_i + K.dot(h_tm1 * rec_dp_mask[0],
self.recurrent_kernel_i))
f = self.recurrent_activation(x_f + K.dot(h_tm1 * rec_dp_mask[1],
self.recurrent_kernel_f))
c = f * c_tm1 + i * self.activation(
x_c + K.dot(h_tm1 * rec_dp_mask[2], self.recurrent_kernel_c))
o = self.recurrent_activation(x_o + K.dot(h_tm1 * rec_dp_mask[3],
self.recurrent_kernel_o))
h = o * self.activation(c)
if 0 < self.dropout + self.recurrent_dropout:
h._uses_learning_phase = True
return h, [h, c]
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout
}
base_config = super(LSTM, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
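# A minimal usage sketch for the recurrent layers above. Hedged: the import
# path and Sequential API are assumptions based on the surrounding
# tf.contrib.keras codebase, and the shapes are illustrative only.
if __name__ == '__main__':
  from tensorflow.contrib.keras.python.keras.models import Sequential
  model = Sequential()
  # Encode sequences of 10 timesteps x 8 features into a single 32-dim
  # vector; dropout/recurrent_dropout exercise the per-gate masks built in
  # get_constants() above.
  model.add(LSTM(32, input_shape=(10, 8), dropout=0.2, recurrent_dropout=0.2))
  model.compile(optimizer='rmsprop', loss='mse')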
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run Config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import six
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import run_config as core_run_config
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
# A list of the property names in RunConfig that the user is allowed to
# change. They do not affect the execution framework, so when the execution
# framework checks the `uid` of the RunConfig, they should be ignored.
_DEFAULT_UID_WHITE_LIST = [
'tf_random_seed',
'save_summary_steps',
'save_checkpoints_steps',
'save_checkpoints_secs',
'session_config',
'keep_checkpoint_max',
'keep_checkpoint_every_n_hours',
]
class Environment(object):
# For running general distributed training.
CLOUD = 'cloud'
# For running Google-internal distributed training.
GOOGLE = 'google'
# For running on local desktop.
LOCAL = 'local'
class TaskType(object):
MASTER = 'master'
PS = 'ps'
WORKER = 'worker'
class ClusterConfig(object):
"""This class specifies the configurations for a distributed run.
If you're using `tf.learn` `Estimators`, you should probably use the subclass
RunConfig instead.
"""
def __init__(self, master=None, evaluation_master=None):
"""Constructor.
Sets the properties `cluster_spec`, `is_chief`, `master` (if `None` in the
args), `num_ps_replicas`, `task_id`, and `task_type` based on the
`TF_CONFIG` environment variable, if the pertinent information is
present. The `TF_CONFIG` environment variable is a JSON object with
attributes: `cluster`, `environment`, and `task`.
`cluster` is a JSON serialized version of `ClusterSpec`'s Python dict from
`server_lib.py`, mapping task types (usually one of the TaskType enums) to a
list of task addresses.
`environment` specifies the runtime environment for the job (usually one of
the `Environment` enums). Defaults to `LOCAL`.
`task` has two attributes: `type` and `index`, where `type` can be any of
the task types in `cluster`. When `TF_CONFIG` contains said information, the
following properties are set on this class:
* `task_type` is set to `TF_CONFIG['task']['type']`. Defaults to `None`.
* `task_id` is set to `TF_CONFIG['task']['index']`. Defaults to 0.
* `cluster_spec` is parsed from `TF_CONFIG['cluster']`. Defaults to {}.
* `master` is determined by looking up `task_type` and `task_id` in the
`cluster_spec`. Defaults to ''.
* `num_ps_replicas` is set by counting the number of nodes listed
in the `ps` attribute of `cluster_spec`. Defaults to 0.
* `num_worker_replicas` is set by counting the number of nodes listed
in the `worker` attribute of `cluster_spec`. Defaults to 0.
  * `is_chief` is determined based on `task_type`, `task_id`, and
    `environment`.
Example:
```
cluster = {'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
os.environ['TF_CONFIG'] = json.dumps(
{'cluster': cluster,
'task': {'type': 'worker', 'index': 1}})
config = ClusterConfig()
assert config.master == 'host4:2222'
assert config.task_id == 1
assert config.num_ps_replicas == 2
assert config.num_worker_replicas == 3
assert config.cluster_spec == server_lib.ClusterSpec(cluster)
assert config.task_type == 'worker'
assert not config.is_chief
```
Args:
master: TensorFlow master. Defaults to empty string for local.
evaluation_master: The master on which to perform evaluation.
"""
# If not explicitly specified in the constructor and the TF_CONFIG
# environment variable is present, load cluster_spec from TF_CONFIG.
config = json.loads(os.environ.get('TF_CONFIG') or '{}')
# Set task_type and task_id if the TF_CONFIG environment variable is
# present. Otherwise, use the respective default (None / 0).
task_env = config.get('task', {})
self._task_type = task_env.get('type', None)
self._task_id = self.get_task_id()
self._cluster_spec = server_lib.ClusterSpec(config.get('cluster', {}))
self._master = (master if master is not None else
_get_master(self._cluster_spec, self._task_type,
self._task_id) or '')
self._num_ps_replicas = _count_ps(self._cluster_spec) or 0
self._num_worker_replicas = _count_worker(self._cluster_spec) or 0
# Set is_chief.
self._environment = config.get('environment', Environment.LOCAL)
self._is_chief = None
if self._task_type is None:
self._is_chief = (self._task_id == 0)
elif self._environment == Environment.CLOUD:
      # When the TF_CONFIG environment variable is set, the task is chief
      # only if task_type is "master" and task_id is 0.
self._is_chief = (self._task_type == TaskType.MASTER and
self._task_id == 0)
else:
      # Legacy behavior: the chief is the worker with task_id == 0.
self._is_chief = (self._task_type == TaskType.WORKER and
self._task_id == 0)
self._evaluation_master = evaluation_master or ''
@property
def cluster_spec(self):
return self._cluster_spec
@property
def environment(self):
return self._environment
@property
def evaluation_master(self):
return self._evaluation_master
@property
def is_chief(self):
return self._is_chief
@property
def master(self):
return self._master
@property
def num_ps_replicas(self):
return self._num_ps_replicas
@property
def num_worker_replicas(self):
return self._num_worker_replicas
@property
def task_id(self):
return self._task_id
@property
def task_type(self):
return self._task_type
@staticmethod
def get_task_id():
"""Returns task index from `TF_CONFIG` environmental variable.
If you have a ClusterConfig instance, you can just access its task_id
property instead of calling this function and re-parsing the environmental
variable.
Returns:
`TF_CONFIG['task']['index']`. Defaults to 0.
"""
config = json.loads(os.environ.get('TF_CONFIG') or '{}')
task_env = config.get('task', {})
task_index = task_env.get('index')
return int(task_index) if task_index else 0
class RunConfig(ClusterConfig, core_run_config.RunConfig):
"""This class specifies the configurations for an `Estimator` run.
This class is the implementation of ${tf.estimator.RunConfig} interface.
If you're a Google-internal user using command line flags with
`learn_runner.py` (for instance, to do distributed training or to use
parameter servers), you probably want to use `learn_runner.EstimatorConfig`
instead.
"""
_USE_DEFAULT = 0
def __init__(self,
master=None,
num_cores=0,
log_device_placement=False,
gpu_memory_fraction=1,
tf_random_seed=None,
save_summary_steps=100,
save_checkpoints_secs=_USE_DEFAULT,
save_checkpoints_steps=None,
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=10000,
evaluation_master='',
model_dir=None,
session_config=None):
"""Constructor.
Note that the superclass `ClusterConfig` may set properties like
`cluster_spec`, `is_chief`, `master` (if `None` in the args),
`num_ps_replicas`, `task_id`, and `task_type` based on the `TF_CONFIG`
environment variable. See `ClusterConfig` for more details.
Args:
master: TensorFlow master. Defaults to empty string for local.
num_cores: Number of cores to be used. If 0, the system picks an
appropriate number (default: 0).
log_device_placement: Log the op placement to devices (default: False).
gpu_memory_fraction: Fraction of GPU memory used by the process on
each GPU uniformly on the same machine.
tf_random_seed: Random seed for TensorFlow initializers.
Setting this value allows consistency between reruns.
save_summary_steps: Save summaries every this many steps.
      save_checkpoints_secs: Save checkpoints every this many seconds. Cannot
        be specified with `save_checkpoints_steps`.
      save_checkpoints_steps: Save checkpoints every this many steps. Cannot
        be specified with `save_checkpoints_secs`.
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. Defaults to 5 (that is, the 5 most recent
        checkpoint files are kept).
keep_checkpoint_every_n_hours: Number of hours between each checkpoint
to be saved. The default value of 10,000 hours effectively disables
the feature.
evaluation_master: the master on which to perform evaluation.
      model_dir: Directory where model parameters, graph, etc. are saved. If
`None`, will use `model_dir` property in `TF_CONFIG` environment
variable. If both are set, must have same value. If both are `None`, see
`Estimator` about where the model will be saved.
session_config: a ConfigProto used to set session parameters, or None.
Note - using this argument, it is easy to provide settings which break
otherwise perfectly good models. Use with care.
"""
super(RunConfig, self).__init__(
master=master, evaluation_master=evaluation_master)
gpu_options = config_pb2.GPUOptions(
per_process_gpu_memory_fraction=gpu_memory_fraction)
self._tf_config = config_pb2.ConfigProto(
log_device_placement=log_device_placement,
inter_op_parallelism_threads=num_cores,
intra_op_parallelism_threads=num_cores,
gpu_options=gpu_options)
self._tf_random_seed = tf_random_seed
self._save_summary_steps = save_summary_steps
self._save_checkpoints_secs = save_checkpoints_secs
self._session_config = session_config
if save_checkpoints_secs == RunConfig._USE_DEFAULT:
if save_checkpoints_steps is None:
self._save_checkpoints_secs = 600
else:
self._save_checkpoints_secs = None
self._save_checkpoints_steps = save_checkpoints_steps
# TODO(weiho): Remove these after ModelFn refactoring, when users can
# create Scaffold and Saver in their model_fn to set these.
self._keep_checkpoint_max = keep_checkpoint_max
self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
self._model_dir = _get_model_dir(model_dir)
@experimental
def uid(self, whitelist=None):
"""Generates a 'Unique Identifier' based on all internal fields.
    Caller should use the uid string to check `RunConfig` instance integrity
    in one session use, but should not rely on the implementation details,
    which are subject to change.
    Args:
      whitelist: A list of the string names of the properties uid should not
        include. If `None`, defaults to `_DEFAULT_UID_WHITE_LIST`, which
        includes most properties the user is allowed to change.
Returns:
A uid string.
"""
if whitelist is None:
whitelist = _DEFAULT_UID_WHITE_LIST
state = {k: v for k, v in self.__dict__.items() if not k.startswith('__')}
# Pop out the keys in whitelist.
for k in whitelist:
state.pop('_' + k, None)
ordered_state = collections.OrderedDict(
sorted(state.items(), key=lambda t: t[0]))
    # For class instances without __repr__, some special care is required.
    # Otherwise, the object address would be used.
if '_cluster_spec' in ordered_state:
ordered_state['_cluster_spec'] = ordered_state['_cluster_spec'].as_dict()
return ', '.join(
'%s=%r' % (k, v) for (k, v) in six.iteritems(ordered_state))
@property
def model_dir(self):
return self._model_dir
@property
def tf_config(self):
return self._tf_config
@property
def tf_random_seed(self):
return self._tf_random_seed
@property
def save_summary_steps(self):
return self._save_summary_steps
@property
def save_checkpoints_secs(self):
return self._save_checkpoints_secs
@property
def save_checkpoints_steps(self):
return self._save_checkpoints_steps
@property
def session_config(self):
return self._session_config
@property
def keep_checkpoint_max(self):
return self._keep_checkpoint_max
@property
def keep_checkpoint_every_n_hours(self):
return self._keep_checkpoint_every_n_hours
def _count_ps(cluster_spec):
"""Counts the number of parameter servers in cluster_spec."""
return len(cluster_spec.as_dict().get('ps', [])) if cluster_spec else 0
def _count_worker(cluster_spec):
"""Counts the number of workers in cluster_spec."""
return len(cluster_spec.as_dict().get('worker', [])) if cluster_spec else 0
def _get_master(cluster_spec, task_type, task_id):
"""Returns the appropriate string for the TensorFlow master."""
if not cluster_spec:
return ''
# If there is only one node in the cluster, do things locally.
jobs = cluster_spec.jobs
if len(jobs) == 1 and len(cluster_spec.job_tasks(jobs[0])) == 1:
return ''
# Lookup the master in cluster_spec using task_type and task_id,
# if possible.
if task_type:
if task_type not in jobs:
raise ValueError(
'%s is not a valid task_type in the cluster_spec:\n'
'%s\n\n'
'Note that these values may be coming from the TF_CONFIG environment '
'variable.' % (task_type, cluster_spec))
addresses = cluster_spec.job_tasks(task_type)
if task_id >= len(addresses) or task_id < 0:
raise ValueError(
'%d is not a valid task_id for task_type %s in the '
'cluster_spec:\n'
'%s\n\n'
'Note that these value may be coming from the TF_CONFIG environment '
'variable.' % (task_id, task_type, cluster_spec))
return 'grpc://' + addresses[task_id]
# For backwards compatibility, we return empty string if task_type was
# not set (task_type did not previously exist).
return ''
def _get_model_dir(model_dir):
"""Returns `model_dir` based user provided `model_dir` or `TF_CONFIG`."""
model_dir_in_tf_config = json.loads(
os.environ.get('TF_CONFIG') or '{}').get('model_dir', None)
if model_dir_in_tf_config is not None:
if model_dir is not None and model_dir_in_tf_config != model_dir:
raise ValueError(
'`model_dir` provided in RunConfig construct, if set, '
'must have the same value as the model_dir in TF_CONFIG. '
'model_dir: {}\nTF_CONFIG["model_dir"]: {}.\n'.format(
model_dir, model_dir_in_tf_config))
logging.info('Using model_dir in TF_CONFIG: %s', model_dir_in_tf_config)
return model_dir or model_dir_in_tf_config
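# Illustrative self-check (grounded in the constructor logic above): when
# neither checkpoint cadence argument is given, RunConfig falls back to
# saving every 600 seconds; giving steps disables the seconds-based default.
if __name__ == '__main__':
  config = RunConfig()
  assert config.save_checkpoints_secs == 600
  assert config.save_checkpoints_steps is None
  config = RunConfig(save_checkpoints_steps=1000)
  assert config.save_checkpoints_secs is None
  assert config.save_checkpoints_steps == 1000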
|
|
import pytest
from musixmatch import Musixmatch
from . import results
class TestMusixmatch:
@classmethod
def setup_class(cls):
cls.musixmatch = Musixmatch("test")
cls.url = "http://api.musixmatch.com/ws/1.1/"
def test_get_url(self):
assert (
self.musixmatch._get_url(
"chart.artists.get?" "page=1&page_size=1&country=us" "&format=json",
)
== f"{self.url}chart.artists.get?page=1&page_size=1&country=us&format=json&apikey=test"
)
def test_apikey(self):
assert self.musixmatch._apikey == "test"
def test_chart_artists(self, requests_mock):
json = results.CHART_ARTISTS
url = "http://api.musixmatch.com/ws/1.1/chart.artists.get?page=1&page_size=1&country=us&format=json"
requests_mock.get(url=url, json=json)
request = self.musixmatch.chart_artists(1, 1)
assert json == request
def test_chart_tracks_get(self, requests_mock):
json = results.TRACKS
url = "http://api.musixmatch.com/ws/1.1/chart.tracks.get?page=1&page_size=1&country=us&format=json&f_has_lyrics=1"
requests_mock.get(url=url, json=json)
request = self.musixmatch.chart_tracks_get(1, 1, 1)
assert json == request
  @pytest.mark.skip("Refactor test")
  def test_track_search(self):
    assert (
      self.musixmatch.track_search(
        q_track="Let Me Love You",
        q_artist="justinbieber",
        page_size=10,
        page=1,
        s_track_rating="desc",
      )["message"]["body"]["track_list"]
      == []
    )
  @pytest.mark.skip("Refactor test")
  def test_track_get(self):
    track = self.musixmatch.track_get(15445219)["message"]["body"]["track"]
    assert track["artist_name"] == "Lady Gaga"
    assert track["album_name"] == "The Fame Monster"
def test_track_lyrics_get(self, requests_mock):
json = results.TRACKS
url = "http://api.musixmatch.com/ws/1.1/track.lyrics.get?track_id=12345"
requests_mock.get(url=url, json=json)
request = self.musixmatch.track_lyrics_get(12345)
assert json == request
def test_track_snippet_get(self, requests_mock):
json = results.TRACK_SNIPPET
url = "http://api.musixmatch.com/ws/1.1/track.snippet.get?track_id=12345"
requests_mock.get(url=url, json=json)
request = self.musixmatch.track_snippet_get(12345)
assert json == request
  @pytest.mark.skip("Refactor test")
  def test_track_subtitle_get(self):
    assert self.musixmatch.track_subtitle_get(14201829)["message"]["body"] == ""
  @pytest.mark.skip("Refactor test")
  def test_track_richsync_get(self):
    richsync = self.musixmatch.track_richsync_get(114837357)["message"]["body"][
      "richsync"
    ]
    assert richsync["richsync_id"] == 6
    assert richsync["richsync_length"] == 230
  @pytest.mark.skip("Refactor test")
  def test_track_lyrics_post(self):
    message = self.musixmatch.track_lyrics_post(1471157, "test")["message"]
    assert message["header"]["status_code"] == 200
    assert message["body"] == ""
  @pytest.mark.skip("Refactor test")
  def test_track_lyrics_feedback_post(self):
    assert (
      self.musixmatch.track_lyrics_post(1471157, 4193713, "wrong_verses")[
        "message"
      ]["body"]
      == ""
    )
  @pytest.mark.skip("Refactor test")
  def test_matcher_lyrics_get(self):
    lyrics = self.musixmatch.matcher_lyrics_get("Sexy and I know it", "LMFAO")[
      "message"
    ]["body"]["lyrics"]
    assert lyrics["lyrics_language_description"] == "English"
    assert lyrics["lyrics_language"] == "en"
  @pytest.mark.skip("Refactor test")
  def test_matcher_track_get(self):
    track = self.musixmatch.matcher_track_get(
      "Lose Yourself (soundtrack)", "Eminem"
    )["message"]["body"]["track"]
    assert track["track_name"] == "Lose Yourself - Soundtrack Version (Explicit)"
    assert track["album_name"] == "Curtain Call"
  @pytest.mark.skip("Refactor test")
  def test_matcher_subtitle_get(self):
    assert (
      self.musixmatch.matcher_subtitle_get("Sexy and I know it", "LMFAO", 200, 3)[
        "message"
      ]["body"]
      == ""
    )
  @pytest.mark.skip("Refactor test")
  def test_artist_get(self):
    artist = self.musixmatch.artist_get(118)["message"]["body"]["artist"]
    assert artist["artist_name"] == "Queen"
    assert artist["artist_mbid"] == "5eecaf18-02ec-47af-a4f2-7831db373419"
  @pytest.mark.skip("Refactor test")
  def test_artist_search(self):
    artist = self.musixmatch.artist_search(
      "prodigy",
      1,
      1,
      16439,
      "4a4ee089-93b1-4470-af9a-6ff575d32704",
    )["message"]["body"]["artist_list"][0]["artist"]
    assert artist["artist_id"] == 16439
    assert artist["artist_name"] == "The Prodigy"
  @pytest.mark.skip("Refactor test")
  def test_artist_albums_get(self):
    album = self.musixmatch.artist_albums_get(1039, 1, 1, 1, "desc")["message"][
      "body"
    ]["album_list"][0]["album"]
    assert album["album_id"] == 25660826
    assert album["album_name"] == "Kaleidoscope"
  @pytest.mark.skip("Refactor test")
  def test_artist_related_get(self):
    artist = self.musixmatch.artist_related_get(56, 1, 1)["message"]["body"][
      "artist_list"
    ][0]["artist"]
    assert artist["artist_id"] == 298
    assert artist["artist_name"] == "Outkast"
  @pytest.mark.skip("Refactor test")
  def test_album_get(self):
    album = self.musixmatch.album_get(14250417)["message"]["body"]["album"]
    assert album["album_id"] == 14250417
    assert album["album_name"] == "Party Rock"
  @pytest.mark.skip("Refactor test")
  def test_album_tracks_get(self):
    track = self.musixmatch.album_tracks_get(13750844, 1, 1, "")["message"]["body"][
      "track_list"
    ][0]["track"]
    assert track["track_id"] == 30057052
    assert track["track_name"] == "Don't Panic"
  @pytest.mark.skip("Refactor test")
  def test_tracking_url_get(self):
    assert (
      self.musixmatch.tracking_url_get("www.mylyricswebsite.com")["message"][
        "header"
      ]["status_code"]
      == 200
    )
  @pytest.mark.skip("Refactor test")
  def test_catalogue_dump_get(self):
    assert self.musixmatch.catalogue_dump_get("test")["message"]["body"] == ""
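# The mocked tests above rely on the `requests_mock` fixture provided by the
# requests-mock pytest plugin; a typical invocation (assuming the plugin is
# installed) is:
#   pip install pytest requests-mock
#   pytest -k "chart or snippet or lyrics_get"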
|
|
# Copyright 2014-2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from touchdown.core import argument, serializers
from touchdown.core.plan import Plan, Present
from touchdown.core.resource import Resource
from .. import route53
from ..account import BaseAccount
from ..common import RefreshMetadata, SimpleApply, SimpleDescribe, SimpleDestroy, Waiter
from ..elb import LoadBalancer
from ..iam import ServerCertificate
from ..s3 import Bucket
from ..waf import WebACL
from .common import CloudFrontList, CloudFrontResourceList, S3Origin
class CustomOrigin(Resource):
resource_name = "custom_origin"
dot_ignore = True
extra_serializers = {"CustomHeaders": serializers.Dict(Quantity=0, Items=[])}
name = argument.String(field="Id")
domain_name = argument.String(field="DomainName")
origin_path = argument.String(default="", field="OriginPath")
http_port = argument.Integer(
default=80, field="HTTPPort", group="custom-origin-config"
)
https_port = argument.Integer(
default=443, field="HTTPSPort", group="custom-origin-config"
)
protocol = argument.String(
choices=["http-only", "match-viewer"],
default="match-viewer",
field="OriginProtocolPolicy",
group="custom-origin-config",
)
ssl_policy = argument.List(
choices=["SSLv3", "TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.1_2016"],
default=["SSLv3", "TLSv1"],
field="OriginSslProtocols",
group="custom-origin-config",
serializer=CloudFrontList(serializers.List()),
)
_custom_origin_config = argument.Serializer(
serializer=serializers.Resource(group="custom-origin-config"),
field="CustomOriginConfig",
)
class LoadBalancerOrigin(Resource):
resource_name = "elb_origin"
dot_ignore = True
extra_serializers = {"CustomHeaders": serializers.Dict(Quantity=0, Items=[])}
name = argument.String(field="Id")
load_balancer = argument.Resource(
LoadBalancer, field="DomainName", serializer=serializers.Property("DNSName")
)
origin_path = argument.String(default="", field="OriginPath")
http_port = argument.Integer(
default=80, field="HTTPPort", group="custom-origin-config"
)
https_port = argument.Integer(
default=443, field="HTTPSPort", group="custom-origin-config"
)
protocol = argument.String(
choices=["http-only", "match-viewer"],
default="match-viewer",
field="OriginProtocolPolicy",
group="custom-origin-config",
)
ssl_policy = argument.List(
choices=["SSLv3", "TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.1_2016"],
default=["SSLv3", "TLSv1"],
field="OriginSslProtocols",
group="custom-origin-config",
serializer=CloudFrontList(serializers.List()),
)
_custom_origin_config = argument.Serializer(
serializer=serializers.Resource(group="custom-origin-config"),
field="CustomOriginConfig",
)
class DefaultCacheBehavior(Resource):
resource_name = "default_cache_behaviour"
dot_ignore = True
extra_serializers = {
    # TrustedSigners are not supported yet, so include a stub in serialized form
"TrustedSigners": serializers.Const({"Enabled": False, "Quantity": 0}),
"AllowedMethods": CloudFrontList(
inner=serializers.Argument("allowed_methods"),
CachedMethods=serializers.Argument("cached_methods"),
),
"ForwardedValues": serializers.Resource(
group="forwarded-values",
Cookies=serializers.Resource(
group="cookies",
Forward=serializers.Expression(
lambda r, o: "all"
if o.forward_cookies == ["*"]
else "none"
if len(o.forward_cookies) == 0
else "whitelist"
),
),
),
}
target_origin = argument.String(field="TargetOriginId")
forward_query_string = argument.Boolean(
default=True, field="QueryString", group="forwarded-values"
)
forward_headers = argument.List(
field="Headers",
serializer=CloudFrontList(serializers.List()),
group="forwarded-values",
)
forward_cookies = argument.List(
field="WhitelistedNames",
serializer=CloudFrontList(
serializers.Expression(lambda r, o: [] if o == ["*"] else o)
),
group="cookies",
)
allowed_methods = argument.List(default=lambda x: ["GET", "HEAD"])
cached_methods = argument.List(
default=lambda x: ["GET", "HEAD"], serializer=CloudFrontList()
)
default_ttl = argument.Integer(default=86400, field="DefaultTTL")
min_ttl = argument.Integer(default=0, field="MinTTL")
max_ttl = argument.Integer(default=31536000, field="MaxTTL")
compress = argument.Boolean(default=False, field="Compress")
viewer_protocol_policy = argument.String(
choices=["allow-all", "https-only", "redirect-to-https"],
default="allow-all",
field="ViewerProtocolPolicy",
)
smooth_streaming = argument.Boolean(default=False, field="SmoothStreaming")
class CacheBehavior(DefaultCacheBehavior):
resource_name = "cache_behaviour"
path_pattern = argument.String(field="PathPattern")
class ErrorResponse(Resource):
resource_name = "error_response"
dot_ignore = True
error_code = argument.Integer(
field="ErrorCode",
choices=["400", "403", "404", "405", "414", "500", "501", "502", "503", "504"],
)
response_page_path = argument.String(field="ResponsePagePath")
response_code = argument.String(
field="ResponseCode",
choices=[
"200",
"400",
"403",
"404",
"405",
"414",
"500",
"501",
"502",
"503",
"504",
],
)
min_ttl = argument.Integer(field="ErrorCachingMinTTL")
class LoggingConfig(Resource):
resource_name = "logging_config"
dot_ignore = True
enabled = argument.Boolean(field="Enabled", default=False)
include_cookies = argument.Boolean(field="IncludeCookies", default=False)
bucket = argument.Resource(
Bucket,
field="Bucket",
serializer=serializers.Append(
".s3.amazonaws.com", serializers.Property("Name")
),
empty_serializer=serializers.Const(""),
)
prefix = argument.String(field="Prefix", default="")
class Distribution(Resource):
resource_name = "distribution"
extra_serializers = {
"CallerReference": serializers.Expression(
lambda runner, object: runner.get_plan(object).object.get(
"CallerReference", str(uuid.uuid4())
)
),
"Aliases": CloudFrontList(
serializers.Chain(
serializers.Argument("cname"), serializers.Argument("aliases")
)
),
# We don't support GeoRestrictions yet - so include a stubbed default
# when serializing
"Restrictions": serializers.Const(
{"GeoRestriction": {"RestrictionType": "none", "Quantity": 0}}
),
}
name = argument.String()
cname = argument.String(
default=lambda instance: instance.name,
serializer=serializers.ListOfOne(maybe_empty=True),
)
comment = argument.String(field="Comment", default=lambda instance: instance.name)
aliases = argument.List()
root_object = argument.String(default="/", field="DefaultRootObject")
enabled = argument.Boolean(default=True, field="Enabled")
origins = argument.ResourceList(
(S3Origin, CustomOrigin, LoadBalancerOrigin),
field="Origins",
serializer=CloudFrontResourceList(),
)
default_cache_behavior = argument.Resource(
DefaultCacheBehavior,
field="DefaultCacheBehavior",
serializer=serializers.Resource(),
)
behaviors = argument.ResourceList(
CacheBehavior, field="CacheBehaviors", serializer=CloudFrontResourceList()
)
error_responses = argument.ResourceList(
ErrorResponse, field="CustomErrorResponses", serializer=CloudFrontResourceList()
)
logging = argument.Resource(
LoggingConfig,
default=lambda instance: dict(enabled=False),
field="Logging",
serializer=serializers.Resource(),
)
price_class = argument.String(
default="PriceClass_100",
choices=["PriceClass_100", "PriceClass_200", "PriceClass_All"],
field="PriceClass",
)
ssl_certificate = argument.Resource(
ServerCertificate,
field="Certificate",
group="viewer-certificate",
serializer=serializers.Property("ServerCertificateId"),
)
acm_certificate = argument.String(
field="ACMCertificateArn", group="viewer-certificate"
)
ssl_support_method = argument.String(
default="sni-only",
choices=["sni-only", "vip"],
field="SSLSupportMethod",
group="viewer-certificate",
)
ssl_minimum_protocol_version = argument.String(
default="TLSv1",
choices=["TLSv1", "SSLv3", "TLSv1.1_2016"],
field="MinimumProtocolVersion",
group="viewer-certificate",
)
viewer_certificate = argument.Serializer(
field="ViewerCertificate",
serializer=serializers.Resource(
group="viewer-certificate",
CertificateSource=serializers.Expression(
lambda r, o: "acm"
if o.acm_certificate
else "iam"
if o.ssl_certificate
else "cloudfront"
),
),
)
web_acl = argument.Resource(
WebACL, field="WebACLId", empty_serializer=serializers.Const("")
)
account = argument.Resource(BaseAccount)
class DistributionWaiter(Waiter):
def get_waiter_filters(self):
return {"Id": self.plan.object["Id"]}
class Describe(SimpleDescribe, Plan):
resource = Distribution
service_name = "cloudfront"
api_version = "2016-01-28"
describe_filters = {}
describe_action = "list_distributions"
describe_envelope = "DistributionList.Items"
key = "Id"
def get_waiter(self, description, waiter, eventual_consistency_threshold=1):
return DistributionWaiter(
self, description, waiter, eventual_consistency_threshold
)
def describe_object_matches(self, d):
return self.resource.name == d["Comment"] or self.resource.name in d[
"Aliases"
].get("Items", [])
def annotate_object(self, obj):
result = self.client.get_distribution(Id=obj["Id"])
distribution = {
"ETag": result["ETag"],
"Id": obj["Id"],
"DomainName": result["Distribution"]["DomainName"],
"Status": result["Distribution"]["Status"],
}
distribution.update(result["Distribution"]["DistributionConfig"])
return distribution
class Apply(SimpleApply, Describe):
create_action = "create_distribution"
update_action = "update_distribution"
create_response = "not-that-useful"
waiter = "distribution_deployed"
signature = (Present("name"), Present("origins"), Present("default_cache_behavior"))
def get_create_serializer(self):
return serializers.Dict(DistributionConfig=serializers.Resource())
def get_update_serializer(self):
return serializers.Dict(
Id=serializers.Identifier(),
DistributionConfig=serializers.Resource(),
IfMatch=serializers.Property("ETag"),
)
class Destroy(SimpleDestroy, Describe):
destroy_action = "delete_distribution"
def get_destroy_serializer(self):
return serializers.Dict(
Id=self.resource_id, IfMatch=serializers.Property("ETag")
)
def destroy_object(self):
if self.object.get("Enabled", False):
yield self.generic_action(
"Disable distribution",
self.client.update_distribution,
Id=self.object["Id"],
IfMatch=self.object["ETag"],
DistributionConfig=serializers.Resource(Enabled=False),
)
if (
self.object.get("Enabled", False)
or self.object.get("Status", "") == "InProgress"
):
yield self.get_waiter(
["Waiting for distribution to be disabled and enter state 'Deployed'"],
"distribution_deployed",
)
if self.object.get("Enabled", False):
yield RefreshMetadata(self)
for change in super(Destroy, self).destroy_object():
yield change
class AliasTarget(route53.AliasTarget):
""" Adapts a Distribution into a AliasTarget """
web_distribution = argument.Resource(
Distribution,
field="DNSName",
serializer=serializers.Context(
serializers.Property("DomainName"),
serializers.Expression(lambda r, o: route53._normalize(o)),
),
)
hosted_zone_id = argument.String(default="Z2FDTNDATAQYW2", field="HostedZoneId")
evaluate_target_health = argument.Boolean(
default=False, field="EvaluateTargetHealth"
)
@classmethod
def clean(cls, value):
if isinstance(value, Distribution):
return super(AliasTarget, cls).clean({"web_distribution": value})
return super(AliasTarget, cls).clean(value)
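# A hedged configuration sketch (the workspace helpers and field values are
# assumptions in touchdown's usual declaration style, not taken from this
# module): resources are declared through generated add_* helpers, and the
# nested dicts are cleaned into the Resource subclasses defined above.
#
#   aws = workspace.add_aws(region='eu-west-1')
#   aws.add_distribution(
#       name='www.example.com',
#       origins=[{'name': 'backend', 'domain_name': 'origin.example.com'}],
#       default_cache_behavior={'target_origin': 'backend'},
#   )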
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
from keystone.common import sql
from keystone import exception
from keystone.i18n import _LI
from keystone import token
from keystone.token import provider
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class TokenModel(sql.ModelBase, sql.DictBase):
__tablename__ = 'token'
attributes = ['id', 'expires', 'user_id', 'trust_id']
id = sql.Column(sql.String(64), primary_key=True)
expires = sql.Column(sql.DateTime(), default=None)
extra = sql.Column(sql.JsonBlob())
valid = sql.Column(sql.Boolean(), default=True, nullable=False)
user_id = sql.Column(sql.String(64))
trust_id = sql.Column(sql.String(64))
__table_args__ = (
sql.Index('ix_token_expires', 'expires'),
sql.Index('ix_token_expires_valid', 'expires', 'valid'),
sql.Index('ix_token_user_id', 'user_id'),
sql.Index('ix_token_trust_id', 'trust_id')
)
def _expiry_range_batched(session, upper_bound_func, batch_size):
"""Returns the stop point of the next batch for expiration.
Return the timestamp of the next token that is `batch_size` rows from
being the oldest expired token.
"""
# This expiry strategy splits the tokens into roughly equal sized batches
# to be deleted. It does this by finding the timestamp of a token
# `batch_size` rows from the oldest token and yielding that to the caller.
# It's expected that the caller will then delete all rows with a timestamp
# equal to or older than the one yielded. This may delete slightly more
# tokens than the batch_size, but that should be ok in almost all cases.
LOG.debug('Token expiration batch size: %d', batch_size)
query = session.query(TokenModel.expires)
query = query.filter(TokenModel.expires < upper_bound_func())
query = query.order_by(TokenModel.expires)
query = query.offset(batch_size - 1)
query = query.limit(1)
while True:
try:
next_expiration = query.one()[0]
except sql.NotFound:
      # There are fewer than `batch_size` rows remaining, so fall
# through to the normal delete
break
yield next_expiration
yield upper_bound_func()
def _expiry_range_all(session, upper_bound_func):
"""Expires all tokens in one pass."""
yield upper_bound_func()
class Token(token.persistence.TokenDriverV8):
# Public interface
def get_token(self, token_id):
if token_id is None:
raise exception.TokenNotFound(token_id=token_id)
session = sql.get_session()
token_ref = session.query(TokenModel).get(token_id)
if not token_ref or not token_ref.valid:
raise exception.TokenNotFound(token_id=token_id)
return token_ref.to_dict()
def create_token(self, token_id, data):
data_copy = copy.deepcopy(data)
if not data_copy.get('expires'):
data_copy['expires'] = provider.default_expire_time()
if not data_copy.get('user_id'):
data_copy['user_id'] = data_copy['user']['id']
token_ref = TokenModel.from_dict(data_copy)
token_ref.valid = True
session = sql.get_session()
with session.begin():
session.add(token_ref)
return token_ref.to_dict()
def delete_token(self, token_id):
session = sql.get_session()
with session.begin():
token_ref = session.query(TokenModel).get(token_id)
if not token_ref or not token_ref.valid:
raise exception.TokenNotFound(token_id=token_id)
token_ref.valid = False
def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
"""Deletes all tokens in one session
The user_id will be ignored if the trust_id is specified. user_id
will always be specified.
If using a trust, the token's user_id is set to the trustee's user ID
or the trustor's user ID, so will use trust_id to query the tokens.
"""
session = sql.get_session()
token_list = []
with session.begin():
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter_by(valid=True)
query = query.filter(TokenModel.expires > now)
if trust_id:
query = query.filter(TokenModel.trust_id == trust_id)
else:
query = query.filter(TokenModel.user_id == user_id)
for token_ref in query.all():
if tenant_id:
token_ref_dict = token_ref.to_dict()
if not self._tenant_matches(tenant_id, token_ref_dict):
continue
if consumer_id:
token_ref_dict = token_ref.to_dict()
if not self._consumer_matches(consumer_id, token_ref_dict):
continue
token_ref.valid = False
token_list.append(token_ref.id)
return token_list
def _tenant_matches(self, tenant_id, token_ref_dict):
return ((tenant_id is None) or
(token_ref_dict.get('tenant') and
token_ref_dict['tenant'].get('id') == tenant_id))
def _consumer_matches(self, consumer_id, ref):
if consumer_id is None:
return True
else:
try:
oauth = ref['token_data']['token'].get('OS-OAUTH1', {})
return oauth and oauth['consumer_id'] == consumer_id
except KeyError:
return False
def _list_tokens_for_trust(self, trust_id):
session = sql.get_session()
tokens = []
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter(TokenModel.expires > now)
query = query.filter(TokenModel.trust_id == trust_id)
token_references = query.filter_by(valid=True)
for token_ref in token_references:
token_ref_dict = token_ref.to_dict()
tokens.append(token_ref_dict['id'])
return tokens
def _list_tokens_for_user(self, user_id, tenant_id=None):
session = sql.get_session()
tokens = []
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter(TokenModel.expires > now)
query = query.filter(TokenModel.user_id == user_id)
token_references = query.filter_by(valid=True)
for token_ref in token_references:
token_ref_dict = token_ref.to_dict()
if self._tenant_matches(tenant_id, token_ref_dict):
tokens.append(token_ref['id'])
return tokens
def _list_tokens_for_consumer(self, user_id, consumer_id):
tokens = []
session = sql.get_session()
with session.begin():
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter(TokenModel.expires > now)
query = query.filter(TokenModel.user_id == user_id)
token_references = query.filter_by(valid=True)
for token_ref in token_references:
token_ref_dict = token_ref.to_dict()
if self._consumer_matches(consumer_id, token_ref_dict):
tokens.append(token_ref_dict['id'])
return tokens
def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
if not CONF.token.revoke_by_id:
return []
if trust_id:
return self._list_tokens_for_trust(trust_id)
if consumer_id:
return self._list_tokens_for_consumer(user_id, consumer_id)
else:
return self._list_tokens_for_user(user_id, tenant_id)
def list_revoked_tokens(self):
session = sql.get_session()
tokens = []
now = timeutils.utcnow()
query = session.query(TokenModel.id, TokenModel.expires)
query = query.filter(TokenModel.expires > now)
token_references = query.filter_by(valid=False)
for token_ref in token_references:
record = {
'id': token_ref[0],
'expires': token_ref[1],
}
tokens.append(record)
return tokens
def _expiry_range_strategy(self, dialect):
"""Choose a token range expiration strategy
Based on the DB dialect, select an expiry range callable that is
appropriate.
"""
# DB2 and MySQL can both benefit from a batched strategy. On DB2 the
# transaction log can fill up and on MySQL w/Galera, large
# transactions can exceed the maximum write set size.
if dialect == 'ibm_db_sa':
# Limit of 100 is known to not fill a transaction log
# of default maximum size while not significantly
# impacting the performance of large token purges on
# systems where the maximum transaction log size has
# been increased beyond the default.
return functools.partial(_expiry_range_batched,
batch_size=100)
elif dialect == 'mysql':
# We want somewhat more than 100, since Galera replication delay is
# at least RTT*2. This can be a significant amount of time if
# doing replication across a WAN.
return functools.partial(_expiry_range_batched,
batch_size=1000)
return _expiry_range_all
def flush_expired_tokens(self):
session = sql.get_session()
dialect = session.bind.dialect.name
expiry_range_func = self._expiry_range_strategy(dialect)
query = session.query(TokenModel.expires)
total_removed = 0
upper_bound_func = timeutils.utcnow
for expiry_time in expiry_range_func(session, upper_bound_func):
delete_query = query.filter(TokenModel.expires <=
expiry_time)
row_count = delete_query.delete(synchronize_session=False)
total_removed += row_count
LOG.debug('Removed %d total expired tokens', total_removed)
session.flush()
LOG.info(_LI('Total expired tokens removed: %d'), total_removed)
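# Standalone sketch (pure-Python stand-in, no SQL) of the batching contract
# implemented by _expiry_range_batched above: yield every batch_size-th
# expiry time below the upper bound, then the bound itself, so a caller
# deleting "rows <= yielded value" works in small chunks.
if __name__ == '__main__':
  def _batched_bounds(expiry_times, upper_bound, batch_size):
    pending = sorted(t for t in expiry_times if t < upper_bound)
    for i in range(batch_size - 1, len(pending), batch_size):
      yield pending[i]
    yield upper_bound
  # Ten expired rows in batches of four: delete <= 3, then <= 7, then the rest.
  assert list(_batched_bounds(range(10), 100, 4)) == [3, 7, 100]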
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Starting point for routing EC2 requests.
"""
from eventlet.green import httplib
from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
import webob
import webob.dec
import webob.exc
from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova.api.ec2 import faults
from nova.api import validator
from nova import context
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
from nova.openstack.common import timeutils
from nova import utils
from nova import wsgi
LOG = logging.getLogger(__name__)
ec2_opts = [
cfg.IntOpt('lockout_attempts',
default=5,
help='Number of failed auths before lockout.'),
cfg.IntOpt('lockout_minutes',
default=15,
help='Number of minutes to lockout if triggered.'),
cfg.IntOpt('lockout_window',
default=15,
help='Number of minutes for lockout window.'),
cfg.StrOpt('keystone_ec2_url',
default='http://localhost:5000/v2.0/ec2tokens',
help='URL to get token from ec2 request.'),
cfg.BoolOpt('ec2_private_dns_show_ip',
default=False,
help='Return the IP address as private dns hostname in '
'describe instances'),
cfg.BoolOpt('ec2_strict_validation',
default=True,
help='Validate security group names'
' according to EC2 specification'),
cfg.IntOpt('ec2_timestamp_expiry',
default=300,
help='Time in seconds before ec2 timestamp expires'),
]
CONF = cfg.CONF
CONF.register_opts(ec2_opts)
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
## Fault Wrapper around all EC2 requests ##
class FaultWrapper(wsgi.Middleware):
"""Calls the middleware stack, captures any exceptions into faults."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
return req.get_response(self.application)
except Exception as ex:
LOG.exception(_("FaultWrapper: %s"), unicode(ex))
return faults.Fault(webob.exc.HTTPInternalServerError())
class RequestLogging(wsgi.Middleware):
"""Access-Log akin logging for all EC2 API requests."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
start = timeutils.utcnow()
rv = req.get_response(self.application)
self.log_request_completion(rv, req, start)
return rv
def log_request_completion(self, response, request, start):
apireq = request.environ.get('ec2.request', None)
if apireq:
controller = apireq.controller
action = apireq.action
else:
controller = None
action = None
ctxt = request.environ.get('nova.context', None)
delta = timeutils.utcnow() - start
seconds = delta.seconds
microseconds = delta.microseconds
LOG.info(
"%s.%ss %s %s %s %s:%s %s [%s] %s %s",
seconds,
microseconds,
request.remote_addr,
request.method,
"%s%s" % (request.script_name, request.path_info),
controller,
action,
response.status_int,
request.user_agent,
request.content_type,
response.content_type,
context=ctxt)
class Lockout(wsgi.Middleware):
"""Lockout for x minutes on y failed auths in a z minute period.
    x = lockout_minutes flag
    y = lockout_attempts flag
    z = lockout_window flag
    Uses memcached if lockout_memcached_servers flag is set, otherwise it
    uses a very simple in-process cache. Due to the simplicity of
    the implementation, the timeout window is started with the first
    failed request, so it will block if there are y failed logins within
that period.
There is a possible race condition where simultaneous requests could
sneak in before the lockout hits, but this is extremely rare and would
only result in a couple of extra failed attempts.
"""
def __init__(self, application):
"""middleware can use fake for testing."""
self.mc = memorycache.get_client()
super(Lockout, self).__init__(application)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
access_key = str(req.params['AWSAccessKeyId'])
failures_key = "authfailures-%s" % access_key
failures = int(self.mc.get(failures_key) or 0)
if failures >= CONF.lockout_attempts:
detail = _("Too many failed authentications.")
raise webob.exc.HTTPForbidden(detail=detail)
res = req.get_response(self.application)
if res.status_int == 403:
failures = self.mc.incr(failures_key)
if failures is None:
# NOTE(vish): To use incr, failures has to be a string.
self.mc.set(failures_key, '1', time=CONF.lockout_window * 60)
elif failures >= CONF.lockout_attempts:
LOG.warn(_('Access key %(access_key)s has had %(failures)d '
'failed authentications and will be locked out '
'for %(lock_mins)d minutes.'),
{'access_key': access_key,
'failures': failures,
'lock_mins': CONF.lockout_minutes})
self.mc.set(failures_key, str(failures),
time=CONF.lockout_minutes * 60)
return res
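# Hedged illustration, not part of the original module: the failure-counting
# scheme above restated against a plain dict instead of memcached. The
# _demo_ helper is hypothetical and ignores the expiry windows that
# memcached's timeouts provide.
def _demo_record_failure(cache, access_key, attempts=5):
    """Return True once access_key has accumulated `attempts` failures."""
    failures_key = "authfailures-%s" % access_key
    cache[failures_key] = int(cache.get(failures_key, 0)) + 1
    return cache[failures_key] >= attempts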
class EC2KeystoneAuth(wsgi.Middleware):
"""Authenticate an EC2 request with keystone and convert to context."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
request_id = context.generate_request_id()
signature = req.params.get('Signature')
if not signature:
msg = _("Signature not provided")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
access = req.params.get('AWSAccessKeyId')
if not access:
msg = _("Access key not provided")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
# Make a copy of args for authentication and signature verification.
auth_params = dict(req.params)
# Not part of authentication args
auth_params.pop('Signature')
cred_dict = {
'access': access,
'signature': signature,
'host': req.host,
'verb': req.method,
'path': req.path,
'params': auth_params,
}
if "ec2" in CONF.keystone_ec2_url:
creds = {'ec2Credentials': cred_dict}
else:
creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
creds_json = jsonutils.dumps(creds)
headers = {'Content-Type': 'application/json'}
o = urlparse.urlparse(CONF.keystone_ec2_url)
if o.scheme == "http":
conn = httplib.HTTPConnection(o.netloc)
else:
conn = httplib.HTTPSConnection(o.netloc)
conn.request('POST', o.path, body=creds_json, headers=headers)
response = conn.getresponse()
data = response.read()
if response.status != 200:
if response.status == 401:
msg = response.reason
else:
msg = _("Failure communicating with keystone")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=response.status)
result = jsonutils.loads(data)
conn.close()
try:
token_id = result['access']['token']['id']
user_id = result['access']['user']['id']
project_id = result['access']['token']['tenant']['id']
user_name = result['access']['user'].get('name')
project_name = result['access']['token']['tenant'].get('name')
roles = [role['name'] for role
in result['access']['user']['roles']]
except (AttributeError, KeyError) as e:
LOG.exception(_("Keystone failure: %s") % e)
msg = _("Failure communicating with keystone")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For',
remote_address)
catalog = result['access']['serviceCatalog']
ctxt = context.RequestContext(user_id,
project_id,
user_name=user_name,
project_name=project_name,
roles=roles,
auth_token=token_id,
remote_address=remote_address,
service_catalog=catalog)
req.environ['nova.context'] = ctxt
return self.application
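# Hedged illustration, not part of the original module: the two credential
# envelopes built above, rendered for a sample request. All values here are
# made up.
def _demo_keystone_ec2_payload():
    cred_dict = {'access': 'AKIAEXAMPLE', 'signature': 'c2lnbmF0dXJl',
                 'host': 'nova.example.com:8773', 'verb': 'GET',
                 'path': '/services/Cloud',
                 'params': {'Action': 'DescribeRegions'}}
    # A keystone_ec2_url containing "ec2" gets the bare envelope; any other
    # URL gets the OS-KSEC2 extension envelope.
    plain = jsonutils.dumps({'ec2Credentials': cred_dict})
    wrapped = jsonutils.dumps({'auth': {'OS-KSEC2:ec2Credentials': cred_dict}})
    return plain, wrapped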
class NoAuth(wsgi.Middleware):
"""Add user:project as 'nova.context' to WSGI environ."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if 'AWSAccessKeyId' not in req.params:
raise webob.exc.HTTPBadRequest()
user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
project_id = project_id or user_id
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
ctx = context.RequestContext(user_id,
project_id,
is_admin=True,
remote_address=remote_address)
req.environ['nova.context'] = ctx
return self.application
class Requestify(wsgi.Middleware):
def __init__(self, app, controller):
super(Requestify, self).__init__(app)
self.controller = importutils.import_object(controller)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Version', 'Timestamp']
args = dict(req.params)
try:
expired = ec2utils.is_ec2_timestamp_expired(req.params,
expires=CONF.ec2_timestamp_expiry)
if expired:
msg = _("Timestamp failed validation.")
LOG.exception(msg)
raise webob.exc.HTTPForbidden(detail=msg)
# Raise KeyError if omitted
action = req.params['Action']
# Fix bug lp:720157 for older (version 1) clients
version = req.params['SignatureVersion']
if int(version) == 1:
non_args.remove('SignatureMethod')
if 'SignatureMethod' in args:
args.pop('SignatureMethod')
for non_arg in non_args:
# Remove, but raise KeyError if omitted
args.pop(non_arg)
except KeyError:
raise webob.exc.HTTPBadRequest()
except exception.InvalidRequest as err:
raise webob.exc.HTTPBadRequest(explanation=unicode(err))
LOG.debug(_('action: %s'), action)
for key, value in args.items():
LOG.debug(_('arg: %(key)s\t\tval: %(value)s'),
{'key': key, 'value': value})
# Success!
api_request = apirequest.APIRequest(self.controller, action,
req.params['Version'], args)
req.environ['ec2.request'] = api_request
return self.application
class Authorizer(wsgi.Middleware):
"""Authorize an EC2 API request.
Return a 401 if ec2.controller and ec2.action in WSGI environ may not be
executed in nova.context.
"""
def __init__(self, application):
super(Authorizer, self).__init__(application)
self.action_roles = {
'CloudController': {
'DescribeAvailabilityZones': ['all'],
'DescribeRegions': ['all'],
'DescribeSnapshots': ['all'],
'DescribeKeyPairs': ['all'],
'CreateKeyPair': ['all'],
'DeleteKeyPair': ['all'],
'DescribeSecurityGroups': ['all'],
'ImportKeyPair': ['all'],
'AuthorizeSecurityGroupIngress': ['netadmin'],
'RevokeSecurityGroupIngress': ['netadmin'],
'CreateSecurityGroup': ['netadmin'],
'DeleteSecurityGroup': ['netadmin'],
'GetConsoleOutput': ['projectmanager', 'sysadmin'],
'DescribeVolumes': ['projectmanager', 'sysadmin'],
'CreateVolume': ['projectmanager', 'sysadmin'],
'AttachVolume': ['projectmanager', 'sysadmin'],
'DetachVolume': ['projectmanager', 'sysadmin'],
'DescribeInstances': ['all'],
'DescribeAddresses': ['all'],
'AllocateAddress': ['netadmin'],
'ReleaseAddress': ['netadmin'],
'AssociateAddress': ['netadmin'],
'DisassociateAddress': ['netadmin'],
'RunInstances': ['projectmanager', 'sysadmin'],
'TerminateInstances': ['projectmanager', 'sysadmin'],
'RebootInstances': ['projectmanager', 'sysadmin'],
'UpdateInstance': ['projectmanager', 'sysadmin'],
'StartInstances': ['projectmanager', 'sysadmin'],
'StopInstances': ['projectmanager', 'sysadmin'],
'DeleteVolume': ['projectmanager', 'sysadmin'],
'DescribeImages': ['all'],
'DeregisterImage': ['projectmanager', 'sysadmin'],
'RegisterImage': ['projectmanager', 'sysadmin'],
'DescribeImageAttribute': ['all'],
'ModifyImageAttribute': ['projectmanager', 'sysadmin'],
'UpdateImage': ['projectmanager', 'sysadmin'],
'CreateImage': ['projectmanager', 'sysadmin'],
},
'AdminController': {
# All actions have the same permission: ['none'] (the default)
# superusers will be allowed to run them
# all others will get HTTPUnauthorized.
},
}
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
context = req.environ['nova.context']
controller = req.environ['ec2.request'].controller.__class__.__name__
action = req.environ['ec2.request'].action
allowed_roles = self.action_roles[controller].get(action, ['none'])
if self._matches_any_role(context, allowed_roles):
return self.application
else:
LOG.audit(_('Unauthorized request for controller=%(controller)s '
'and action=%(action)s'),
{'controller': controller, 'action': action},
context=context)
raise webob.exc.HTTPUnauthorized()
def _matches_any_role(self, context, roles):
"""Return True if any role in roles is allowed in context."""
if context.is_admin:
return True
if 'all' in roles:
return True
if 'none' in roles:
return False
return any(role in context.roles for role in roles)
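# Hedged illustration, not part of the original module: role matching
# short-circuits on admin and on the 'all'/'none' sentinels before consulting
# the context roles. The namedtuple context is a stand-in for testing only.
def _demo_matches_any_role():
    import collections
    Ctx = collections.namedtuple('Ctx', ['is_admin', 'roles'])
    authz = Authorizer.__new__(Authorizer)  # skip __init__ for this sketch
    assert authz._matches_any_role(Ctx(True, []), ['none'])
    assert not authz._matches_any_role(Ctx(False, ['netadmin']), ['none'])
    assert authz._matches_any_role(Ctx(False, ['netadmin']), ['netadmin'])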
class Validator(wsgi.Middleware):
def validate_ec2_id(val):
if not validator.validate_str()(val):
return False
try:
ec2utils.ec2_id_to_id(val)
except exception.InvalidEc2Id:
return False
return True
validator.validate_ec2_id = validate_ec2_id
validator.DEFAULT_VALIDATOR = {
'instance_id': validator.validate_ec2_id,
'volume_id': validator.validate_ec2_id,
'image_id': validator.validate_ec2_id,
'attribute': validator.validate_str(),
'image_location': validator.validate_image_path,
'public_ip': utils.is_valid_ipv4,
'region_name': validator.validate_str(),
'group_name': validator.validate_str(max_length=255),
'group_description': validator.validate_str(max_length=255),
'size': validator.validate_int(),
'user_data': validator.validate_user_data
}
def __init__(self, application):
super(Validator, self).__init__(application)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if validator.validate(req.environ['ec2.request'].args,
validator.DEFAULT_VALIDATOR):
return self.application
else:
raise webob.exc.HTTPBadRequest()
def exception_to_ec2code(ex):
"""Helper to extract EC2 error code from exception.
For other than EC2 exceptions (those without ec2_code attribute),
use exception name.
"""
if hasattr(ex, 'ec2_code'):
code = ex.ec2_code
else:
code = type(ex).__name__
return code
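# Hedged illustration, not part of the original module: without an ec2_code
# attribute, the helper above falls back to the exception class name.
def _demo_exception_to_ec2code():
    assert exception_to_ec2code(ValueError("boom")) == "ValueError"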
def ec2_error_ex(ex, req, code=None, message=None, unexpected=False):
"""
    Return an EC2 error response based on the passed exception and log
    the exception at an appropriate log level:
* DEBUG: expected errors
* ERROR: unexpected errors
All expected errors are treated as client errors and 4xx HTTP
status codes are always returned for them.
    Unexpected 5xx errors may contain sensitive information, so their
    messages are suppressed for security reasons.
"""
if not code:
code = exception_to_ec2code(ex)
status = getattr(ex, 'code', None)
if not status:
status = 500
if unexpected:
log_fun = LOG.error
if ex.args and status < 500:
log_msg = _("Unexpected %(ex_name)s raised: %(ex_str)s")
else:
log_msg = _("Unexpected %(ex_name)s raised")
else:
log_fun = LOG.debug
if ex.args:
log_msg = _("%(ex_name)s raised: %(ex_str)s")
else:
log_msg = _("%(ex_name)s raised")
# NOTE(jruzicka): For compatibility with EC2 API, treat expected
# exceptions as client (4xx) errors. The exception error code is 500
# by default and most exceptions inherit this from NovaException even
# though they are actually client errors in most cases.
if status >= 500:
status = 400
context = req.environ['nova.context']
request_id = context.request_id
log_msg_args = {
'ex_name': type(ex).__name__,
'ex_str': unicode(ex)
}
log_fun(log_msg % log_msg_args, context=context)
if ex.args and not message and (not unexpected or status < 500):
message = unicode(ex.args[0])
if unexpected:
# Log filtered environment for unexpected errors.
env = req.environ.copy()
for k in env.keys():
if not isinstance(env[k], six.string_types):
env.pop(k)
log_fun(_('Environment: %s') % jsonutils.dumps(env))
if not message:
message = _('Unknown error occurred.')
return faults.ec2_error_response(request_id, code, message, status=status)
class Executor(wsgi.Application):
"""Execute an EC2 API request.
Executes 'ec2.action' upon 'ec2.controller', passing 'nova.context' and
'ec2.action_args' (all variables in WSGI environ.) Returns an XML
response, or a 400 upon failure.
"""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
context = req.environ['nova.context']
api_request = req.environ['ec2.request']
try:
result = api_request.invoke(context)
except exception.InstanceNotFound as ex:
ec2_id = ec2utils.id_to_ec2_inst_id(ex.kwargs['instance_id'])
message = ex.msg_fmt % {'instance_id': ec2_id}
return ec2_error_ex(ex, req, message=message)
except exception.VolumeNotFound as ex:
ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id'])
message = ex.msg_fmt % {'volume_id': ec2_id}
return ec2_error_ex(ex, req, message=message)
except exception.SnapshotNotFound as ex:
ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id'])
message = ex.msg_fmt % {'snapshot_id': ec2_id}
return ec2_error_ex(ex, req, message=message)
except (exception.CannotDisassociateAutoAssignedFloatingIP,
exception.FloatingIpAssociated,
exception.FloatingIpNotFound,
exception.ImageNotActive,
exception.InvalidInstanceIDMalformed,
exception.InvalidKeypair,
exception.InvalidParameterValue,
exception.InvalidPortRange,
exception.InvalidVolume,
exception.KeyPairExists,
exception.KeypairNotFound,
exception.MissingParameter,
exception.NoFloatingIpInterface,
exception.NoMoreFixedIps,
exception.NotAuthorized,
exception.QuotaError,
exception.SecurityGroupExists,
exception.SecurityGroupLimitExceeded,
exception.SecurityGroupRuleExists,
exception.VolumeUnattached,
# Following aren't translated to valid EC2 errors.
exception.ImageNotFound,
exception.ImageNotFoundEC2,
exception.InvalidAttribute,
exception.InvalidRequest,
exception.NotFound) as ex:
return ec2_error_ex(ex, req)
except Exception as ex:
return ec2_error_ex(ex, req, unexpected=True)
else:
resp = webob.Response()
resp.status = 200
resp.headers['Content-Type'] = 'text/xml'
resp.body = str(result)
return resp
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "git-describe"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "domain_event_broker/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
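# Hedged illustration, not part of versioneer: run_command tries each
# candidate binary in turn and returns (stdout, returncode), or (None, None)
# if none can be spawned. This sketch assumes git is on PATH.
def _demo_run_command():
    out, rc = run_command(["git"], ["--version"], hide_stderr=True)
    return out, rc  # e.g. ("git version 2.17.1", 0) when git is installed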
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory.
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
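# Hedged illustration, not part of versioneer: the parentdir fallback above
# just strips a known prefix from a directory name. The prefix and path are
# made up (this generated file actually sets parentdir_prefix to "None").
def _demo_versions_from_parentdir():
    info = versions_from_parentdir("domain_event_broker-",
                                   "/tmp/domain_event_broker-1.2.3",
                                   verbose=False)
    assert info["version"] == "1.2.3"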
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601-compliant
        # datestamp. However we prefer "%ci" (which expands to an
        # "ISO-8601-like" string, which we must then edit to make
        # compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
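# Hedged illustration, not part of versioneer: how a TAG-NUM-gHEX[-dirty]
# describe string is decomposed by the logic above. The sample is made up.
def _demo_parse_describe():
    out = "v1.2.0-3-gabc1234-dirty"
    dirty = out.endswith("-dirty")
    if dirty:
        out = out[:out.rindex("-dirty")]
    mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', out)
    assert (mo.group(1), int(mo.group(2)), mo.group(3), dirty) == \
        ("v1.2.0", 3, "abc1234", True)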
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
    Our goal: TAG[+DISTANCE.gHEX[.dirty]]. Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Eexceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|
|
import jsonschema
import pytest
from ..schemapi import (UndefinedType, SchemaBase, Undefined, _FromDict,
SchemaValidationError)
# Make tests inherit from _TestSchema, so that when we test from_dict it won't
# try to use SchemaBase objects defined elsewhere as wrappers.
class _TestSchema(SchemaBase):
@classmethod
def _default_wrapper_classes(cls):
return _TestSchema.__subclasses__()
class MySchema(_TestSchema):
_schema = {
'definitions': {
'StringMapping': {'type': 'object', 'additionalProperties': {'type': 'string'}},
'StringArray': {'type': 'array', 'items': {'type': 'string'}}
},
'properties': {
'a': {'$ref': '#/definitions/StringMapping'},
'a2': {'type': 'object', 'additionalProperties': {'type': 'number'}},
'b': {'$ref': '#/definitions/StringArray'},
'b2': {'type': 'array', 'items': {'type': 'number'}},
'c': {'type': ['string', 'number']},
'd': {'anyOf': [{'$ref': '#/definitions/StringMapping'},
{'$ref': '#/definitions/StringArray'}]},
'e': {'items': [{'type': 'string'}, {'type': 'string'}]}
}
}
class StringMapping(_TestSchema):
_schema = {'$ref': '#/definitions/StringMapping'}
_rootschema = MySchema._schema
class StringArray(_TestSchema):
_schema = {'$ref': '#/definitions/StringArray'}
_rootschema = MySchema._schema
class Derived(_TestSchema):
_schema = {
'definitions': {
'Foo': {
'type': 'object',
'properties': {
'd': {'type': 'string'}
}
},
'Bar': {'type': 'string', 'enum': ['A', 'B']}
},
'type': 'object',
'additionalProperties': False,
'properties': {
'a': {'type': 'integer'},
'b': {'type': 'string'},
'c': {"$ref": "#/definitions/Foo"}
}
}
class Foo(_TestSchema):
_schema = {"$ref": "#/definitions/Foo"}
_rootschema = Derived._schema
class Bar(_TestSchema):
_schema = {"$ref": "#/definitions/Bar"}
_rootschema = Derived._schema
class SimpleUnion(_TestSchema):
    _schema = {'anyOf': [{'type': 'integer'}, {'type': 'string'}]}
class DefinitionUnion(_TestSchema):
_schema = {
"anyOf": [
{"$ref": "#/definitions/Foo"},
{"$ref": "#/definitions/Bar"}
]
}
_rootschema = Derived._schema
class SimpleArray(_TestSchema):
_schema = {
'type': 'array',
'items': {
        'anyOf': [{'type': 'integer'}, {'type': 'string'}]
}
}
class InvalidProperties(_TestSchema):
_schema = {
'type': 'object',
'properties': {
'for': {},
'as': {},
'vega-lite': {},
'$schema': {}
}
}
def test_construct_multifaceted_schema():
dct = {'a': {'foo': 'bar'}, 'a2': {'foo': 42},
'b': ['a', 'b', 'c'], 'b2': [1, 2, 3], 'c': 42,
'd': ['x', 'y', 'z'], 'e': ['a', 'b']}
myschema = MySchema.from_dict(dct)
assert myschema.to_dict() == dct
myschema2 = MySchema(**dct)
assert myschema2.to_dict() == dct
assert isinstance(myschema.a, StringMapping)
assert isinstance(myschema.a2, dict)
assert isinstance(myschema.b, StringArray)
assert isinstance(myschema.b2, list)
assert isinstance(myschema.d, StringArray)
def test_schema_cases():
assert Derived(a=4, b='yo').to_dict() == {'a': 4, 'b': 'yo'}
assert Derived(a=4, c={'d': 'hey'}).to_dict() == {'a': 4, 'c': {'d': 'hey'}}
assert Derived(a=4, b='5', c=Foo(d='val')).to_dict() == {'a': 4, 'b': '5', 'c': {'d': 'val'}}
assert Foo(d='hello', f=4).to_dict() == {'d': 'hello', 'f': 4}
assert Derived().to_dict() == {}
assert Foo().to_dict() == {}
with pytest.raises(jsonschema.ValidationError):
# a needs to be an integer
Derived(a='yo').to_dict()
with pytest.raises(jsonschema.ValidationError):
# Foo.d needs to be a string
Derived(c=Foo(4)).to_dict()
with pytest.raises(jsonschema.ValidationError):
# no additional properties allowed
Derived(foo='bar').to_dict()
def test_round_trip():
D = {'a': 4, 'b': 'yo'}
assert Derived.from_dict(D).to_dict() == D
D = {'a': 4, 'c': {'d': 'hey'}}
assert Derived.from_dict(D).to_dict() == D
D = {'a': 4, 'b': '5', 'c': {'d': 'val'}}
assert Derived.from_dict(D).to_dict() == D
D = {'d': 'hello', 'f': 4}
assert Foo.from_dict(D).to_dict() == D
def test_from_dict():
D = {'a': 4, 'b': '5', 'c': {'d': 'val'}}
obj = Derived.from_dict(D)
assert obj.a == 4
assert obj.b == '5'
assert isinstance(obj.c, Foo)
def test_simple_type():
assert SimpleUnion(4).to_dict() == 4
def test_simple_array():
assert SimpleArray([4, 5, 'six']).to_dict() == [4, 5, 'six']
assert SimpleArray.from_dict(list('abc')).to_dict() == list('abc')
def test_definition_union():
obj = DefinitionUnion.from_dict("A")
assert isinstance(obj, Bar)
assert obj.to_dict() == "A"
obj = DefinitionUnion.from_dict("B")
assert isinstance(obj, Bar)
assert obj.to_dict() == "B"
obj = DefinitionUnion.from_dict({'d': 'yo'})
assert isinstance(obj, Foo)
assert obj.to_dict() == {'d': 'yo'}
def test_invalid_properties():
dct = {'for': 2, 'as': 3, 'vega-lite': 4, '$schema': 5}
invalid = InvalidProperties.from_dict(dct)
assert invalid['for'] == 2
assert invalid['as'] == 3
assert invalid['vega-lite'] == 4
assert invalid['$schema'] == 5
assert invalid.to_dict() == dct
def test_undefined_singleton():
assert Undefined is UndefinedType()
def test_copy():
dct = {'a': {'foo': 'bar'}, 'a2': {'foo': 42},
'b': ['a', 'b', 'c'], 'b2': [1, 2, 3], 'c': 42,
'd': ['x', 'y', 'z']}
myschema = MySchema.from_dict(dct)
# Make sure copy is deep
copy = myschema.copy(deep=True)
copy['a']['foo'] = 'new value'
copy['b'] = ['A', 'B', 'C']
copy['c'] = 164
assert myschema.to_dict() == dct
# If we ignore a value, changing the copy changes the original
copy = myschema.copy(deep=True, ignore=['a'])
copy['a']['foo'] = 'new value'
copy['b'] = ['A', 'B', 'C']
copy['c'] = 164
mydct = myschema.to_dict()
assert mydct['a']['foo'] == 'new value'
assert mydct['b'][0] == dct['b'][0]
assert mydct['c'] == dct['c']
# If copy is not deep, then changing copy below top level changes original
copy = myschema.copy(deep=False)
copy['a']['foo'] = 'baz'
copy['b'] = ['A', 'B', 'C']
copy['c'] = 164
mydct = myschema.to_dict()
assert mydct['a']['foo'] == 'baz'
assert mydct['b'] == dct['b']
assert mydct['c'] == dct['c']
def test_attribute_error():
m = MySchema()
with pytest.raises(AttributeError) as err:
m.invalid_attribute
assert str(err.value) == ("'MySchema' object has no attribute "
"'invalid_attribute'")
def test_to_from_json():
dct = {'a': {'foo': 'bar'}, 'a2': {'foo': 42},
'b': ['a', 'b', 'c'], 'b2': [1, 2, 3], 'c': 42,
'd': ['x', 'y', 'z'], 'e': ['g', 'h']}
json_str = MySchema.from_dict(dct).to_json()
new_dct = MySchema.from_json(json_str).to_dict()
assert new_dct == dct
def test_class_with_no_schema():
class BadSchema(SchemaBase):
pass
with pytest.raises(ValueError) as err:
BadSchema(4)
assert str(err.value).startswith("Cannot instantiate object")
@pytest.mark.parametrize('use_json', [True, False])
def test_hash_schema(use_json):
classes = _TestSchema._default_wrapper_classes()
for cls in classes:
hsh1 = _FromDict.hash_schema(cls._schema, use_json=use_json)
hsh2 = _FromDict.hash_schema(cls._schema, use_json=use_json)
assert hsh1 == hsh2
assert hash(hsh1) == hash(hsh2)
def test_schema_validation_error():
try:
MySchema(a={'foo': 4})
the_err = None
except jsonschema.ValidationError as err:
the_err = err
assert isinstance(the_err, SchemaValidationError)
message = str(the_err)
assert message.startswith('Invalid specification')
assert 'test_schemapi.MySchema->a' in message
assert "validating {!r}".format(the_err.validator) in message
assert the_err.message in message
|
|
# -*- coding: utf-8 -*-
"""
sphinx.util.jsdump
~~~~~~~~~~~~~~~~~~
This module implements a simple JavaScript serializer.
Uses the basestring encode function from simplejson by Bob Ippolito.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from six import iteritems, integer_types, string_types
from sphinx.util.pycompat import u
_str_re = re.compile(r'"(\\\\|\\"|[^"])*"')
_int_re = re.compile(r'\d+')
_name_re = re.compile(r'[a-zA-Z]\w*')
_nameonly_re = re.compile(r'[a-zA-Z]\w*$')
# escape \, ", control characters and everything outside ASCII
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
ESCAPE_DICT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
ESCAPED = re.compile(r'\\u.{4}|\\.')
def encode_string(s):
def replace(match):
s = match.group(0)
try:
return ESCAPE_DICT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
def decode_string(s):
return ESCAPED.sub(lambda m: eval(u + '"' + m.group() + '"'), s)
reswords = set("""\
abstract else instanceof switch
boolean enum int synchronized
break export interface this
byte extends long throw
case false native throws
catch final new transient
char finally null true
class float package try
const for private typeof
continue function protected var
debugger goto public void
default if return volatile
delete implements short while
do import static with
double in super""".split())
def dumps(obj, key=False):
if key:
if not isinstance(obj, string_types):
obj = str(obj)
if _nameonly_re.match(obj) and obj not in reswords:
return obj # return it as a bare word
else:
return encode_string(obj)
if obj is None:
return 'null'
elif obj is True or obj is False:
return obj and 'true' or 'false'
elif isinstance(obj, integer_types + (float,)):
return str(obj)
elif isinstance(obj, dict):
return '{%s}' % ','.join(sorted('%s:%s' % (
dumps(key, True),
dumps(value)
) for key, value in iteritems(obj)))
elif isinstance(obj, set):
return '[%s]' % ','.join(sorted(dumps(x) for x in obj))
elif isinstance(obj, (tuple, list)):
return '[%s]' % ','.join(dumps(x) for x in obj)
elif isinstance(obj, string_types):
return encode_string(obj)
raise TypeError(type(obj))
def dump(obj, f):
f.write(dumps(obj))
def loads(x):
"""Loader that can read the JS subset the indexer produces."""
nothing = object()
i = 0
n = len(x)
stack = []
obj = nothing
key = False
keys = []
while i < n:
c = x[i]
if c == '{':
obj = {}
stack.append(obj)
key = True
keys.append(nothing)
i += 1
elif c == '[':
obj = []
stack.append(obj)
key = False
keys.append(nothing)
i += 1
elif c in '}]':
if key:
if keys[-1] is not nothing:
raise ValueError("unfinished dict")
# empty dict
key = False
oldobj = stack.pop()
keys.pop()
if stack:
obj = stack[-1]
if isinstance(obj, dict):
if keys[-1] is nothing:
raise ValueError("invalid key object", oldobj)
obj[keys[-1]] = oldobj
else:
obj.append(oldobj)
else:
break
i += 1
elif c == ',':
if key:
raise ValueError("multiple keys")
if isinstance(obj, dict):
key = True
i += 1
elif c == ':':
if not isinstance(obj, dict):
raise ValueError("colon in list")
i += 1
if not key:
raise ValueError("multiple values")
key = False
else:
m = _str_re.match(x, i)
if m:
y = decode_string(m.group()[1:-1])
else:
m = _int_re.match(x, i)
if m:
y = int(m.group())
else:
m = _name_re.match(x, i)
if m:
y = m.group()
if y == 'true':
y = True
elif y == 'false':
y = False
elif y == 'null':
y = None
elif not key:
raise ValueError("bareword as value")
else:
raise ValueError("read error at pos %d" % i)
i = m.end()
if isinstance(obj, dict):
if key:
keys[-1] = y
else:
obj[keys[-1]] = y
key = False
else:
obj.append(y)
if obj is nothing:
raise ValueError("nothing loaded from string")
return obj
def load(f):
return loads(f.read())
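# Hedged illustration, not part of sphinx: dumps() emits bare words for
# identifier-safe keys and quotes the rest (including JS reserved words);
# loads() reads that subset back.
def _demo_jsdump_roundtrip():
    obj = {'docnames': ['index'], 'class': 1, 'n': 2}
    s = dumps(obj)
    # 'class' is a JS reserved word, so it stays quoted; the others do not.
    assert s == '{"class":1,docnames:["index"],n:2}'
    assert loads(s) == obj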
|
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from workspace_tools.paths import *
from workspace_tools.data.support import *
TEST_CMSIS_LIB = join(TEST_DIR, "cmsis", "lib")
TEST_MBED_LIB = join(TEST_DIR, "mbed", "env")
PERIPHERALS = join(TEST_DIR, "peripherals")
BENCHMARKS_DIR = join(TEST_DIR, "benchmarks")
SD = join(TEST_DIR, "sd")
TMP102 = join(PERIPHERALS, 'TMP102')
"""
Wiring:
* Ground:
* LPC1*: p1
* KL25Z: GND
* Vout
* LPC1*: p40
* KL25Z: P3V3
* TMP102 (I2C):
* LPC1*: (SDA=p28 , SCL=p27)
* KL25Z: (SDA=PTC9, SCL=PTC8)
* MAXWSNENV: (SDA=TP6, SCL=TP5)
* digital_loop (Digital(In|Out|InOut), InterruptIn):
* Arduino headers: (D0 <-> D7)
* LPC1549: (D2 <-> D7)
* LPC1*: (p5 <-> p25 )
* KL25Z: (PTA5<-> PTC6)
* NUCLEO_F103RB: (PC_6 <-> PB_8)
* MAXWSNENV: (TP3 <-> TP4)
* MAX32600MBED: (P1_0 <-> P4_7)
* port_loop (Port(In|Out|InOut)):
* Arduino headers: (D0 <-> D7), (D1 <-> D6)
* LPC1*: (p5 <-> p25), (p6 <-> p26)
* KL25Z: (PTA5 <-> PTC6), (PTA4 <-> PTC5)
* NUCLEO_F103RB: (PC_6 <-> PB_8), (PC_5 <-> PB_9)
* MAXWSNENV: (TP1 <-> TP3), (TP2 <-> TP4)
* MAX32600MBED: (P1_0 <-> P4_7), (P1_1 <-> P4_6)
* analog_loop (AnalogIn, AnalogOut):
* Arduino headers: (A0 <-> A5)
* LPC1549: (A0 <-> D12)
* LPC1*: (p17 <-> p18 )
* KL25Z: (PTE30 <-> PTC2)
* analog_pot (AnalogIn):
* Arduino headers: (A0, A1)
* SD (SPI):
* LPC1*: (mosi=p11 , miso=p12 , sclk=p13 , cs=p14 )
* KL25Z: (mosi=PTD2, miso=PTD3, sclk=PTD1, cs=PTD0)
* MMA7660 (I2C):
* LPC1*: (SDA=p28 , SCL=p27)
* i2c_loop:
* LPC1768: (p28 <-> p9), (p27 <-> p10)
* i2c_eeprom:
* LPC1*: (SDA=p28 , SCL=p27)
* KL25Z: (SDA=PTE0, SCL=PTE1)
* can_transceiver:
* LPC1768: (RX=p9, TX=p10)
* LPC1549: (RX=D9, TX=D8)
* LPC4088: (RX=p9, TX=p10)
"""
TESTS = [
# Automated MBED tests
{
"id": "MBED_A1", "description": "Basic",
"source_dir": join(TEST_DIR, "mbed", "basic"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
},
{
"id": "MBED_A2", "description": "Semihost file system",
"source_dir": join(TEST_DIR, "mbed", "file"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC2368", "LPC11U24"]
},
{
"id": "MBED_A3", "description": "C++ STL",
"source_dir": join(TEST_DIR, "mbed", "stl"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False,
},
{
"id": "MBED_A4", "description": "I2C TMP102",
"source_dir": join(TEST_DIR, "mbed", "i2c_TMP102"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, TMP102],
"automated": True,
"peripherals": ["TMP102"]
},
{
"id": "MBED_A5", "description": "DigitalIn DigitalOut",
"source_dir": join(TEST_DIR, "mbed", "digitalin_digitalout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["digital_loop"]
},
{
"id": "MBED_A6", "description": "DigitalInOut",
"source_dir": join(TEST_DIR, "mbed", "digitalinout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["digital_loop"]
},
{
"id": "MBED_A7", "description": "InterruptIn",
"source_dir": join(TEST_DIR, "mbed", "interruptin"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
"peripherals": ["digital_loop"]
},
{
"id": "MBED_A8", "description": "Analog",
"source_dir": join(TEST_DIR, "mbed", "analog"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["analog_loop"],
"mcu": ["LPC1768", "LPC2368", "LPC2460", "KL25Z", "K64F", "K22F", "LPC4088", "LPC1549",
"NUCLEO_F072RB", "NUCLEO_F091RC", "NUCLEO_F302R8", "NUCLEO_F303RE",
"NUCLEO_F334R8", "NUCLEO_L053R8", "NUCLEO_L073RZ", "NUCLEO_L152RE",
"NUCLEO_F411RE", "NUCLEO_F446RE", "DISCO_F407VG", "DISCO_F746NG", "ARCH_MAX", "MAX32600MBED"]
},
{
"id": "MBED_A9", "description": "Serial Echo at 115200",
"source_dir": join(TEST_DIR, "mbed", "echo"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "echo"
},
{
"id": "MBED_A10", "description": "PortOut PortIn",
"source_dir": join(TEST_DIR, "mbed", "portout_portin"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["port_loop"],
"supported": DEFAULT_SUPPORT,
"automated": True,
},
{
"id": "MBED_A11", "description": "PortInOut",
"source_dir": join(TEST_DIR, "mbed", "portinout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["port_loop"],
"supported": DEFAULT_SUPPORT,
"automated": True,
},
{
"id": "MBED_A12", "description": "SD File System",
"source_dir": join(TEST_DIR, "mbed", "sd"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"duration": 15,
"peripherals": ["SD"]
},
{
"id": "MBED_A13", "description": "I2C MMA7660 accelerometer",
"source_dir": join(TEST_DIR, "mbed", "i2c_MMA7660"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'MMA7660')],
"automated": True,
"peripherals": ["MMA7660"]
},
{
"id": "MBED_A14", "description": "I2C Master",
"source_dir": join(TEST_DIR, "mbed", "i2c_master"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A15", "description": "I2C Slave",
"source_dir": join(TEST_DIR, "mbed", "i2c_slave"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A16", "description": "SPI Master",
"source_dir": join(TEST_DIR, "mbed", "spi_master"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A17", "description": "SPI Slave",
"source_dir": join(TEST_DIR, "mbed", "spi_slave"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A18", "description": "Interrupt vector relocation",
"source_dir": join(TEST_DIR, "mbed", "vtor_reloc"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
"mcu": ["LPC1768"],
"automated": True,
},
{
"id": "MBED_A19", "description": "I2C EEPROM read/write test",
"source_dir": join(TEST_DIR, "mbed", "i2c_eeprom"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["24LC256"],
"automated": True,
"duration": 15,
},
{
"id": "MBED_A20", "description": "I2C master/slave test",
"source_dir": join(TEST_DIR, "mbed", "i2c_master_slave"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
"mcu": ["LPC1768", "RZ_A1H"],
"peripherals": ["i2c_loop"]
},
{
"id": "MBED_A21", "description": "Call function before main (mbed_main)",
"source_dir": join(TEST_DIR, "mbed", "call_before_main"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
},
{
"id": "MBED_A22", "description": "SPIFI for LPC4088 (test 1)",
"source_dir": join(TEST_DIR, "mbed", "spifi1"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"duration": 30,
"mcu": ["LPC4088","LPC4088_DM"]
},
{
"id": "MBED_A23", "description": "SPIFI for LPC4088 (test 2)",
"source_dir": join(TEST_DIR, "mbed", "spifi2"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"duration": 30,
"mcu": ["LPC4088","LPC4088_DM"]
},
{
"id": "MBED_A24", "description": "Serial echo with RTS/CTS flow control",
"source_dir": join(TEST_DIR, "mbed", "echo_flow_control"),
"dependencies": [MBED_LIBRARIES],
"automated": "True",
"host_test": "echo_flow_control",
"mcu": ["LPC1768"],
"peripherals": ["extra_serial"]
},
{
"id": "MBED_A25", "description": "I2C EEPROM line read/write test",
"source_dir": join(TEST_DIR, "mbed", "i2c_eeprom_line"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["24LC256"],
"automated": True,
"duration": 10,
},
{
"id": "MBED_A26", "description": "AnalogIn potentiometer test",
"source_dir": join(TEST_DIR, "mbed", "analog_pot"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["analog_pot"],
"automated": True,
"duration": 10,
},
{
"id": "MBED_A27", "description": "CAN loopback test",
"source_dir": join(TEST_DIR, "mbed", "can_loopback"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"duration": 20,
"peripherals": ["can_transceiver"],
"mcu": ["LPC1549", "LPC1768"],
},
{
"id": "MBED_BLINKY", "description": "Blinky",
"source_dir": join(TEST_DIR, "mbed", "blinky"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False,
},
{
"id": "MBED_BUS", "description": "Blinky BUS",
"source_dir": join(TEST_DIR, "mbed", "bus"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False,
"duration": 15,
},
{
"id": "MBED_BUSOUT", "description": "BusOut",
"source_dir": join(TEST_DIR, "mbed", "bus_out"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"duration": 15,
},
# Size benchmarks
{
"id": "BENCHMARK_1", "description": "Size (c environment)",
"source_dir": join(BENCHMARKS_DIR, "cenv"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_2", "description": "Size (float math)",
"source_dir": join(BENCHMARKS_DIR, "float_math"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_3", "description": "Size (printf)",
"source_dir": join(BENCHMARKS_DIR, "printf"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_4", "description": "Size (mbed libs)",
"source_dir": join(BENCHMARKS_DIR, "mbed"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_5", "description": "Size (all)",
"source_dir": join(BENCHMARKS_DIR, "all"),
"dependencies": [MBED_LIBRARIES]
},
# performance related tests
{
"id": "PERF_1", "description": "SD Stdio R/W Speed",
"source_dir": join(TEST_DIR, "mbed", "sd_perf_stdio"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"duration": 15,
"peripherals": ["SD"]
},
{
"id": "PERF_2", "description": "SD FileHandle R/W Speed",
"source_dir": join(TEST_DIR, "mbed", "sd_perf_fhandle"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"duration": 15,
"peripherals": ["SD"]
},
{
"id": "PERF_3", "description": "SD FatFS R/W Speed",
"source_dir": join(TEST_DIR, "mbed", "sd_perf_fatfs"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"duration": 15,
"peripherals": ["SD"]
},
# Not automated MBED tests
{
"id": "MBED_1", "description": "I2C SRF08",
"source_dir": join(TEST_DIR, "mbed", "i2c_SRF08"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'SRF08')],
"peripherals": ["SRF08"]
},
{
"id": "MBED_2", "description": "stdio",
"source_dir": join(TEST_DIR, "mbed", "stdio"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 20,
"automated": True,
#"host_test": "stdio_auto"
},
{
"id": "MBED_3", "description": "PortOut",
"source_dir": join(TEST_DIR, "mbed", "portout"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_4", "description": "Sleep",
"source_dir": join(TEST_DIR, "mbed", "sleep"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 30,
"mcu": ["LPC1768", "LPC11U24", "LPC4088","LPC4088_DM","NRF51822", "LPC11U68"]
},
{
"id": "MBED_5", "description": "PWM",
"source_dir": join(TEST_DIR, "mbed", "pwm"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_6", "description": "SW Reset",
"source_dir": join(TEST_DIR, "mbed", "reset"),
"dependencies": [MBED_LIBRARIES],
"duration": 15
},
{
"id": "MBED_7", "description": "stdio benchmark",
"source_dir": join(TEST_DIR, "mbed", "stdio_benchmark"),
"dependencies": [MBED_LIBRARIES],
"duration": 40
},
{
"id": "MBED_8", "description": "SPI",
"source_dir": join(TEST_DIR, "mbed", "spi"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_9", "description": "Sleep Timeout",
"source_dir": join(TEST_DIR, "mbed", "sleep_timeout"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_10", "description": "Hello World",
"source_dir": join(TEST_DIR, "mbed", "hello"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "hello_auto",
},
{
"id": "MBED_11", "description": "Ticker Int",
"source_dir": join(TEST_DIR, "mbed", "ticker"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "wait_us_auto",
"duration": 20,
},
{
"id": "MBED_12", "description": "C++",
"source_dir": join(TEST_DIR, "mbed", "cpp"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True
},
{
"id": "MBED_13", "description": "Heap & Stack",
"source_dir": join(TEST_DIR, "mbed", "heap_and_stack"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_14", "description": "Serial Interrupt",
"source_dir": join(TEST_DIR, "mbed", "serial_interrupt"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_15", "description": "RPC",
"source_dir": join(TEST_DIR, "mbed", "rpc"),
"dependencies": [MBED_LIBRARIES, join(LIB_DIR, "rpc"), TEST_MBED_LIB],
"automated": False,
"mcu": ["LPC1768"]
},
{
"id": "MBED_16", "description": "RTC",
"source_dir": join(TEST_DIR, "mbed", "rtc"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "rtc_auto",
"duration": 15
},
{
"id": "MBED_17", "description": "Serial Interrupt 2",
"source_dir": join(TEST_DIR, "mbed", "serial_interrupt_2"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_18", "description": "Local FS Directory",
"source_dir": join(TEST_DIR, "mbed", "dir"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_19", "description": "SD FS Directory",
"source_dir": join(TEST_DIR, "mbed", "dir_sd"),
"dependencies": [MBED_LIBRARIES, FS_LIBRARY],
"peripherals": ["SD"]
},
{
"id": "MBED_20", "description": "InterruptIn 2",
"source_dir": join(TEST_DIR, "mbed", "interruptin_2"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_21", "description": "freopen Stream",
"source_dir": join(TEST_DIR, "mbed", "freopen"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_22", "description": "Semihost",
"source_dir": join(TEST_DIR, "mbed", "semihost"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC2368", "LPC11U24"]
},
{
"id": "MBED_23", "description": "Ticker Int us",
"source_dir": join(TEST_DIR, "mbed", "ticker_2"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_24", "description": "Timeout Int us",
"source_dir": join(TEST_DIR, "mbed", "timeout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_25", "description": "Time us",
"source_dir": join(TEST_DIR, "mbed", "time_us"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_26", "description": "Integer constant division",
"source_dir": join(TEST_DIR, "mbed", "div"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
},
{
"id": "MBED_27", "description": "SPI ADXL345",
"source_dir": join(TEST_DIR, "mbed", "spi_ADXL345"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'ADXL345')],
"peripherals": ["ADXL345"]
},
{
"id": "MBED_28", "description": "Interrupt chaining (InterruptManager)",
"source_dir": join(TEST_DIR, "mbed", "interrupt_chaining"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_29", "description": "CAN network test",
"source_dir": join(TEST_DIR, "mbed", "can"),
"dependencies": [MBED_LIBRARIES],
"mcu": ["LPC1768", "LPC4088", "LPC1549", "RZ_A1H"]
},
{
"id": "MBED_30", "description": "CAN network test using interrupts",
"source_dir": join(TEST_DIR, "mbed", "can_interrupt"),
"dependencies": [MBED_LIBRARIES],
"mcu": ["LPC1768", "LPC4088", "LPC1549", "RZ_A1H"]
},
{
"id": "MBED_31", "description": "PWM LED test",
"source_dir": join(TEST_DIR, "mbed", "pwm_led"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_32", "description": "Pin toggling",
"source_dir": join(TEST_DIR, "mbed", "pin_toggling"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_33", "description": "C string operations",
"source_dir": join(TEST_DIR, "mbed", "cstring"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 10,
"automated": False,
},
{
"id": "MBED_34", "description": "Ticker Two callbacks",
"source_dir": join(TEST_DIR, "mbed", "ticker_3"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_35", "description": "SPI C12832 display",
"source_dir": join(TEST_DIR, "mbed", "spi_C12832"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'C12832')],
"peripherals": ["C12832"],
"automated": True,
"duration": 10,
},
{
"id": "MBED_36", "description": "WFI correct behavior",
"source_dir": join(TEST_DIR, "mbed", "wfi"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True
},
{
"id": "MBED_37", "description": "Serial NC RX",
"source_dir": join(TEST_DIR, "mbed", "serial_nc_rx"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True
},
{
"id": "MBED_38", "description": "Serial NC TX",
"source_dir": join(TEST_DIR, "mbed", "serial_nc_tx"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True
},
# CMSIS RTOS tests
{
"id": "CMSIS_RTOS_1", "description": "Basic",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "basic"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_2", "description": "Mutex",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "mutex"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
"duration": 20
},
{
"id": "CMSIS_RTOS_3", "description": "Semaphore",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "semaphore"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
"duration": 20
},
{
"id": "CMSIS_RTOS_4", "description": "Signals",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "signals"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_5", "description": "Queue",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "queue"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
"duration": 20
},
{
"id": "CMSIS_RTOS_6", "description": "Mail",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "mail"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
"duration": 20
},
{
"id": "CMSIS_RTOS_7", "description": "Timer",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "timer"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_8", "description": "ISR",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "isr"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
# mbed RTOS tests
{
"id": "RTOS_1", "description": "Basic thread",
"source_dir": join(TEST_DIR, "rtos", "mbed", "basic"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto",
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG"],
},
{
"id": "RTOS_2", "description": "Mutex resource lock",
"source_dir": join(TEST_DIR, "rtos", "mbed", "mutex"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"duration": 20,
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG"],
},
{
"id": "RTOS_3", "description": "Semaphore resource lock",
"source_dir": join(TEST_DIR, "rtos", "mbed", "semaphore"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"duration": 20,
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG"],
},
{
"id": "RTOS_4", "description": "Signals messaging",
"source_dir": join(TEST_DIR, "rtos", "mbed", "signals"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG"],
},
{
"id": "RTOS_5", "description": "Queue messaging",
"source_dir": join(TEST_DIR, "rtos", "mbed", "queue"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG"],
},
{
"id": "RTOS_6", "description": "Mail messaging",
"source_dir": join(TEST_DIR, "rtos", "mbed", "mail"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG"],
},
{
"id": "RTOS_7", "description": "Timer",
"source_dir": join(TEST_DIR, "rtos", "mbed", "timer"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"duration": 15,
"automated": True,
#"host_test": "wait_us_auto",
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG"],
},
{
"id": "RTOS_8", "description": "ISR (Queue)",
"source_dir": join(TEST_DIR, "rtos", "mbed", "isr"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K64F", "KL46Z",
"RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F030R8", "NUCLEO_F070RB",
"NUCLEO_L053R8", "DISCO_L053C8", "NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG"],
},
{
"id": "RTOS_9", "description": "SD File write-read",
"source_dir": join(TEST_DIR, "rtos", "mbed", "file"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"peripherals": ["SD"],
"mcu": ["LPC1768", "LPC11U24", "LPC812", "KL25Z",
"KL05Z", "K64F", "KL46Z", "RZ_A1H",
"DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "NUCLEO_F401RE"],
},
# Networking Tests
{
"id": "NET_1", "description": "TCP client hello world",
"source_dir": join(TEST_DIR, "net", "helloworld", "tcpclient"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"duration": 15,
"automated": True,
"peripherals": ["ethernet"],
},
{
"id": "NET_2", "description": "NIST Internet Time Service",
"source_dir": join(TEST_DIR, "net", "helloworld", "udpclient"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"duration": 15,
"automated": True,
"peripherals": ["ethernet"],
},
{
"id": "NET_3", "description": "TCP echo server",
"source_dir": join(TEST_DIR, "net", "echo", "tcp_server"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test" : "tcpecho_server_auto",
"peripherals": ["ethernet"],
},
{
"id": "NET_4", "description": "TCP echo client",
"source_dir": join(TEST_DIR, "net", "echo", "tcp_client"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test": "tcpecho_client_auto",
"peripherals": ["ethernet"]
},
{
"id": "NET_5", "description": "UDP echo server",
"source_dir": join(TEST_DIR, "net", "echo", "udp_server"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test" : "udpecho_server_auto",
"peripherals": ["ethernet"]
},
{
"id": "NET_6", "description": "UDP echo client",
"source_dir": join(TEST_DIR, "net", "echo", "udp_client"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test" : "udpecho_client_auto",
"peripherals": ["ethernet"],
},
{
"id": "NET_7", "description": "HTTP client hello world",
"source_dir": join(TEST_DIR, "net", "protocols", "HTTPClient_HelloWorld"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
"duration": 15,
"peripherals": ["ethernet"],
},
{
"id": "NET_8", "description": "NTP client",
"source_dir": join(TEST_DIR, "net", "protocols", "NTPClient_HelloWorld"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
"peripherals": ["ethernet"],
},
{
"id": "NET_9", "description": "Multicast Send",
"source_dir": join(TEST_DIR, "net", "helloworld", "multicast_send"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_10", "description": "Multicast Receive",
"source_dir": join(TEST_DIR, "net", "helloworld", "multicast_receive"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_11", "description": "Broadcast Send",
"source_dir": join(TEST_DIR, "net", "helloworld", "broadcast_send"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_12", "description": "Broadcast Receive",
"source_dir": join(TEST_DIR, "net", "helloworld", "broadcast_receive"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_13", "description": "TCP client echo loop",
"source_dir": join(TEST_DIR, "net", "echo", "tcp_client_loop"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
"duration": 15,
#"host_test": "tcpecho_client_auto",
"peripherals": ["ethernet"],
},
{
"id": "NET_14", "description": "UDP PHY/Data link layer",
"source_dir": join(TEST_DIR, "net", "echo", "udp_link_layer"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"automated": False,
"duration": 20,
"host_test": "udp_link_layer_auto",
"peripherals": ["ethernet"],
},
# u-blox tests
{
"id": "UB_1", "description": "u-blox USB modem: HTTP client",
"source_dir": [join(TEST_DIR, "net", "cellular", "http", "ubloxusb"), join(TEST_DIR, "net", "cellular", "http", "common")],
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, USB_HOST_LIBRARIES, UBLOX_LIBRARY],
"supported": CORTEX_ARM_SUPPORT,
},
{
"id": "UB_2", "description": "u-blox USB modem: SMS test",
"source_dir": [join(TEST_DIR, "net", "cellular", "sms", "ubloxusb"), join(TEST_DIR, "net", "cellular", "sms", "common")],
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, USB_HOST_LIBRARIES, UBLOX_LIBRARY],
"supported": CORTEX_ARM_SUPPORT,
},
# USB Tests
{
"id": "USB_1", "description": "Mouse",
"source_dir": join(TEST_DIR, "usb", "device", "basic"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_2", "description": "Keyboard",
"source_dir": join(TEST_DIR, "usb", "device", "keyboard"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_3", "description": "Mouse_Keyboard",
"source_dir": join(TEST_DIR, "usb", "device", "keyboard"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_4", "description": "Serial Port",
"source_dir": join(TEST_DIR, "usb", "device", "serial"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
"supported": CORTEX_ARM_SUPPORT,
},
{
"id": "USB_5", "description": "Generic HID",
"source_dir": join(TEST_DIR, "usb", "device", "raw_hid"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_6", "description": "MIDI",
"source_dir": join(TEST_DIR, "usb", "device", "midi"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_7", "description": "AUDIO",
"source_dir": join(TEST_DIR, "usb", "device", "audio"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
# CMSIS DSP
{
"id": "CMSIS_DSP_1", "description": "FIR",
"source_dir": join(TEST_DIR, "dsp", "cmsis", "fir_f32"),
"dependencies": [MBED_LIBRARIES, DSP_LIBRARIES],
},
# mbed DSP
{
"id": "DSP_1", "description": "FIR",
"source_dir": join(TEST_DIR, "dsp", "mbed", "fir_f32"),
"dependencies": [MBED_LIBRARIES, DSP_LIBRARIES],
},
# KL25Z
{
"id": "KL25Z_1", "description": "LPTMR",
"source_dir": join(TEST_DIR, "KL25Z", "lptmr"),
"dependencies": [MBED_LIBRARIES],
"supported": CORTEX_ARM_SUPPORT,
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_2", "description": "PIT",
"source_dir": join(TEST_DIR, "KL25Z", "pit"),
"dependencies": [MBED_LIBRARIES],
"supported": CORTEX_ARM_SUPPORT,
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_3", "description": "TSI Touch Sensor",
"source_dir": join(TEST_DIR, "mbed", "tsi"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'TSI')],
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_4", "description": "RTC",
"source_dir": join(TEST_DIR, "KL25Z", "rtc"),
"dependencies": [MBED_LIBRARIES],
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_5", "description": "MMA8451Q accelerometer",
"source_dir": join(TEST_DIR, "mbed", "i2c_MMA8451Q"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'MMA8451Q')],
"mcu": ["KL25Z", "KL05Z", "KL46Z", "K20D50M"],
"automated": True,
"duration": 15,
},
# Examples
{
"id": "EXAMPLE_1", "description": "/dev/null",
"source_dir": join(TEST_DIR, "mbed", "dev_null"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test" : "dev_null_auto",
},
{
"id": "EXAMPLE_2", "description": "FS + RTOS",
"source_dir": join(TEST_DIR, "mbed", "fs"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
},
# The CPPUTEST library provides a unit testing framework
#
# To write TESTs and TEST_GROUPs, please add CPPUTEST_LIBRARY to 'dependencies'
#
# This will also include:
# 1. test runner - main function with a call to CommandLineTestRunner::RunAllTests(ac, av)
# 2. Serial console object to print test results on the serial port console
#
# Unit testing with cpputest library
{
"id": "UT_1", "description": "Basic",
"source_dir": join(TEST_DIR, "utest", "basic"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_2", "description": "Semihost file system",
"source_dir": join(TEST_DIR, "utest", "semihost_fs"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
"mcu": ["LPC1768", "LPC2368", "LPC11U24"]
},
{
"id": "UT_3", "description": "General tests",
"source_dir": join(TEST_DIR, "utest", "general"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_BUSIO", "description": "BusIn BusOut",
"source_dir": join(TEST_DIR, "utest", "bus"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_I2C_EEPROM_ASYNCH", "description": "I2C Asynch eeprom",
"source_dir": join(TEST_DIR, "utest", "i2c_eeprom_asynch"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_SERIAL_ASYNCH", "description": "Asynch serial test (req 2 serial peripherals)",
"source_dir": join(TEST_DIR, "utest", "serial_asynch"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_SPI_ASYNCH", "description": "Asynch spi test",
"source_dir": join(TEST_DIR, "utest", "spi_asynch"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_LP_TICKER", "description": "Low power ticker test",
"source_dir": join(TEST_DIR, "utest", "lp_ticker"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
# Tests used for target information purposes
{
"id": "DTCT_1", "description": "Simple detect test",
"source_dir": join(TEST_DIR, "mbed", "detect"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test" : "detect_auto",
},
]
# Group tests with the same goals into categories
GROUPS = {
"core": ["MBED_A1", "MBED_A2", "MBED_A3", "MBED_A18"],
"digital_io": ["MBED_A5", "MBED_A6", "MBED_A7", "MBED_A10", "MBED_A11"],
"analog_io": ["MBED_A8"],
"i2c": ["MBED_A19", "MBED_A20"],
"spi": ["MBED_A12"],
}
GROUPS["rtos"] = [test["id"] for test in TESTS if test["id"].startswith("RTOS_")]
GROUPS["net"] = [test["id"] for test in TESTS if test["id"].startswith("NET_")]
GROUPS["automated"] = [test["id"] for test in TESTS if test.get("automated", False)]
# Look for 'TEST_GROUPS' in private_settings.py and update the GROUPS dictionary
# with the information in TEST_GROUPS if found
try:
from workspace_tools.private_settings import TEST_GROUPS
except ImportError:
TEST_GROUPS = {}
GROUPS.update(TEST_GROUPS)
class Test:
DEFAULTS = {
#'mcu': None,
'description': None,
'dependencies': None,
'duration': 10,
'host_test': 'host_test',
'automated': False,
'peripherals': None,
#'supported': None,
'source_dir': None,
'extra_files': None
}
def __init__(self, n):
self.n = n
self.__dict__.update(Test.DEFAULTS)
self.__dict__.update(TESTS[n])
def is_supported(self, target, toolchain):
if hasattr(self, 'mcu') and not target in self.mcu:
return False
if not hasattr(self, 'supported'):
return True
return (target in self.supported) and (toolchain in self.supported[target])
def get_description(self):
if self.description:
return self.description
else:
return self.id
def __cmp__(self, other):
return cmp(self.n, other.n)
def __str__(self):
return "[%3d] %s: %s" % (self.n, self.id, self.get_description())
def __getitem__(self, key):
if key == "id": return self.id
elif key == "mcu": return self.mcu
elif key == "dependencies": return self.dependencies
elif key == "description": return self.description
elif key == "duration": return self.duration
elif key == "host_test": return self.host_test
elif key == "automated": return self.automated
elif key == "peripherals": return self.peripherals
elif key == "supported": return self.supported
elif key == "source_dir": return self.source_dir
elif key == "extra_files": return self.extra_files
else:
return None
TEST_MAP = dict([(test['id'], Test(i)) for i, test in enumerate(TESTS)])
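# Illustrative usage sketch (helper added for this write-up, not part of the
# original build scripts): selecting the automated tests that a given target
# and toolchain can run, using TEST_MAP and the GROUPS defined above.
def example_select_automated_tests(target="LPC1768", toolchain="ARM"):
    """Return the automated Test objects supported by target/toolchain."""
    return [TEST_MAP[test_id] for test_id in GROUPS["automated"]
            if TEST_MAP[test_id].is_supported(target, toolchain)]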
|
|
#!/usr/bin/env python
"""Unittest for grr frontend server."""
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr.lib import communicator
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import flags
from grr.lib import flow
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
class SendingTestFlow(flow.GRRFlow):
"""Tests that sent messages are correctly collected."""
@flow.StateHandler(next_state="Incoming")
def Start(self):
for i in range(10):
self.CallClient("Test",
rdfvalue.DataBlob(string="test%s" % i),
data=str(i),
next_state="Incoming")
class GRRFEServerTest(test_lib.FlowTestsBaseclass):
"""Tests the GRRFEServer."""
string = "Test String"
def setUp(self):
"""Setup the server."""
super(GRRFEServerTest, self).setUp()
# Whitelist test flow.
config_lib.CONFIG.Set("Frontend.well_known_flows", [utils.SmartStr(
test_lib.WellKnownSessionTest.well_known_session_id)])
# For tests, small pools are ok.
config_lib.CONFIG.Set("Threadpool.size", 10)
prefix = "pool-%s" % self._testMethodName
self.server = flow.FrontEndServer(
certificate=config_lib.CONFIG["Frontend.certificate"],
private_key=config_lib.CONFIG["PrivateKeys.server_key"],
threadpool_prefix=prefix)
def CheckMessages(self, left, right):
"""Compares two lists of messages for equality.
Args:
left: A list of GrrMessage
right: A list of (task, GrrMessage)
Returns:
True if they are the same.
"""
if len(right) != len(left):
return False
for i in range(len(right)):
if left[i] != right[i][1]:
return False
return True
def testReceiveMessages(self):
"""Test Receiving messages with no status."""
flow_obj = self.FlowSetup("FlowOrderTest")
session_id = flow_obj.session_id
messages = [rdfvalue.GrrMessage(request_id=1,
response_id=i,
session_id=session_id,
args=str(i))
for i in range(1, 10)]
self.server.ReceiveMessages(self.client_id, messages)
# Make sure the task is still on the client queue
manager = queue_manager.QueueManager(token=self.token)
tasks_on_client_queue = manager.Query(self.client_id, 100)
self.assertEqual(len(tasks_on_client_queue), 1)
# Check that messages were stored correctly
for message in messages:
stored_message, _ = data_store.DB.Resolve(
session_id.Add("state/request:00000001"),
manager.FLOW_RESPONSE_TEMPLATE % (1, message.response_id),
token=self.token)
stored_message = rdfvalue.GrrMessage(stored_message)
self.assertProtoEqual(stored_message, message)
return messages
def testReceiveMessagesWithStatus(self):
"""Receiving a sequence of messages with a status."""
flow_obj = self.FlowSetup("FlowOrderTest")
session_id = flow_obj.session_id
messages = [rdfvalue.GrrMessage(request_id=1,
response_id=i,
session_id=session_id,
args=str(i),
task_id=15)
for i in range(1, 10)]
# Now add the status message
status = rdfvalue.GrrStatus(status=rdfvalue.GrrStatus.ReturnedStatus.OK)
messages.append(rdfvalue.GrrMessage(
request_id=1, response_id=len(messages)+1, task_id=15,
session_id=messages[0].session_id, payload=status,
type=rdfvalue.GrrMessage.Type.STATUS))
self.server.ReceiveMessages(self.client_id, messages)
# Make sure the task is still on the client queue
manager = queue_manager.QueueManager(token=self.token)
tasks_on_client_queue = manager.Query(self.client_id, 100)
self.assertEqual(len(tasks_on_client_queue), 1)
# Check that messages were stored correctly
for message in messages:
stored_message, _ = data_store.DB.Resolve(
session_id.Add("state/request:00000001"),
manager.FLOW_RESPONSE_TEMPLATE % (1, message.response_id),
token=self.token)
stored_message = rdfvalue.GrrMessage(stored_message)
self.assertProtoEqual(stored_message, message)
def testWellKnownFlows(self):
"""Make sure that well known flows can run on the front end."""
test_lib.WellKnownSessionTest.messages = []
session_id = test_lib.WellKnownSessionTest.well_known_session_id
messages = [rdfvalue.GrrMessage(request_id=0,
response_id=0,
session_id=session_id,
args=str(i))
for i in range(1, 10)]
self.server.ReceiveMessages(self.client_id, messages)
# Wait for async actions to complete
self.server.thread_pool.Join()
test_lib.WellKnownSessionTest.messages.sort()
# Well known flows are now directly processed on the front end
self.assertEqual(test_lib.WellKnownSessionTest.messages,
list(range(1, 10)))
# There should be nothing in the client_queue
self.assertEqual([], data_store.DB.ResolveRegex(self.client_id, "task:.*",
token=self.token))
def testWellKnownFlowsRemote(self):
"""Make sure that flows that do not exist on the front end get scheduled."""
test_lib.WellKnownSessionTest.messages = []
session_id = test_lib.WellKnownSessionTest.well_known_session_id
messages = [rdfvalue.GrrMessage(request_id=0,
response_id=0,
session_id=session_id,
args=str(i))
for i in range(1, 10)]
# Delete the local well known flow cache so that it is empty.
self.server.well_known_flows = {}
self.server.ReceiveMessages(self.client_id, messages)
# Wait for async actions to complete
self.server.thread_pool.Join()
# None get processed now
self.assertEqual(test_lib.WellKnownSessionTest.messages, [])
# There should be nothing in the client_queue
self.assertEqual([], data_store.DB.ResolveRegex(self.client_id, "task:.*",
token=self.token))
# The well known flow messages should be waiting in the flow state now:
queued_messages = []
for predicate, _, _ in data_store.DB.ResolveRegex(
session_id.Add("state/request:00000000"), "flow:.*", token=self.token):
queued_messages.append(predicate)
self.assertEqual(len(queued_messages), 9)
def testDrainUpdateSessionRequestStates(self):
"""Draining the flow requests and preparing messages."""
# This flow sends 10 messages on Start()
flow_obj = self.FlowSetup("SendingTestFlow")
session_id = flow_obj.session_id
# There should be 10 messages in the client's task queue
manager = queue_manager.QueueManager(token=self.token)
tasks = manager.Query(self.client_id, 100)
self.assertEqual(len(tasks), 10)
# Check that the request state objects have the correct task_id set
# in the client_queue:
for task in tasks:
request_id = task.request_id
# Retrieve the request state for this request_id
request_state, _ = data_store.DB.Resolve(
session_id.Add("state"),
manager.FLOW_REQUEST_TEMPLATE % request_id,
token=self.token)
request_state = rdfvalue.RequestState(request_state)
# Check that task_id for the client message is correctly set in
# request_state.
self.assertEqual(request_state.request.task_id, task.task_id)
# Now ask the server to drain the outbound messages into the
# message list.
response = rdfvalue.MessageList()
self.server.DrainTaskSchedulerQueueForClient(
self.client_id, 5, response)
# Check that we received only as many messages as we asked for
self.assertEqual(len(response.job), 5)
for i in range(4):
self.assertEqual(response.job[i].session_id, session_id)
self.assertEqual(response.job[i].name, "Test")
def testUpdateAndCheckIfShouldThrottle(self):
self.server.SetThrottleBundlesRatio(1.0)
# Let's assume that requests are flowing in every 10 seconds
self.server.UpdateAndCheckIfShouldThrottle(0)
self.server.UpdateAndCheckIfShouldThrottle(10)
self.server.UpdateAndCheckIfShouldThrottle(20)
self.server.UpdateAndCheckIfShouldThrottle(30)
self.server.UpdateAndCheckIfShouldThrottle(40)
self.server.UpdateAndCheckIfShouldThrottle(50)
self.server.UpdateAndCheckIfShouldThrottle(60)
self.server.SetThrottleBundlesRatio(0.3)
# Now: average interval between requests is 10 seconds
# According to throttling logic, requests will only be allowed if
# the interval between them is 10 / 0.3 = 33.3 seconds
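# (The implied reference point appears to be the last allowed request at
# t=60: the calls at t=70/80/90 fall inside the 33.3 second window and are
# throttled, and the one at t=100, 40 seconds later, is allowed through.)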
result = self.server.UpdateAndCheckIfShouldThrottle(70)
self.assertEquals(result, True)
result = self.server.UpdateAndCheckIfShouldThrottle(80)
self.assertEquals(result, True)
result = self.server.UpdateAndCheckIfShouldThrottle(90)
self.assertEquals(result, True)
result = self.server.UpdateAndCheckIfShouldThrottle(100)
self.assertEquals(result, False)
result = self.server.UpdateAndCheckIfShouldThrottle(110)
self.assertEquals(result, True)
result = self.server.UpdateAndCheckIfShouldThrottle(120)
self.assertEquals(result, True)
result = self.server.UpdateAndCheckIfShouldThrottle(130)
self.assertEquals(result, True)
result = self.server.UpdateAndCheckIfShouldThrottle(140)
self.assertEquals(result, False)
# Now we throttle everything
self.server.SetThrottleBundlesRatio(0)
result = self.server.UpdateAndCheckIfShouldThrottle(141)
self.assertEquals(result, True)
result = self.server.UpdateAndCheckIfShouldThrottle(142)
self.assertEquals(result, True)
result = self.server.UpdateAndCheckIfShouldThrottle(143)
self.assertEquals(result, True)
# Now we turn throttling off
self.server.SetThrottleBundlesRatio(None)
result = self.server.UpdateAndCheckIfShouldThrottle(144)
self.assertEquals(result, False)
result = self.server.UpdateAndCheckIfShouldThrottle(145)
self.assertEquals(result, False)
result = self.server.UpdateAndCheckIfShouldThrottle(146)
self.assertEquals(result, False)
def testHandleMessageBundle(self):
"""Check that HandleMessageBundles() requeues messages if it failed.
This test makes sure that when messages are pending for a client for which
we have no certificate, the messages are requeued when sending fails.
"""
# Make a new fake client
client_id = rdfvalue.ClientURN("C." + "2" * 16)
class MockCommunicator(object):
"""A fake that simulates an unenrolled client."""
def DecodeMessages(self, *unused_args):
"""For simplicity client sends an empty request."""
return ([], client_id, 100)
def EncodeMessages(self, *unused_args, **unused_kw):
"""Raise because the server has no certificates for this client."""
raise communicator.UnknownClientCert()
# Install the mock.
self.server._communicator = MockCommunicator()
# First request, the server will raise UnknownClientCert.
request_comms = rdfvalue.ClientCommunication()
self.assertRaises(communicator.UnknownClientCert,
self.server.HandleMessageBundles, request_comms, 2)
# We can still schedule a flow for it
flow.GRRFlow.StartFlow(client_id=client_id, flow_name="SendingFlow",
message_count=1, token=self.token)
manager = queue_manager.QueueManager(token=self.token)
tasks = manager.Query(client_id, limit=100)
self.assertRaises(communicator.UnknownClientCert,
self.server.HandleMessageBundles, request_comms, 2)
new_tasks = manager.Query(client_id, limit=100)
# The difference in eta times reflects the lease that the server took on the
# client messages.
lease_time = (new_tasks[0].eta - tasks[0].eta)/1e6
# This lease time must be small: since the HandleMessageBundles() call
# failed, the pending client messages must be put back on the queue.
self.assert_(lease_time < 1)
# Since the server tried to send it, the ttl must be decremented
self.assertEqual(tasks[0].task_ttl - new_tasks[0].task_ttl, 1)
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
|
|
from __future__ import division
from collections import defaultdict
import numpy
from scipy import ndimage
DISTANCE = numpy.sqrt([
2., 1., 2.,
1., 1., 1.,
2., 1., 2.
])
DIR_MAP = dict(zip(range(9), [32, 64, 128, 16, -1, 1, 8, 4, 2]))
FLOWS_IN = numpy.array([2, 4, 8, 1, numpy.nan, 16, 128, 64, 32])
def _stack_neighbors(topo, radius=1, **padkwargs):
""" Create a MxNx9 array of neighbors for each element of a MxN
array.
Creates a MxNx9 array where each layer represents all of the
adjacent values at a give row/col. Input array is edge padded to
handle the blocks on edges and corners.
Parameters
----------
topo : numpy array (MxN)
An array representing a digital elevation model (DEM)
radius : int
Search radius for stacking the neighbors. A radius = 1 implies
all eight immediately adjacent cells (plus the actual cell)
**padkwargs : optional parameters
Additional keyword arguments passed to numpy.pad
Returns
-------
blocked : numpy array (MxNx9)
Array of neighbors where `blocked[row, col, :].reshape(3, 3)`
returns the block of cells adjacent to and including
`topo[row, col]`
See Also
--------
numpy.pad
References
----------
`Stack Overflow <http://goo.gl/Y3h8ti>`_
"""
mode = padkwargs.pop('mode')
if mode == 'constant':
pad_width = ((radius, radius), (radius, radius))
elif mode == 'edge':
pad_width = radius
else:
raise NotImplementedError("only 'edge' and 'constant' modes are supported")
# pad the edges
padded = numpy.pad(topo, pad_width=pad_width, mode=mode, **padkwargs)
# new rows and cols count
M, N = padded.shape
# width is typically 3 -- side length of each
# block that defines the neighbors
width = radius * 2 + 1
row_length = N - width + 1
col_length = M - width + 1
# Linear indices for the starting width-by-width block
idx1 = numpy.arange(width)[:, None] * N + numpy.arange(width)
# Offset (from the starting block indices) linear indices for all the blocks
idx2 = numpy.arange(col_length)[:, None] * N + numpy.arange(row_length)
# Finally, get the linear indices for all blocks
all_idx = idx1.ravel()[None, None, :] + idx2[:, :, None]
# Index into padded for the final output
blocked = padded.ravel()[all_idx]
return blocked
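# Minimal sketch (demo helper, not part of the original module): for a 3x3
# input, layer [1, 1] of the result reshapes back into the full array, i.e.
# the center cell's neighborhood is the whole (edge-padded) input.
def _example_stack_neighbors():
    topo = numpy.arange(9, dtype=float).reshape(3, 3)
    blocked = _stack_neighbors(topo, radius=1, mode='edge')
    assert blocked.shape == (3, 3, 9)
    assert (blocked[1, 1, :].reshape(3, 3) == topo).all()
    return blocked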
def _adjacent_slopes(topo):
""" Compute the slope from each to cell to all of its neighbors.
Parameters
----------
topo : numpy array (MxN)
Elevation data
Returns
-------
slope : numpy array (MxNx9)
3-D array where the z-axis is the unraveled slope array of
each cell's neighbors.
Notes
-----
Downward slopes are represented with positive numbers.
See Also
--------
watershed.algo._stack_neighbors
"""
# initial array shape
rows, cols = topo.shape
# make 3-D array of each cell's neighbors
blocks = _stack_neighbors(topo, radius=1, mode='edge')
# change in elevation (dz)
drop = topo.reshape(rows, cols, 1) - blocks
# Slope (dz/dx) masked to exclude uphill directions
slope = numpy.ma.masked_less_equal(drop / DISTANCE, 0)
return slope
def _mark_sinks(topo):
""" Marks sink areas in a DEM.
Parameters
----------
topo : numpy array
Elevation data
Returns
-------
sink : numpy array
Bool array. True values indicate the cell is (in) a sink.
"""
# compute the slopes in every direction at each cell
slope = _adjacent_slopes(topo)
# find where there is no downward slope
sinks = slope.data.max(axis=2) == 0
# remove 'sinks' on the edges of the array
sinks[(0, -1), :] = False
sinks[:, (0, -1)] = False
return sinks
def fill_sinks(topo, copy=True):
""" Fills sink areas in a DEM with the lowest adjacent elevation.
Parameters
----------
topo : numpy array
Elevation data
copy : bool, optional
When True, operates on a copy of the `topo` array. Set to
False if memory is a concern.
Returns
-------
filled : numpy array
Numpy array with all the sinks filled
See Also
--------
watershed.algo.flow_direction_d8
watershed.algo.trace_upstream
watershed.algo.flow_accumulation
"""
if copy:
_topo = topo.copy()
else:
_topo = topo
sinks = _mark_sinks(_topo)
blocks = _stack_neighbors(_topo, radius=1, mode='edge')
# return if there are no sinks to fill
if sinks.sum() == 0:
return _topo
else:
# loop through each sink and set its elevation to that of
# its lowest neighbor
rows, cols = numpy.where(sinks)
for r, c in zip(rows, cols):
# select all of the neighbors
neighbors = blocks[r, c, :]
# select the uphill neighbors
higher = neighbors[neighbors > _topo[r, c]]
# if we have uphill neighbors, take the closest one.
# otherwise, we'll come back to this when we recurse.
if higher.shape[0] > 0:
_topo[r, c] = higher.min()
# recursively go back and check that all the
# sinks were filled
return fill_sinks(_topo, copy=copy)
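# Hedged usage sketch (demo helper, not part of the original module): a pit
# on an otherwise draining surface is raised until it has a downhill
# neighbor. Here the 0.0 at [2, 1] ends up at 3.0 after two passes; raising
# it only to 2.0 would leave it flat relative to its eastern neighbors.
def _example_fill_sinks():
    topo = numpy.array([
        [4., 3., 2., 1.],
        [4., 3., 2., 1.],
        [4., 0., 2., 1.],
        [4., 3., 2., 1.],
    ])
    filled = fill_sinks(topo)
    assert filled[2, 1] == 3.0
    return filled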
def _process_edges(slope, direction):
""" Handles edges and corners of the a flow-direction array.
When edges and corners do not flow into the interior of the
array, they need to flow out of the array.
"""
# shape of the raster
rows, cols = direction.shape
# where no flow direction could be computed
_r, _c = numpy.where(slope.mask.all(axis=2))
# no direction cells on the top row flow up
toprow = defaultdict(lambda: 64)
# top-row corners flow out at angles
toprow.update({0: 32, cols - 1: 128})
# no direction cells on the bottom flow down
bottomrow = defaultdict(lambda: 4)
# bottom row corners
bottomrow.update({0: 8, cols - 1: 2})
# set up the main look-up dictionary
# non-top or bottom cells flow left or right
missing_directions = defaultdict(lambda: {0: 16, cols - 1: 1})
# add top/bottom rows to the main dictionary
missing_directions.update({0: toprow, rows - 1: bottomrow})
# loop through all of the cells w/o flow direction
for r, c in zip(_r, _c):
if r in [0, rows - 1] or c in [0, cols - 1]:
direction[r, c] = missing_directions[r][c]
else:
# raise an error if we didn't clean up an internal sink
msg = "internal sink at ({}, {})".format(int(r), int(c))
raise ValueError(msg)
return direction
def flow_direction_d8(topo):
""" Compute the flow direction of topographic data.
Flow Directions from cell X:
32 64 128
16 X 1
8 4 2
Parameters
----------
topo : numpy array
Elevation data
Returns
-------
direction : numpy array
Flow directions as defined in the references below.
See Also
--------
watershed.fill_sinks
watershed.trace_upstream
watershed.flow_accumulation
References
----------
`<http://onlinelibrary.wiley.com/doi/10.1029/96WR03137/pdf>`_
"""
# initial array shape
rows, cols = topo.shape
slope = _adjacent_slopes(topo)
# location of the steepest slope
index = slope.argmax(axis=2)
direction = numpy.array([
DIR_MAP.get(x, -1) for x in index.flat
]).reshape(rows, cols)
return _process_edges(slope, direction)
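# Minimal sketch (demo helper, not part of the original module): on a plane
# tilted down toward the east, every cell should drain east (code 1); the
# fully-masked NE and SE corners are pushed out at 128 and 2 by
# _process_edges.
def _example_flow_direction():
    topo = numpy.array([
        [3., 2., 1., 0.],
        [3., 2., 1., 0.],
        [3., 2., 1., 0.],
        [3., 2., 1., 0.],
    ])
    return flow_direction_d8(topo)  # all 1s except [0, 3] -> 128, [3, 3] -> 2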
def _trace_upstream(flow_dir, blocks, is_upstream, row, col):
""" Recursively traces all cells upstream from the specified cell.
Parameters
----------
flow_dir : numpy array (MxM)
Array defining the flow direction of each cell
blocks : numpy array (MxNx9)
Layers of arrays for each neighbor for each cell
(see _stack_neighbors)
is_upstream : numpy array (MxN)
Bool-like array where cells set to 1 (True) are upstream of the
given cell in the flow network
row, col : int
Indices of the cells from which the upstream network should be
traced.
Returns
-------
None
.. note:: Acts in-place on `is_upstream`
.. note:: Called by the public function `trace_upstream`
See Also
--------
watershed.algo.flow_direction_d8
watershed.algo._stack_neighbors
watershed.algo.trace_upstream
"""
row = int(row)
col = int(col)
if is_upstream[row, col] == 0:
# we consider a cell to be upstream of itself
is_upstream[row, col] = 1
# flow direction of a cell's neighbors
neighbors = blocks[row, col, :].reshape(3, 3)
# indices of neighbors that flow into this cell
matches = numpy.where(neighbors == FLOWS_IN.reshape(3, 3))
# recurse on all of the neighbors
for rn, cn in zip(*matches):
_trace_upstream(flow_dir, blocks, is_upstream, row + rn - 1, col + cn - 1)
def trace_upstream(flow_dir, row, col):
""" Trace the upstream network from a cell based on flow direction.
Parameters
----------
flow_dir : numpy array (MxM)
Array defining the flow direction of each cell.
row, col : int
Indices of the cells from which the upstream network should be
traced.
Returns
-------
is_upstream : numpy array (MxN)
Bool-like array where cells set to 1 (True) are upstream of the
given cell in the flow network.
See Also
--------
watershed.algo.fill_sinks
watershed.algo.flow_direction_d8
watershed.algo.flow_accumulation
"""
is_upstream = numpy.zeros_like(flow_dir)
# create the neighborhoods
blocks = _stack_neighbors(flow_dir, radius=1, mode='constant')
_trace_upstream(flow_dir, blocks, is_upstream, row, col)
return is_upstream
def mask_topo(topo, row, col, zoom_factor=1, mask_upstream=False):
""" Block out all cells that are not upstream from a specific cell.
Parameters
----------
topo : numpy array (MxN)
An array representing a digital elevation model (DEM)
row, col : int
Indices of the cells from which the upstream network should be
traced.
zoom_factor : float, optional
Factor by which the image should be zoomed. Should typically be
less than 1 to effectively coarsen very high resolution DEMs
so that flat areas or depressions don't truncate the trace.
mask_upstream : bool, optional
If False (default), all cells *not* upstream of `topo[row, col]`
will be masked. Otherwise, the upstream cells will be masked.
Returns
-------
topo_masked : numpy masked array (MxN)
Masked array where all cells not upstream of `topo[row, col]`
are masked out.
"""
# apply the zoom_factor
_topo = ndimage.zoom(topo, zoom_factor, order=0)
_row, _col = map(lambda x: numpy.floor(x * zoom_factor), (row, col))
# determine the flow direction
flow_dir = flow_direction_d8(_topo)
# trace upstream on the zoomed DEM
_upstream = trace_upstream(flow_dir, _row, _col)
# unzoom the upstream mask
upstream = ndimage.zoom(_upstream, zoom_factor**-1, order=0)
# apply the mask
if mask_upstream:
return numpy.ma.masked_array(data=topo, mask=upstream)
else:
return numpy.ma.masked_array(data=topo, mask=numpy.logical_not(upstream))
def flow_accumulation(flow_dir):
""" Compute the flow accumulation from flow directions.
Determines the number of cells flowing into every cell
in an array representing flow direction.
Parameters
----------
flow_dir : numpy array
Array representing flow direction.
Returns
-------
flow_acc : numpy array
Array representing the flow accumulation for each
cell in the input `flow_dir` array.
See Also
--------
watershed.algo.fill_sinks
watershed.algo.flow_direction_d8
watershed.algo.trace_upstream
References
----------
`Esri's flow accumulation example <http://goo.gl/57r7SU>`_
"""
# initialize the output array
flow_acc = numpy.zeros_like(flow_dir)
for row in range(flow_acc.shape[0]):
for col in range(flow_acc.shape[1]):
flow_acc[row, col] = trace_upstream(flow_dir, row, col).sum() - 1
return flow_acc
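# End-to-end sketch (demo helper, not part of the original module) chaining
# the public pieces: on the east-tilted plane from the example above, the
# accumulation should grow 0, 1, 2, 3 across every row.
def _example_flow_accumulation():
    topo = numpy.array([
        [3., 2., 1., 0.],
        [3., 2., 1., 0.],
        [3., 2., 1., 0.],
        [3., 2., 1., 0.],
    ])
    flow_dir = flow_direction_d8(fill_sinks(topo))
    return flow_accumulation(flow_dir)  # each row is [0, 1, 2, 3]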
|
|
import unittest
import functools
import numpy
from operator import mul
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import conv
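# Note added for illustration: the expected output spatial sizes used below
# come from chainer.utils.conv.get_conv_outsize. Without cover_all this
# should reduce to floor((d + 2*p - k) / s) + 1; e.g. d=5, k=3, s=2, p=1
# gives (5 + 2 - 3) // 2 + 1 = 3.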
@testing.parameterize(*(testing.product({
'dims': [(5,), (4, 3), (3, 4, 3)],
'cover_all': [True, False],
'c_contiguous': [True],
'x_dtype': [numpy.float32],
'W_dtype': [numpy.float32],
'autotune': [True, False],
}) + testing.product({
'dims': [(4,)],
'cover_all': [False],
'c_contiguous': [False],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
'autotune': [False],
})))
class TestConvolutionND(unittest.TestCase):
def setUp(self):
in_channels = 3
out_channels = 2
ndim = len(self.dims)
ksize = (3,) * ndim
self.stride = (2,) * ndim
self.pad = (1,) * ndim
W_scale = numpy.sqrt(1. / functools.reduce(mul, ksize, in_channels))
W_shape = (out_channels, in_channels) + ksize
self.W = numpy.random.normal(0, W_scale, W_shape).astype(self.W_dtype)
self.b = numpy.random.uniform(-1, 1, out_channels).astype(self.x_dtype)
x_shape = (2, 3) + self.dims
self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.x_dtype)
gy_shape = (2, 2) + tuple(
conv.get_conv_outsize(d, k, s, p, cover_all=self.cover_all)
for (d, k, s, p) in zip(self.dims, ksize, self.stride, self.pad))
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(self.x_dtype)
self.check_forward_options = {}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 3e-5, 'rtol': 3e-4}
if self.x_dtype == numpy.float16 or self.W_dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 2 ** -4, 'rtol': 2 ** -4}
self.ggx = numpy.random.uniform(-1, 1, self.x.shape).astype(
self.x_dtype)
self.ggW = numpy.random.uniform(-1, 1, self.W.shape).astype(
self.W_dtype)
self.ggb = numpy.random.uniform(-1, 1, self.b.shape).astype(
self.x_dtype)
def check_forward_consistency(self, nobias=False, use_cudnn='never'):
x_cpu = chainer.Variable(self.x)
W_cpu = chainer.Variable(self.W)
b_cpu = None if nobias else chainer.Variable(self.b)
y_cpu = F.convolution_nd(
x_cpu, W_cpu, b_cpu, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
x_gpu = chainer.Variable(cuda.to_gpu(self.x))
W_gpu = chainer.Variable(cuda.to_gpu(self.W))
b_gpu = None if nobias else chainer.Variable(cuda.to_gpu(self.b))
with chainer.using_config('use_cudnn', use_cudnn):
with chainer.using_config('autotune', self.autotune):
y_gpu = F.convolution_nd(
x_gpu, W_gpu, b_gpu, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
testing.assert_allclose(
y_cpu.data, y_gpu.data, **self.check_forward_options)
@attr.cudnn
def test_forward_consistency(self):
self.check_forward_consistency(nobias=False, use_cudnn='always')
@attr.cudnn
def test_forward_consistency_nobias(self):
self.check_forward_consistency(nobias=True, use_cudnn='always')
@attr.gpu
def test_forward_consistency_im2col(self):
self.check_forward_consistency(nobias=False, use_cudnn='never')
@attr.gpu
def test_forward_consistency_im2col_nobias(self):
self.check_forward_consistency(nobias=True, use_cudnn='never')
def check_forward_consistency_regression(self, nobias=False):
x = chainer.Variable(self.x)
W = chainer.Variable(self.W)
b = None if nobias else chainer.Variable(self.b)
with chainer.using_config('use_cudnn', 'never'):
y_nd = F.convolution_nd(
x, W, b, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
y_2d = F.convolution_2d(
x, W, b, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
testing.assert_allclose(
y_nd.data, y_2d.data, **self.check_forward_options)
def test_forward_consistency_regression(self):
# Regression test to convolution_2d.
if len(self.dims) == 2:
self.check_forward_consistency_regression(nobias=False)
def test_forward_consistency_regression_nobias(self):
# Regression test to convolution_2d.
if len(self.dims) == 2:
self.check_forward_consistency_regression(nobias=True)
def check_backward(self, x_data, W_data, b_data, y_grad,
use_cudnn='never'):
xp = cuda.get_array_module(x_data)
if not self.c_contiguous:
x_data = xp.asfortranarray(x_data)
W_data = xp.asfortranarray(W_data)
y_grad = xp.asfortranarray(y_grad)
self.assertTrue(x_data.flags.f_contiguous)
self.assertTrue(W_data.flags.f_contiguous)
self.assertTrue(y_grad.flags.f_contiguous)
if b_data is not None:
b = xp.empty((len(b_data) * 2,), dtype=b_data.dtype)
b[::2] = b_data
b_data = b[::2]
self.assertFalse(b_data.flags.c_contiguous)
args = (x_data, W_data)
if b_data is not None:
args += (b_data,)
def f(*args):
return F.convolution_nd(
*args, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
with chainer.using_config('use_cudnn', use_cudnn):
with chainer.using_config('autotune', self.autotune):
gradient_check.check_backward(
f, args, y_grad, **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.W, self.b, self.gy)
@condition.retry(3)
def test_backward_cpu_nobias(self):
self.check_backward(self.x, self.W, None, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
cuda.to_gpu(self.b), cuda.to_gpu(self.gy),
use_cudnn='always')
@attr.cudnn
@condition.retry(3)
def test_backward_gpu_nobias(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
None, cuda.to_gpu(self.gy),
use_cudnn='always')
@attr.gpu
@condition.retry(3)
def test_backward_gpu_im2col(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
cuda.to_gpu(self.b), cuda.to_gpu(self.gy),
use_cudnn='never')
@attr.gpu
@condition.retry(3)
def test_backward_gpu_im2col_nobias(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
None, cuda.to_gpu(self.gy),
use_cudnn='never')
def check_double_backward(self, x_data, W_data, b_data, y_grad,
x_grad_grad, W_grad_grad, b_grad_grad,
use_cudnn='always'):
xp = cuda.get_array_module(x_data)
if not self.c_contiguous:
x_data = xp.asfortranarray(x_data)
W_data = xp.asfortranarray(W_data)
y_grad = xp.asfortranarray(y_grad)
x_grad_grad = xp.asfortranarray(x_grad_grad)
W_grad_grad = xp.asfortranarray(W_grad_grad)
self.assertFalse(x_data.flags.c_contiguous)
self.assertFalse(W_data.flags.c_contiguous)
self.assertFalse(y_grad.flags.c_contiguous)
self.assertFalse(x_grad_grad.flags.c_contiguous)
self.assertFalse(W_grad_grad.flags.c_contiguous)
if b_data is not None:
b = xp.empty((len(b_data) * 2,), dtype=self.b.dtype)
b[::2] = b_data
b_data = b[::2]
self.assertFalse(b_data.flags.c_contiguous)
ggb = xp.empty((len(b_data) * 2,), dtype=self.b.dtype)
ggb[::2] = b_grad_grad
b_grad_grad = ggb[::2]
self.assertFalse(b_grad_grad.flags.c_contiguous)
args = (x_data, W_data)
grad_grads = (x_grad_grad, W_grad_grad)
if b_data is not None:
args += (b_data,)
grad_grads += (b_grad_grad,)
def f(*args):
y = F.convolution_nd(*args, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
return y * y # make the function nonlinear
with chainer.using_config('use_cudnn', use_cudnn):
with chainer.using_config('autotune', self.autotune):
gradient_check.check_double_backward(
f, args, y_grad, grad_grads,
dtype='d', atol=5e-3, rtol=5e-2)
@condition.retry(3)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.W, self.b, self.gy,
self.ggx, self.ggW, self.ggb,
use_cudnn='always')
@condition.retry(3)
def test_double_backward_cpu_nobias(self):
self.check_double_backward(self.x, self.W, None, self.gy,
self.ggx, self.ggW, None,
use_cudnn='always')
def check_double_backward_gpu(self, bias=True, im2col=False):
use_cudnn = 'never' if im2col else 'always'
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.W),
cuda.to_gpu(self.b) if bias else None,
cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx), cuda.to_gpu(self.ggW),
cuda.to_gpu(self.ggb) if bias else None,
use_cudnn=use_cudnn)
@attr.gpu
@condition.retry(3)
def test_double_backward_gpu(self):
self.check_double_backward_gpu()
@attr.gpu
@condition.retry(3)
def test_double_backward_gpu_nobias(self):
self.check_double_backward_gpu(bias=False)
@attr.gpu
@condition.retry(3)
def test_double_backward_gpu_im2col(self):
self.check_double_backward_gpu(im2col=True)
@attr.gpu
@condition.retry(3)
def test_double_backward_gpu_im2col_nobias(self):
self.check_double_backward_gpu(bias=False, im2col=True)
@testing.parameterize(*testing.product({
'dims': [(10,), (10, 8), (10, 8, 6)],
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestConvolutionNDCudnnCall(unittest.TestCase):
def setUp(self):
in_channels = 3
out_channels = 2
ndim = len(self.dims)
ksize = (3,) * ndim
self.stride = (2,) * ndim
self.pad = (1,) * ndim
x_shape = (2, 3) + self.dims
self.x = cuda.cupy.random.uniform(-1, 1, x_shape).astype(self.dtype)
W_scale = numpy.sqrt(1. / functools.reduce(mul, ksize, in_channels))
W_shape = (out_channels, in_channels) + ksize
self.W = cuda.cupy.random.normal(
0, W_scale, W_shape).astype(self.dtype)
gy_shape = (2, 2) + tuple(
conv.get_conv_outsize(d, k, s, p) for (d, k, s, p) in zip(
self.dims, ksize, self.stride, self.pad))
self.gy = cuda.cupy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.expect = chainer.should_use_cudnn('>=auto') and ndim > 1
def forward(self):
x = chainer.Variable(cuda.to_gpu(self.x))
W = chainer.Variable(cuda.to_gpu(self.W))
return F.convolution_nd(
x, W, None, stride=self.stride, pad=self.pad)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cuda.cudnn.convolutionForward') as func:
self.forward()
self.assertEqual(func.called, self.expect)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
name = 'cupy.cuda.cudnn.convolutionBackwardData_v3'
with testing.patch(name) as func:
y.backward()
self.assertEqual(func.called, self.expect)
class TestConvolutionNDarraySupplied(unittest.TestCase):
def setUp(self):
N = 2
in_channels = 3
out_channels = 2
dtype = numpy.float32
x_shape = (N, in_channels, 3, 3, 3)
self.x_data = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
W_shape = (out_channels, in_channels, 1, 1, 1)
self.W_data = numpy.random.uniform(-1, 1, W_shape).astype(dtype)
self.b_data = numpy.random.uniform(-1, 1, out_channels).astype(dtype)
def check_array_supplied(self, x_ary, W_ary, b_ary):
y_ary = F.convolution_nd(x_ary, W_ary, b_ary)
x_var = chainer.Variable(x_ary)
W_var = chainer.Variable(W_ary)
b_var = chainer.Variable(b_ary)
y_var = F.convolution_nd(x_var, W_var, b_var)
testing.assert_allclose(y_ary.data, y_var.data)
def test_array_supplied_cpu(self):
self.check_array_supplied(self.x_data, self.W_data, self.b_data)
@attr.gpu
def test_array_supplied_gpu(self):
self.check_array_supplied(cuda.to_gpu(self.x_data),
cuda.to_gpu(self.W_data),
cuda.to_gpu(self.b_data))
class TestConvolutionNDBackwardNoncontiguousGradOutputs(unittest.TestCase):
# NumPy raises an error when the inputs of a dot operation are not
# contiguous. This test ensures this issue is correctly handled.
# (https://github.com/chainer/chainer/issues/2744)
# This test depends on the fact that backward() of F.sum generates
# a non-contiguous array.
def test_1(self):
n_batches = 2
in_channels = 3
out_channels = 1 # important
x_shape = (n_batches, in_channels, 4)
w_shape = (out_channels, in_channels, 3)
x = numpy.ones(x_shape, numpy.float32)
w = numpy.ones(w_shape, numpy.float32)
y = F.convolution_nd(chainer.Variable(x), w)
z = F.sum(y)
z.backward()
def test_2(self):
n_batches = 2
in_channels = 3
out_channels = 1 # important
x_shape = (n_batches, in_channels, 4)
w_shape = (out_channels, in_channels, 3)
x = numpy.ones(x_shape, numpy.float32)
w = numpy.ones(w_shape, numpy.float32)
y = F.convolution_nd(x, chainer.Variable(w))
z = F.sum(y)
z.backward()
testing.run_module(__name__, __file__)
|
|
"""
PyScript standard functions.
Functions are declared as ... functions. Methods are written as methods
(using this), but declared as functions, and then "apply()-ed" to the
instance of interest. Declaring methods on Object is a bad idea (breaks
Bokeh, jquery).
"""
import re
# Functions not covered by this lib:
# isinstance, issubclass, print, len, max, min, callable, chr, ord
FUNCTIONS = {}
METHODS = {}
FUNCTION_PREFIX = '_pyfunc_'
METHOD_PREFIX = '_pymeth_'
def get_std_info(code):
""" Given the JS code for a std function or method, determine the
number of arguments, function_deps and method_deps.
"""
_, _, nargs = code.splitlines()[0].partition('nargs:')
nargs = [int(i.strip()) for i in nargs.strip().replace(',', ' ').split(' ') if i]
# Collect dependencies on other funcs/methods
sep = FUNCTION_PREFIX
function_deps = [part.split('(')[0].strip() for part in code.split(sep)[1:]]
sep = METHOD_PREFIX
method_deps = [part.split('.')[0].strip() for part in code.split(sep)[1:]]
# Reduce and sort
function_deps = sorted(set(function_deps))
method_deps = sorted(set(method_deps))
# Filter
function_deps = [dep for dep in function_deps if dep not in method_deps]
function_deps = set([dep for dep in function_deps if dep in FUNCTIONS])
method_deps = set([dep for dep in method_deps if dep in METHODS])
# Recurse
for dep in list(function_deps):
_update_deps(FUNCTIONS[dep], function_deps, method_deps)
for dep in list(method_deps):
_update_deps(METHODS[dep], function_deps, method_deps)
return nargs, sorted(function_deps), sorted(method_deps)
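# Minimal sketch (demo helper, not part of the original module): the header
# comment of each stdlib entry is parsed for its argument counts. For the
# 'getattr' entry defined below, which has no stdlib dependencies, this
# should give ([2, 3], [], []).
def _example_get_std_info():
    return get_std_info(FUNCTIONS['getattr'])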
def _update_deps(code, function_deps, method_deps):
""" Given the code of a dependency, recursively resolve additional dependencies.
"""
# Collect deps
sep = FUNCTION_PREFIX
new_function_deps = [part.split('(')[0].strip() for part in code.split(sep)[1:]]
sep = METHOD_PREFIX
new_method_deps = [part.split('.')[0].strip() for part in code.split(sep)[1:]]
# Update
new_function_deps = set(new_function_deps).difference(function_deps)
new_method_deps = set(new_method_deps).difference(method_deps)
function_deps.update(new_function_deps)
method_deps.update(new_method_deps)
# Recurse
for dep in new_function_deps:
_update_deps(FUNCTIONS[dep], function_deps, method_deps)
for dep in new_method_deps:
_update_deps(METHODS[dep], function_deps, method_deps)
return function_deps, method_deps
def get_partial_std_lib(func_names, method_names, indent=0,
func_prefix=None, method_prefix=None):
""" Get the code for the PyScript standard library consisting of
the given function and method names. The given indent specifies how
many sets of 4 spaces to prepend.
"""
func_prefix = 'var ' + FUNCTION_PREFIX if (func_prefix is None) else func_prefix
method_prefix = 'var ' + METHOD_PREFIX if (method_prefix is None) else method_prefix
lines = []
for name in sorted(func_names):
code = FUNCTIONS[name].strip()
if '\n' not in code:
code = code.rsplit('//', 1)[0].rstrip() # strip comment from one-liners
lines.append('%s%s = %s;' % (func_prefix, name, code))
for name in sorted(method_names):
code = METHODS[name].strip()
# lines.append('Object.prototype.%s%s = %s;' % (METHOD_PREFIX, name, code))
lines.append('%s%s = %s;' % (method_prefix, name, code))
code = '\n'.join(lines)
if indent:
lines = [' '*indent + line for line in code.splitlines()]
code = '\n'.join(lines)
return code
def get_full_std_lib(indent=0):
""" Get the code for the full PyScript standard library.
The given indent specifies how many sets of 4 spaces to prepend.
If the full stdlib is made available in JavaScript, multiple
snippets of code can be transpiled without inlined stdlib parts by
using ``py2js(..., inline_stdlib=False)``.
"""
return get_partial_std_lib(FUNCTIONS.keys(), METHODS.keys(), indent)
# todo: now that we have modules, we can have shorter/no prefixes, right?
# -> though maybe we use them for string replacement somewhere?
def get_all_std_names():
""" Get list if function names and methods names in std lib.
"""
return ([FUNCTION_PREFIX + f for f in FUNCTIONS],
[METHOD_PREFIX + f for f in METHODS])
## ----- Functions
## Special functions: not really in buildins, but important enough to support
FUNCTIONS['perf_counter'] = """function() { // nargs: 0
if (typeof(process) === "undefined"){return performance.now()*1e-3;}
else {var t = process.hrtime(); return t[0] + t[1]*1e-9;}
}""" # Work in nodejs and browser
FUNCTIONS['time'] = """function () {return Date.now() / 1000;} // nargs: 0"""
## Hardcore functions
FUNCTIONS['op_instantiate'] = """function (ob, args) { // nargs: 2
if ((typeof ob === "undefined") ||
(typeof window !== "undefined" && window === ob) ||
(typeof global !== "undefined" && global === ob))
{throw "Class constructor is called as a function.";}
for (var name in ob) {
if (Object[name] === undefined &&
typeof ob[name] === 'function' && !ob[name].nobind) {
ob[name] = ob[name].bind(ob);
}
}
if (ob.__init__) {
ob.__init__.apply(ob, args);
}
}"""
FUNCTIONS['hasattr'] = """function (ob, name) { // nargs: 2
return (ob !== undefined) && (ob !== null) && (ob[name] !== undefined);
}"""
FUNCTIONS['getattr'] = """function (ob, name, deflt) { // nargs: 2 3
var has_attr = ob !== undefined && ob !== null && ob[name] !== undefined;
if (has_attr) {return ob[name];}
else if (arguments.length == 3) {return deflt;}
else {var e = Error(name); e.name='AttributeError'; throw e;}
}"""
FUNCTIONS['setattr'] = """function (ob, name, value) { // nargs: 3
ob[name] = value;
}"""
FUNCTIONS['delattr'] = """function (ob, name) { // nargs: 2
delete ob[name];
}"""
FUNCTIONS['dict'] = """function (x) {
var t, i, keys, r={};
if (Array.isArray(x)) {
for (i=0; i<x.length; i++) {
t=x[i]; r[t[0]] = t[1];
}
} else {
keys = Object.keys(x);
for (i=0; i<keys.length; i++) {
t=keys[i]; r[t] = x[t];
}
}
return r;
}"""
FUNCTIONS['list'] = """function (x) {
var r=[];
if (typeof x==="object" && !Array.isArray(x)) {x = Object.keys(x)}
for (var i=0; i<x.length; i++) {
r.push(x[i]);
}
return r;
}"""
FUNCTIONS['range'] = """function (start, end, step) {
var i, res = [];
var val = start;
var n = (end - start) / step;
for (i=0; i<n; i++) {
res.push(val);
val += step;
}
return res;
}"""
## Normal functions
FUNCTIONS['pow'] = 'Math.pow // nargs: 2'
FUNCTIONS['sum'] = """function (x) { // nargs: 1
return x.reduce(function(a, b) {return a + b;});
}"""
FUNCTIONS['round'] = 'Math.round // nargs: 1'
FUNCTIONS['int'] = """function (x) { // nargs: 1
return x<0 ? Math.ceil(x): Math.floor(x);
}"""
FUNCTIONS['float'] = 'Number // nargs: 1'
FUNCTIONS['str'] = 'String // nargs: 0 1'
FUNCTIONS['repr'] = """function (x) { // nargs: 1
var res = JSON.stringify(x);
if (typeof res === 'undefined') { res = String(x); }
return res;
}"""
FUNCTIONS['bool'] = """function (x) { // nargs: 1
return Boolean(FUNCTION_PREFIXtruthy(x));
}"""
FUNCTIONS['abs'] = 'Math.abs // nargs: 1'
FUNCTIONS['divmod'] = """function (x, y) { // nargs: 2
var m = x % y; return [(x-m)/y, m];
}"""
FUNCTIONS['all'] = """function (x) { // nargs: 1
for (var i=0; i<x.length; i++) {
if (!FUNCTION_PREFIXtruthy(x[i])){return false;}
} return true;
}"""
FUNCTIONS['any'] = """function (x) { // nargs: 1
for (var i=0; i<x.length; i++) {
if (FUNCTION_PREFIXtruthy(x[i])){return true;}
} return false;
}"""
FUNCTIONS['enumerate'] = """function (iter) { // nargs: 1
var i, res=[];
if ((typeof iter==="object") && (!Array.isArray(iter))) {iter = Object.keys(iter);}
for (i=0; i<iter.length; i++) {res.push([i, iter[i]]);}
return res;
}"""
FUNCTIONS['zip'] = """function () { // nargs: 2 3 4 5 6 7 8 9
var i, j, tup, arg, args = [], res = [], len = 1e20;
for (i=0; i<arguments.length; i++) {
arg = arguments[i];
if ((typeof arg==="object") && (!Array.isArray(arg))) {arg = Object.keys(arg);}
args.push(arg);
len = Math.min(len, arg.length);
}
for (j=0; j<len; j++) {
tup = []
for (i=0; i<args.length; i++) {tup.push(args[i][j]);}
res.push(tup);
}
return res;
}"""
FUNCTIONS['reversed'] = """function (iter) { // nargs: 1
if ((typeof iter==="object") && (!Array.isArray(iter))) {iter = Object.keys(iter);}
return iter.slice().reverse();
}"""
FUNCTIONS['sorted'] = """function (iter, key, reverse) { // nargs: 1 2 3
if ((typeof iter==="object") && (!Array.isArray(iter))) {iter = Object.keys(iter);}
var comp = function (a, b) {a = key(a); b = key(b);
if (a<b) {return -1;} if (a>b) {return 1;} return 0;};
comp = Boolean(key) ? comp : undefined;
iter = iter.slice().sort(comp);
if (reverse) iter.reverse();
return iter;
}"""
FUNCTIONS['filter'] = """function (func, iter) { // nargs: 2
if (typeof func === "undefined" || func === null) {func = function(x) {return x;}}
if ((typeof iter==="object") && (!Array.isArray(iter))) {iter = Object.keys(iter);}
return iter.filter(func);
}"""
FUNCTIONS['map'] = """function (func, iter) { // nargs: 2
if (typeof func === "undefined" || func === null) {func = function(x) {return x;}}
if ((typeof iter==="object") && (!Array.isArray(iter))) {iter = Object.keys(iter);}
return iter.map(func);
}"""
## Other / Helper functions
FUNCTIONS['truthy'] = """function (v) {
if (v === null || typeof v !== "object") {return v;}
else if (v.length !== undefined) {return v.length ? v : false;}
else if (v.byteLength !== undefined) {return v.byteLength ? v : false;}
else if (v.constructor !== Object) {return true;}
else {return Object.getOwnPropertyNames(v).length ? v : false;}
}"""
FUNCTIONS['op_equals'] = """function op_equals (a, b) { // nargs: 2
if (a == null || b == null) {
} else if (Array.isArray(a) && Array.isArray(b)) {
var i = 0, iseq = a.length == b.length;
while (iseq && i < a.length) {iseq = op_equals(a[i], b[i]); i+=1;}
return iseq;
} else if (a.constructor === Object && b.constructor === Object) {
var akeys = Object.keys(a), bkeys = Object.keys(b);
akeys.sort(); bkeys.sort();
var i=0, k, iseq = op_equals(akeys, bkeys);
while (iseq && i < akeys.length)
{k=akeys[i]; iseq = op_equals(a[k], b[k]); i+=1;}
return iseq;
} return a == b;
}"""
FUNCTIONS['op_contains'] = """function op_contains (a, b) { // nargs: 2
if (b == null) {
} else if (Array.isArray(b)) {
for (var i=0; i<b.length; i++) {if (FUNCTION_PREFIXop_equals(a, b[i]))
return true;}
return false;
} else if (b.constructor === Object) {
for (var k in b) {if (a == k) return true;}
return false;
} else if (b.constructor == String) {
return b.indexOf(a) >= 0;
} var e = Error('Not a container: ' + b); e.name='TypeError'; throw e;
}"""
FUNCTIONS['op_add'] = """function (a, b) { // nargs: 2
if (Array.isArray(a) && Array.isArray(b)) {
return a.concat(b);
} return a + b;
}"""
FUNCTIONS['op_mult'] = """function (a, b) { // nargs: 2
if ((typeof a === 'number') + (typeof b === 'number') === 1) {
if (a.constructor === String) return METHOD_PREFIXrepeat(a, b);
if (b.constructor === String) return METHOD_PREFIXrepeat(b, a);
if (Array.isArray(b)) {var t=a; a=b; b=t;}
if (Array.isArray(a)) {
var res = []; for (var i=0; i<b; i++) res = res.concat(a);
return res;
}
} return a * b;
}"""
## ----- Methods
## List only
METHODS['append'] = """function (x) { // nargs: 1
if (!Array.isArray(this)) return this.KEY.apply(this, arguments);
this.push(x);
}"""
METHODS['extend'] = """function (x) { // nargs: 1
if (!Array.isArray(this)) return this.KEY.apply(this, arguments);
this.push.apply(this, x);
}"""
METHODS['insert'] = """function (i, x) { // nargs: 2
if (!Array.isArray(this)) return this.KEY.apply(this, arguments);
i = (i < 0) ? this.length + i : i;
this.splice(i, 0, x);
}"""
METHODS['remove'] = """function (x) { // nargs: 1
if (!Array.isArray(this)) return this.KEY.apply(this, arguments);
for (var i=0; i<this.length; i++) {
if (FUNCTION_PREFIXop_equals(this[i], x)) {this.splice(i, 1); return;}
}
var e = Error(x); e.name='ValueError'; throw e;
}"""
METHODS['reverse'] = """function () { // nargs: 0
this.reverse();
}"""
METHODS['sort'] = """function (key, reverse) { // nargs: 0 1 2
if (!Array.isArray(this)) return this.KEY.apply(this, arguments);
var comp = function (a, b) {a = key(a); b = key(b);
if (a<b) {return -1;} if (a>b) {return 1;} return 0;};
comp = Boolean(key) ? comp : undefined;
this.sort(comp);
if (reverse) this.reverse();
}"""
## List and dict
METHODS['clear'] = """function () { // nargs: 0
if (Array.isArray(this)) {
this.splice(0, this.length);
} else if (this.constructor === Object) {
var keys = Object.keys(this);
for (var i=0; i<keys.length; i++) delete this[keys[i]];
} else return this.KEY.apply(this, arguments);
}"""
METHODS['copy'] = """function () { // nargs: 0
if (Array.isArray(this)) {
return this.slice(0);
} else if (this.constructor === Object) {
var key, keys = Object.keys(this), res = {};
for (var i=0; i<keys.length; i++) {key = keys[i]; res[key] = this[key];}
return res;
} else return this.KEY.apply(this, arguments);
}"""
METHODS['pop'] = """function (i, d) { // nargs: 1 2
if (Array.isArray(this)) {
i = (i === undefined) ? -1 : i;
i = (i < 0) ? (this.length + i) : i;
var popped = this.splice(i, 1);
if (popped.length) return popped[0];
var e = Error(i); e.name='IndexError'; throw e;
} else if (this.constructor === Object) {
var res = this[i]
if (res !== undefined) {delete this[i]; return res;}
else if (d !== undefined) return d;
var e = Error(i); e.name='KeyError'; throw e;
} else return this.KEY.apply(this, arguments);
}"""
## List and str
# start and stop are not supported for list in Python, but we support them for simplicity
METHODS['count'] = """function (x, start, stop) { // nargs: 1 2 3
start = (start === undefined) ? 0 : start;
stop = (stop === undefined) ? this.length : stop;
start = Math.max(0, ((start < 0) ? this.length + start : start));
stop = Math.min(this.length, ((stop < 0) ? this.length + stop : stop));
if (Array.isArray(this)) {
var count = 0;
for (var i=0; i<this.length; i++) {
if (FUNCTION_PREFIXop_equals(this[i], x)) {count+=1;}
} return count;
} else if (this.constructor == String) {
var count = 0, i = start;
while (i >= 0 && i < stop) {
i = this.indexOf(x, i);
if (i < 0) break;
count += 1;
i += Math.max(1, x.length);
} return count;
} else return this.KEY.apply(this, arguments);
}"""
METHODS['index'] = """function (x, start, stop) { // nargs: 1 2 3
start = (start === undefined) ? 0 : start;
stop = (stop === undefined) ? this.length : stop;
start = Math.max(0, ((start < 0) ? this.length + start : start));
stop = Math.min(this.length, ((stop < 0) ? this.length + stop : stop));
if (Array.isArray(this)) {
for (var i=start; i<stop; i++) {
if (FUNCTION_PREFIXop_equals(this[i], x)) {return i;} // indexOf cant
}
} else if (this.constructor === String) {
var i = this.slice(start, stop).indexOf(x);
if (i >= 0) return i + start;
} else return this.KEY.apply(this, arguments);
var e = Error(x); e.name='ValueError'; throw e;
}"""
## Dict only
# note: fromkeys is a classmethod, and we don't support it.
METHODS['get'] = """function (key, d) { // nargs: 1 2
if (this.constructor !== Object) return this.KEY.apply(this, arguments);
if (this[key] !== undefined) {return this[key];}
else if (d !== undefined) {return d;}
else {return null;}
}"""
METHODS['items'] = """function () { // nargs: 0
if (this.constructor !== Object) return this.KEY.apply(this, arguments);
var key, keys = Object.keys(this), res = []
for (var i=0; i<keys.length; i++) {key = keys[i]; res.push([key, this[key]]);}
return res;
}"""
METHODS['keys'] = """function () { // nargs: 0
if (typeof this['KEY'] === 'function') return this.KEY.apply(this, arguments);
return Object.keys(this);
}"""
METHODS['popitem'] = """function () { // nargs: 0
if (this.constructor !== Object) return this.KEY.apply(this, arguments);
var keys, key, val;
keys = Object.keys(this);
if (keys.length == 0) {var e = Error(); e.name='KeyError'; throw e;}
key = keys[0]; val = this[key]; delete this[key];
return [key, val];
}"""
METHODS['setdefault'] = """function (key, d) { // nargs: 1 2
if (this.constructor !== Object) return this.KEY.apply(this, arguments);
if (this[key] !== undefined) {return this[key];}
else if (d !== undefined) { this[key] = d; return d;}
else {return null;}
}"""
METHODS['update'] = """function (other) { // nargs: 1
if (this.constructor !== Object) return this.KEY.apply(this, arguments);
var key, keys = Object.keys(other);
for (var i=0; i<keys.length; i++) {key = keys[i]; this[key] = other[key];}
}"""
METHODS['values'] = """function () { // nargs: 0
if (this.constructor !== Object) return this.KEY.apply(this, arguments);
var key, keys = Object.keys(this), res = [];
for (var i=0; i<keys.length; i++) {key = keys[i]; res.push(this[key]);}
return res;
}"""
## String only
# ignores: encode, decode, format, format_map, isdecimal, isdigit,
# isprintable, maketrans
# Not a Python method, but a method that we need, and is only ECMA 6
# http://stackoverflow.com/a/5450113/2271927
METHODS['repeat'] = """function(count) { // nargs: 0
if (this.repeat) return this.repeat(count);
if (count < 1) return '';
var result = '', pattern = this.valueOf();
while (count > 1) {
if (count & 1) result += pattern;
count >>= 1, pattern += pattern;
}
return result + pattern;
}"""
METHODS['capitalize'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return this.slice(0, 1).toUpperCase() + this.slice(1).toLowerCase();
}"""
METHODS['casefold'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return this.toLowerCase();
}"""
METHODS['center'] = """function (w, fill) { // nargs: 1 2
if (this.constructor !== String) return this.KEY.apply(this, arguments);
fill = (fill === undefined) ? ' ' : fill;
var tofill = Math.max(0, w - this.length);
var left = Math.ceil(tofill / 2);
var right = tofill - left;
return METHOD_PREFIXrepeat(fill, left) + this + METHOD_PREFIXrepeat(fill, right);
}"""
METHODS['endswith'] = """function (x) { // nargs: 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return this.lastIndexOf(x) == this.length - x.length;
}"""
METHODS['expandtabs'] = """function (tabsize) { // nargs: 0 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
tabsize = (tabsize === undefined) ? 8 : tabsize;
return this.replace(/\\t/g, METHOD_PREFIXrepeat(' ', tabsize));
}"""
METHODS['find'] = """function (x, start, stop) { // nargs: 1 2 3
if (this.constructor !== String) return this.KEY.apply(this, arguments);
start = (start === undefined) ? 0 : start;
stop = (stop === undefined) ? this.length : stop;
start = Math.max(0, ((start < 0) ? this.length + start : start));
stop = Math.min(this.length, ((stop < 0) ? this.length + stop : stop));
var i = this.slice(start, stop).indexOf(x);
if (i >= 0) return i + start;
return -1;
}"""
METHODS['isalnum'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return Boolean(/^[A-Za-z0-9]+$/.test(this));
}"""
METHODS['isalpha'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return Boolean(/^[A-Za-z]+$/.test(this));
}"""
# METHODS['isdecimal'] = """function () {
# if (this.constructor !== String) return this.KEY.apply(this, arguments);
# return Boolean(/^[0-9]+$/.test(this));
# }"""
#
# METHODS['isdigit'] = METHODS['isdecimal']
METHODS['isidentifier'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return Boolean(/^[A-Za-z_][A-Za-z0-9_]*$/.test(this));
}"""
METHODS['islower'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var low = this.toLowerCase(), high = this.toUpperCase();
return low != high && low == this;
}"""
METHODS['isnumeric'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return Boolean(/^[0-9]+$/.test(this));
}"""
METHODS['isspace'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return Boolean(/^\\s+$/.test(this));
}"""
METHODS['istitle'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var low = this.toLowerCase(), title = METHOD_PREFIXtitle(this);
return low != title && title == this;
}"""
METHODS['isupper'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var low = this.toLowerCase(), high = this.toUpperCase();
return low != high && high == this;
}"""
METHODS['join'] = """function (x) { // nargs: 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return x.join(this); // call join on the list instead of the string.
}"""
METHODS['ljust'] = """function (w, fill) { // nargs: 1 2
if (this.constructor !== String) return this.KEY.apply(this, arguments);
fill = (fill === undefined) ? ' ' : fill;
var tofill = Math.max(0, w - this.length);
return this + METHOD_PREFIXrepeat(fill, tofill);
}"""
METHODS['lower'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return this.toLowerCase();
}"""
METHODS['lstrip'] = """function (chars) { // nargs: 0 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
chars = (chars === undefined) ? ' \\t\\r\\n' : chars;
for (var i=0; i<this.length; i++) {
if (chars.indexOf(this[i]) < 0) return this.slice(i);
} return '';
}"""
METHODS['partition'] = """function (sep) { // nargs: 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
if (sep === '') {var e = Error('empty sep'); e.name='ValueError'; throw e;}
var i1 = this.indexOf(sep);
if (i1 < 0) return [this.slice(0), '', '']
var i2 = i1 + sep.length;
return [this.slice(0, i1), this.slice(i1, i2), this.slice(i2)];
}"""
METHODS['replace'] = """function (s1, s2, count) { // nargs: 2 3
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var i = 0, i2, parts = [];
count = (count === undefined) ? 1e20 : count;
while (count > 0) {
i2 = this.indexOf(s1, i);
if (i2 >= 0) {
parts.push(this.slice(i, i2));
parts.push(s2);
i = i2 + s1.length;
count -= 1;
} else break;
}
parts.push(this.slice(i));
return parts.join('');
}"""
METHODS['rfind'] = """function (x, start, stop) { // nargs: 1 2 3
if (this.constructor !== String) return this.KEY.apply(this, arguments);
start = (start === undefined) ? 0 : start;
stop = (stop === undefined) ? this.length : stop;
start = Math.max(0, ((start < 0) ? this.length + start : start));
stop = Math.min(this.length, ((stop < 0) ? this.length + stop : stop));
var i = this.slice(start, stop).lastIndexOf(x);
if (i >= 0) return i + start;
return -1;
}"""
METHODS['rindex'] = """function (x, start, stop) { // nargs: 1 2 3
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var i = METHOD_PREFIXrfind(this, x, start, stop);
if (i >= 0) return i;
var e = Error(x); e.name='ValueError'; throw e;
}"""
METHODS['rjust'] = """function (w, fill) { // nargs: 1 2
if (this.constructor !== String) return this.KEY.apply(this, arguments);
fill = (fill === undefined) ? ' ' : fill;
var tofill = Math.max(0, w - this.length);
return METHOD_PREFIXrepeat(fill, tofill) + this;
}"""
METHODS['rpartition'] = """function (sep) { // nargs: 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
if (sep === '') {var e = Error('empty sep'); e.name='ValueError'; throw e;}
var i1 = this.lastIndexOf(sep);
if (i1 < 0) return ['', '', this.slice(0)]
var i2 = i1 + sep.length;
return [this.slice(0, i1), this.slice(i1, i2), this.slice(i2)];
}"""
METHODS['rsplit'] = """function (sep, count) { // nargs: 1 2
if (this.constructor !== String) return this.KEY.apply(this, arguments);
sep = (sep === undefined) ? /\\s/ : sep;
count = Math.max(0, (count === undefined) ? 1e20 : count);
var parts = this.split(sep);
var limit = Math.max(0, parts.length-count);
var res = parts.slice(limit);
if (count < parts.length) res.splice(0, 0, parts.slice(0, limit).join(sep));
return res;
}"""
METHODS['rstrip'] = """function (chars) { // nargs: 0 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
chars = (chars === undefined) ? ' \\t\\r\\n' : chars;
for (var i=this.length-1; i>=0; i--) {
if (chars.indexOf(this[i]) < 0) return this.slice(0, i+1);
} return '';
}"""
METHODS['split'] = """function (sep, count) { // nargs: 0 1 2
if (this.constructor !== String) return this.KEY.apply(this, arguments);
if (sep === '') {var e = Error('empty sep'); e.name='ValueError'; throw e;}
sep = (sep === undefined) ? /\\s/ : sep;
if (count === undefined) { return this.split(sep); }
var res = [], i = 0, index1 = 0, index2 = 0;
while (i < count && index1 < this.length) {
index2 = this.indexOf(sep, index1);
if (index2 < 0) { break; }
res.push(this.slice(index1, index2));
index1 = index2 + sep.length || 1;
i += 1;
}
res.push(this.slice(index1));
return res;
}"""
METHODS['splitlines'] = """function (keepends) { // nargs: 0 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
keepends = keepends ? 1 : 0
var finder = /\\r\\n|\\r|\\n/g;
var i = 0, i2, isrn, parts = [];
while (finder.exec(this) !== null) {
i2 = finder.lastIndex -1;
isrn = i2 > 0 && this[i2-1] == '\\r' && this[i2] == '\\n';
if (keepends) parts.push(this.slice(i, finder.lastIndex));
else parts.push(this.slice(i, i2 - isrn));
i = finder.lastIndex;
}
if (i < this.length) parts.push(this.slice(i));
else if (!parts.length) parts.push('');
return parts;
}"""
METHODS['startswith'] = """function (x) { // nargs: 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return this.indexOf(x) == 0;
}"""
METHODS['strip'] = """function (chars) { // nargs: 0 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
chars = (chars === undefined) ? ' \\t\\r\\n' : chars;
var i, s1 = this, s2 = '', s3 = '';
for (i=0; i<s1.length; i++) {
if (chars.indexOf(s1[i]) < 0) {s2 = s1.slice(i); break;}
} for (i=s2.length-1; i>=0; i--) {
if (chars.indexOf(s2[i]) < 0) {s3 = s2.slice(0, i+1); break;}
} return s3;
}"""
METHODS['swapcase'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var c, res = [];
for (var i=0; i<this.length; i++) {
c = this[i];
if (c.toUpperCase() == c) res.push(c.toLowerCase());
else res.push(c.toUpperCase());
} return res.join('');
}"""
METHODS['title'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var i0, res = [], tester = /^[^A-Za-z]?[A-Za-z]$/;
for (var i=0; i<this.length; i++) {
i0 = Math.max(0, i-1);
if (tester.test(this.slice(i0, i+1))) res.push(this[i].toUpperCase());
else res.push(this[i].toLowerCase());
} return res.join('');
}"""
METHODS['translate'] = """function (table) { // nargs: 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
var c, res = [];
for (var i=0; i<this.length; i++) {
c = table[this[i]];
if (c === undefined) res.push(this[i]);
else if (c !== null) res.push(c);
} return res.join('');
}"""
METHODS['upper'] = """function () { // nargs: 0
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return this.toUpperCase();
}"""
METHODS['zfill'] = """function (width) { // nargs: 1
if (this.constructor !== String) return this.KEY.apply(this, arguments);
return METHOD_PREFIXrjust(this, width, '0');
}"""
for key in METHODS:
METHODS[key] = re.subn(r'METHOD_PREFIX(.+?)\(',
r'METHOD_PREFIX\1.call(', METHODS[key])[0]
METHODS[key] = METHODS[key].replace(
'KEY', key).replace(
'FUNCTION_PREFIX', FUNCTION_PREFIX).replace(
'METHOD_PREFIX', METHOD_PREFIX).replace(
', )', ')')
for key in FUNCTIONS:
FUNCTIONS[key] = re.subn(r'METHOD_PREFIX(.+?)\(',
r'METHOD_PREFIX\1.call(', FUNCTIONS[key])[0]
FUNCTIONS[key] = FUNCTIONS[key].replace(
'KEY', key).replace(
'FUNCTION_PREFIX', FUNCTION_PREFIX).replace(
'METHOD_PREFIX', METHOD_PREFIX)
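# Illustrative demo (not part of the original module): build a minimal
# stdlib containing only the hasattr() function and the dict.get() method,
# then list how many std names are available in total.
if __name__ == '__main__':
    demo = get_partial_std_lib(['hasattr'], ['get'])
    print(demo)  # two 'var _pyfunc_/_pymeth_ ... ;' definitions
    func_names, method_names = get_all_std_names()
    print(len(func_names), 'functions,', len(method_names), 'methods')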
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._file_shares_operations import build_create_request, build_delete_request, build_get_request, build_lease_request, build_list_request, build_restore_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FileSharesOperations:
"""FileSharesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
account_name: str,
maxpagesize: Optional[str] = None,
filter: Optional[str] = None,
expand: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.FileShareItems"]:
"""Lists all shares.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param maxpagesize: Optional. Specified maximum number of shares that can be included in the
list.
:type maxpagesize: str
:param filter: Optional. When specified, only share names starting with the filter will be
listed.
:type filter: str
:param expand: Optional, used to expand the properties within share's properties. Valid values
are: deleted, snapshots. Should be passed as a string with delimiter ','.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FileShareItems or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2021_08_01.models.FileShareItems]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileShareItems"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
maxpagesize=maxpagesize,
filter=filter,
expand=expand,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
maxpagesize=maxpagesize,
filter=filter,
expand=expand,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("FileShareItems", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares'} # type: ignore
@distributed_trace_async
async def create(
self,
resource_group_name: str,
account_name: str,
share_name: str,
file_share: "_models.FileShare",
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.FileShare":
"""Creates a new share under the specified account as described by request body. The share
resource includes metadata and properties for that share. It does not include a list of the
files contained by the share.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number.
:type share_name: str
:param file_share: Properties of the file share to create.
:type file_share: ~azure.mgmt.storage.v2021_08_01.models.FileShare
:param expand: Optional, used to expand the properties within share's properties. Valid values
are: snapshots. Should be passed as a string with delimiter ','.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileShare, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_08_01.models.FileShare
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileShare"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(file_share, 'FileShare')
request = build_create_request(
resource_group_name=resource_group_name,
account_name=account_name,
share_name=share_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
expand=expand,
template_url=self.create.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FileShare', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('FileShare', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}'} # type: ignore
@distributed_trace_async
async def update(
self,
resource_group_name: str,
account_name: str,
share_name: str,
file_share: "_models.FileShare",
**kwargs: Any
) -> "_models.FileShare":
"""Updates share properties as specified in request body. Properties not mentioned in the request
will not be changed. Update fails if the specified share does not already exist.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number.
:type share_name: str
:param file_share: Properties to update for the file share.
:type file_share: ~azure.mgmt.storage.v2021_08_01.models.FileShare
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileShare, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_08_01.models.FileShare
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileShare"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(file_share, 'FileShare')
request = build_update_request(
resource_group_name=resource_group_name,
account_name=account_name,
share_name=share_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FileShare', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
account_name: str,
share_name: str,
expand: Optional[str] = None,
x_ms_snapshot: Optional[str] = None,
**kwargs: Any
) -> "_models.FileShare":
"""Gets properties of a specified share.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number.
:type share_name: str
:param expand: Optional, used to expand the properties within share's properties. Valid values
are: stats. Should be passed as a string with delimiter ','.
:type expand: str
:param x_ms_snapshot: Optional, used to retrieve properties of a snapshot.
:type x_ms_snapshot: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileShare, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_08_01.models.FileShare
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileShare"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
account_name=account_name,
share_name=share_name,
subscription_id=self._config.subscription_id,
expand=expand,
x_ms_snapshot=x_ms_snapshot,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FileShare', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}'} # type: ignore
@distributed_trace_async
async def delete(
self,
resource_group_name: str,
account_name: str,
share_name: str,
x_ms_snapshot: Optional[str] = None,
include: Optional[str] = None,
**kwargs: Any
) -> None:
"""Deletes specified share under its account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number.
:type share_name: str
:param x_ms_snapshot: Optional, used to delete a snapshot.
:type x_ms_snapshot: str
:param include: Optional. Valid values are: snapshots, leased-snapshots, none. The default
value is snapshots. For 'snapshots', the file share is deleted including all of its file share
snapshots. If the file share contains leased-snapshots, the deletion fails. For
         'leased-snapshots', the file share is deleted including all of its file share
(leased/unleased). For 'none', the file share is deleted if it has no share snapshots. If the
file share contains any snapshots (leased or unleased), the deletion fails.
:type include: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
account_name=account_name,
share_name=share_name,
subscription_id=self._config.subscription_id,
x_ms_snapshot=x_ms_snapshot,
include=include,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}'} # type: ignore
@distributed_trace_async
async def restore(
self,
resource_group_name: str,
account_name: str,
share_name: str,
deleted_share: "_models.DeletedShare",
**kwargs: Any
) -> None:
"""Restore a file share within a valid retention days if share soft delete is enabled.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number.
:type share_name: str
:param deleted_share:
:type deleted_share: ~azure.mgmt.storage.v2021_08_01.models.DeletedShare
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(deleted_share, 'DeletedShare')
request = build_restore_request(
resource_group_name=resource_group_name,
account_name=account_name,
share_name=share_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.restore.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
restore.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}/restore'} # type: ignore
@distributed_trace_async
async def lease(
self,
resource_group_name: str,
account_name: str,
share_name: str,
x_ms_snapshot: Optional[str] = None,
parameters: Optional["_models.LeaseShareRequest"] = None,
**kwargs: Any
) -> "_models.LeaseShareResponse":
"""The Lease Share operation establishes and manages a lock on a share for delete operations. The
lock duration can be 15 to 60 seconds, or can be infinite.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number.
:type share_name: str
:param x_ms_snapshot: Optional. Specify the snapshot time to lease a snapshot.
:type x_ms_snapshot: str
:param parameters: Lease Share request body.
:type parameters: ~azure.mgmt.storage.v2021_08_01.models.LeaseShareRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LeaseShareResponse, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_08_01.models.LeaseShareResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LeaseShareResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'LeaseShareRequest')
else:
_json = None
request = build_lease_request(
resource_group_name=resource_group_name,
account_name=account_name,
share_name=share_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
x_ms_snapshot=x_ms_snapshot,
template_url=self.lease.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('LeaseShareResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
lease.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}/lease'} # type: ignore
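    # Illustrative usage sketch (not generated code; the resource names are
    # placeholders and azure-identity is an assumed, separate dependency):
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.storage.aio import StorageManagementClient
    #
    #   async def list_shares(subscription_id, group_name, account_name):
    #       async with DefaultAzureCredential() as credential:
    #           async with StorageManagementClient(credential, subscription_id) as client:
    #               async for share in client.file_shares.list(group_name, account_name):
    #                   print(share.name)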
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains all data generating code for datasets used in the script."""
from __future__ import division
import os
import tempfile
import time
import numpy as np
from tensorflow.compat.v1 import gfile
from tensorflow.compat.v1.keras import backend as K
from tensorflow.compat.v1.keras.datasets import mnist
from tensorflow.compat.v1.keras.models import model_from_json
from clustering_normalized_cuts import pairs
_IGNORE_SSL_ERROR = True # Don't verify SSL certs.
def get_data(params):
"""preprocesses all data.
Args:
params: all parameters.
Returns:
    A nested dict with the following keys:
the permutations (if any) used to shuffle the training and validation sets
'p_train' - p_train
'p_val' - p_val
the data used for CNC
'cnc'
'train_and_test' - (x_train, y_train, x_val, y_val,
x_test, y_test)
'train_unlabeled_and_labeled' - (x_train_unlabeled, y_train_unlabeled,
x_train_labeled, y_train_labeled)
'val_unlabeled_and_labeled' - (x_val_unlabeled, y_val_unlabeled,
x_val_labeled, y_val_labeled)
the data used for siamese net, if the architecture uses the siamese net
'siamese'
'train_and_test' - (pairs_train, dist_train, pairs_val,
dist_val)
'train_unlabeled_and_labeled' - (pairs_train_unlabeled,
dist_train_unlabeled, pairs_train_labeled, dist_train_labeled)
'val_unlabeled_and_labeled' - (pairs_val_unlabeled,
dist_val_unlabeled, pairs_val_labeled, dist_val_labeled)
"""
ret = {}
# get data
x_train, x_test, y_train, y_test = load_data(params)
ret['cnc'] = {}
if params.get('use_all_data'):
x_train = np.concatenate((x_train, x_test), axis=0)
y_train = np.concatenate((y_train, y_test), axis=0)
x_test = np.zeros((0,) + x_train.shape[1:])
y_test = np.zeros((0,))
# split x training, validation, and test subsets
if 'val_set_fraction' not in params:
train_val_split = (.9, .1)
elif params['val_set_fraction'] > 0 and params['val_set_fraction'] <= 1:
train_val_split = (1 - params['val_set_fraction'],
params['val_set_fraction'])
else:
raise ValueError('val_set_fraction is invalid! must be in range (0, 1]')
# shuffle training and test data separately into themselves and concatenate
p = np.concatenate([
np.random.permutation(len(x_train)),
len(x_train) + np.random.permutation(len(x_test))
],
axis=0)
x_train, y_train, p_train, x_val, y_val, p_val = split_data(
x_train, y_train, train_val_split, permute=p[:len(x_train)])
# split training and validation subset into its supervised and unsupervised
if params.get('train_labeled_fraction'):
train_split = (1 - params['train_labeled_fraction'],
params['train_labeled_fraction'])
else:
train_split = (1, 0)
x_train_unlabeled, y_train_unlabeled, _, x_train_labeled, y_train_labeled, _ = split_data(
x_train, y_train, train_split)
if params.get('val_labeled_fraction'):
val_split = (1 - params['val_labeled_fraction'],
params['val_labeled_fraction'])
else:
val_split = (1, 0)
x_val_unlabeled, y_val_unlabeled, _, x_val_labeled, y_val_labeled, _ = split_data(
x_val, y_val, val_split)
# embed data in code space, if necessary
all_data = [
x_train, x_val, x_test, x_train_unlabeled, x_train_labeled,
x_val_unlabeled, x_val_labeled
]
if params.get('use_code_space'):
for i, d in enumerate(all_data):
all_data[i] = embed_data(d, dset=params['dset'], path=params['main_path'])
else:
# otherwise just flatten it
for i, d in enumerate(all_data):
all_data[i] = all_data[i].reshape((-1, np.prod(all_data[i].shape[1:])))
(x_train, x_val, x_test, x_train_unlabeled, x_train_labeled, x_val_unlabeled,
x_val_labeled) = all_data
# collect everything into a dictionary
ret['cnc']['train_and_test'] = (x_train, y_train, x_val, y_val, x_test,
y_test)
ret['cnc']['train_unlabeled_and_labeled'] = (x_train_unlabeled,
y_train_unlabeled,
x_train_labeled, y_train_labeled)
ret['cnc']['val_unlabeled_and_labeled'] = (x_val_unlabeled, y_val_unlabeled,
x_val_labeled, y_val_labeled)
ret['p_train'] = p_train
ret['p_val'] = p_val
# get siamese data if necessary
if 'siamese' in params['affinity']:
ret['siamese'] = {}
pairs_train_unlabeled, dist_train_unlabeled = pairs.create_pairs_from_unlabeled_data(
x1=x_train_unlabeled,
p=None,
k=params.get('siam_k'),
tot_pairs=params.get('siamese_tot_pairs'),
pre_shuffled=True,
)
pairs_val_unlabeled, dist_val_unlabeled = pairs.create_pairs_from_unlabeled_data(
x1=x_val_unlabeled,
p=None,
k=params.get('siam_k'),
tot_pairs=params.get('siamese_tot_pairs'),
pre_shuffled=True,
)
# get pairs for labeled data
class_indices = [
np.where(y_train_labeled == i)[0] for i in range(params['n_clusters'])
]
pairs_train_labeled, dist_train_labeled = pairs.create_pairs_from_labeled_data(
x_train_labeled, class_indices)
class_indices = [
np.where(y_val_labeled == i)[0] for i in range(params['n_clusters'])
]
pairs_val_labeled, dist_val_labeled = pairs.create_pairs_from_labeled_data(
x_val_labeled, class_indices)
ret['siamese']['train_unlabeled_and_labeled'] = (pairs_train_unlabeled,
dist_train_unlabeled,
pairs_train_labeled,
dist_train_labeled)
ret['siamese']['val_unlabeled_and_labeled'] = (pairs_val_unlabeled,
dist_val_unlabeled,
pairs_val_labeled,
dist_val_labeled)
# combine labeled and unlabeled pairs for training the siamese
print('pairs_train_unlabeled shape', pairs_train_unlabeled.shape)
print('pairs_train_labeled shape', pairs_train_labeled.shape)
pairs_train = np.concatenate((pairs_train_unlabeled, pairs_train_labeled),
axis=0)
dist_train = np.concatenate((dist_train_unlabeled, dist_train_labeled),
axis=0)
pairs_val = np.concatenate((pairs_val_unlabeled, pairs_val_labeled), axis=0)
dist_val = np.concatenate((dist_val_unlabeled, dist_val_labeled), axis=0)
ret['siamese']['train_and_test'] = (pairs_train, dist_train, pairs_val,
dist_val)
return ret
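# Example access pattern (illustrative; params is assumed to be a dict such
# as {'dset': 'mnist', 'affinity': 'siamese', 'n_clusters': 10, ...}; the
# 'siamese' key exists only when params['affinity'] contains 'siamese'):
#
#   data = get_data(params)
#   x_train, y_train, x_val, y_val, x_test, y_test = data['cnc']['train_and_test']
#   pairs_train, dist_train, pairs_val, dist_val = data['siamese']['train_and_test']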
def load_data(params):
"""reads the data specified in params."""
if params['dset'] == 'mnist':
x_train, x_test, y_train, y_test = get_mnist()
else:
raise ValueError('Dataset provided ({}) is invalid!'.format(params['dset']))
return x_train, x_test, y_train, y_test
def embed_data(x, dset, path):
"""embeds x into the code space using the autoencoder."""
  if not len(x):
return np.zeros(shape=(0, 10))
# load model and weights
json_path = os.path.join(path, 'ae_{}.json'.format(dset))
print('load model from json file:', json_path)
with gfile.Open(json_path) as f:
pt_ae = model_from_json(f.read())
weights_path = os.path.join(path, 'ae_{}_weights.h5'.format(dset))
  print('load code space from:', weights_path)
local_filename = weights_path.split('/')[-1]
tmp_filename = os.path.join(tempfile.gettempdir(),
str(int(time.time())) + '_' + local_filename)
gfile.Copy(weights_path, tmp_filename)
pt_ae.load_weights(tmp_filename)
gfile.Remove(tmp_filename)
print('***********************', x.shape)
x = x.reshape(-1, np.prod(x.shape[1:]))
print('***********************', x.shape)
get_embeddings = K.function([pt_ae.input], [pt_ae.layers[3].output])
get_reconstruction = K.function([pt_ae.layers[4].input], [pt_ae.output])
x_embedded = predict_with_k_fn(get_embeddings, x)[0]
x_recon = predict_with_k_fn(get_reconstruction, x_embedded)[0]
reconstruction_mse = np.mean(np.square(x - x_recon))
print(
'using pretrained embeddings; sanity check, total reconstruction error:',
np.mean(reconstruction_mse))
del pt_ae
return x_embedded
def predict_with_k_fn(k_fn, x, bs=1000):
"""evaluates x by k_fn(x), where k_fn is a Keras function, by batches of size 1000."""
if not isinstance(x, list):
x = [x]
num_outs = len(k_fn.outputs)
shape_y = k_fn.outputs[0].get_shape().as_list()
shape_y[0] = len(x[0])
y = [np.empty(shape_y) for _ in k_fn.outputs]
for i in range(int(x[0].shape[0] / bs + 1)):
x_batch = []
for x_ in x:
x_batch.append(x_[i * bs:(i + 1) * bs])
temp = k_fn(x_batch)
for j in range(num_outs):
y[j][i * bs:(i + 1) * bs] = temp[j]
return y
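# Note: the loop above iterates int(n / bs) + 1 times, so when n is an exact
# multiple of bs the final iteration passes an empty slice to k_fn and the
# corresponding write y[j][i * bs:(i + 1) * bs] is empty as well.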
def split_data(x, y, split, permute=None):
"""Splits arrays x and y.
Args:
x: matrix of shape n x d1
y: matrix of shape n x d2
    split: a list of floats of length 2 (e.g. [a, b]) where a, b >= 0 and
      a + b == 1
permute: a list or array of length n that can be used to shuffle x and y
identically before splitting it
Returns:
    The split arrays: (x1, y1, p1, x2, y2, p2), i.e. the data, labels and
    permutation indices of each of the two subsets
"""
n = len(x)
if permute is not None:
if not isinstance(permute, np.ndarray):
raise ValueError(
'Provided permute array should be an np.ndarray, not {}!'.format(
type(permute)))
if len(permute.shape) != 1:
raise ValueError(
'Provided permute array should be of dimension 1, not {}'.format(
len(permute.shape)))
if len(permute) != n:
raise ValueError(
'Provided permute should be the same length as x! (len(permute) = {}, n = {}'
.format(len(permute), n))
else:
permute = np.arange(n)
  if not np.isclose(np.sum(split), 1):
raise ValueError('Split elements must sum to 1!')
ret_x_y_p = []
prev_idx = 0
for s in split:
    idx = prev_idx + int(np.round(s * n))
p_ = permute[prev_idx:idx]
x_ = x[p_]
y_ = y[p_]
prev_idx = idx
ret_x_y_p.append(x_)
ret_x_y_p.append(y_)
ret_x_y_p.append(p_)
  return (ret_x_y_p[0], ret_x_y_p[1], ret_x_y_p[2],
          ret_x_y_p[3], ret_x_y_p[4], ret_x_y_p[5])
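# Hypothetical usage sketch (not from the original source): splitting arrays
# 90/10 with a fixed shuffle. `x` and `y` stand in for any equal-length
# arrays.
#
#   p = np.random.permutation(len(x))
#   x_train, y_train, p_train, x_val, y_val, p_val = split_data(
#       x, y, split=[0.9, 0.1], permute=p)
#
# The returned index arrays (p_train, p_val) record which original rows
# landed in each split.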
def get_mnist():
"""Returns the train and test splits of the MNIST digits dataset.
x_train and x_test are shaped into the tensorflow image data
shape and normalized to fit in the range [0, 1]
"""
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# reshape and standardize x arrays
x_train = np.expand_dims(x_train, -1) / 255.
x_test = np.expand_dims(x_test, -1) / 255.
return x_train, x_test, y_train, y_test
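# Hypothetical usage sketch: the standard MNIST splits come back channel-last
# and scaled to [0, 1].
#
#   x_train, x_test, y_train, y_test = get_mnist()
#   # x_train.shape == (60000, 28, 28, 1); x_test.shape == (10000, 28, 28, 1)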
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""Client implementation for talking to the geard daemon."""
import json
from zope.interface import Interface, implementer
from characteristic import attributes
from twisted.internet.defer import succeed, fail
from twisted.internet.task import deferLater
from twisted.internet import reactor
from treq import request, content
GEAR_PORT = 43273
class AlreadyExists(Exception):
"""A unit with the given name already exists."""
class GearError(Exception):
"""Unexpected error received from gear daemon."""
@attributes(["id", "variables"])
class GearEnvironment(object):
"""
A collection of Geard unit environment variables associated with an
environment ID.
"""
def to_dict(self):
"""
Convert to a dictionary suitable for serialising to JSON and then on to
the Gear REST API.
"""
variables = []
for k, v in self.variables.items():
variables.append(dict(name=k, value=v))
return dict(id=self.id, variables=variables)
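    # Hypothetical usage sketch (not from the original source): serialising
    # an environment for the gear REST API.
    #
    #   env = GearEnvironment(id=u"app", variables={u"KEY": u"value"})
    #   env.to_dict()
    #   # => {'id': u'app',
    #   #     'variables': [{'name': u'KEY', 'value': u'value'}]}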
@attributes(["name", "activation_state", "sub_state", "container_image",
"ports", "links", "environment"],
defaults=dict(sub_state=None, container_image=None,
ports=(), links=(), environment=None))
class Unit(object):
"""
Information about a unit managed by geard/systemd.
XXX: The container_image attribute defaults to `None` until we have a way
to interrogate geard for the docker images associated with its
containers. See https://github.com/ClusterHQ/flocker/issues/207
:ivar unicode name: The name of the unit.
:ivar unicode activation_state: The state of the unit in terms of
systemd activation. Values indicate whether the unit is installed
but not running (``u"inactive"``), starting (``u"activating"``),
running (``u"active"``), failed (``u"failed"``) stopping
(``u"deactivating"``) or stopped (either ``u"failed"`` or
``u"inactive"`` apparently). See
https://github.com/ClusterHQ/flocker/issues/187 about using constants
instead of strings.
:ivar unicode sub_state: The systemd substate of the unit. Certain Unit
types may have a number of additional substates, which are mapped to
the five generalized activation states above. See
http://www.freedesktop.org/software/systemd/man/systemd.html#Concepts
:ivar unicode container_image: The docker image name associated with this
gear unit
:ivar list ports: The ``PortMap`` instances which define how connections to
ports on the host are routed to ports exposed in the container.
:ivar list links: The ``PortMap`` instances which define how connections to
ports inside the container are routed to ports on the host.
:ivar GearEnvironment environment: A ``GearEnvironment`` whose variables
will be supplied to the gear unit or ``None`` if there are no
environment variables for this unit.
"""
class IGearClient(Interface):
"""
A client for the geard HTTP API.
Note the difference in semantics between the results of ``add()``
(firing does not indicate application started successfully)
vs. ``remove()`` (firing indicates application has finished shutting
down).
"""
def add(unit_name, image_name, ports=None, links=None, environment=None):
"""
Install and start a new unit.
Note that callers should not assume success indicates the unit has
        finished starting up. In addition to the asynchronous nature of gear,
        even if the container is up and running, the application within it
might still be starting up, e.g. it may not have bound the
external ports yet. As a result the final success of application
startup is out of scope for this method.
:param unicode unit_name: The name of the unit to create.
:param unicode image_name: The Docker image to use for the unit.
:param list ports: A list of ``PortMap``\ s mapping ports exposed in
the container to ports exposed on the host. Default ``None`` means
that no port mappings will be configured for this unit.
:param list links: A list of ``PortMap``\ s mapping ports forwarded
from the container to ports on the host.
:param GearEnvironment environment: A ``GearEnvironment`` associating
key value pairs with an environment ID. Default ``None`` means that
no environment variables will be supplied to the unit.
:return: ``Deferred`` that fires on success, or errbacks with
:class:`AlreadyExists` if a unit by that name already exists.
"""
def exists(unit_name):
"""
Check whether the unit exists.
        :param unicode unit_name: The name of the unit to check.
:return: ``Deferred`` that fires with ``True`` if unit exists,
otherwise ``False``.
"""
def remove(unit_name):
"""
Stop and delete the given unit.
This can be done multiple times in a row for the same unit.
:param unicode unit_name: The name of the unit to stop.
:return: ``Deferred`` that fires once the unit has been stopped
and removed.
"""
def list():
"""
List all known units.
:return: ``Deferred`` firing with ``set`` of :class:`Unit`.
"""
@implementer(IGearClient)
class GearClient(object):
"""Talk to the gear daemon over HTTP.
:ivar bytes _base_url: Base URL for gear.
"""
def __init__(self, hostname):
"""
:param unicode hostname: Gear host to connect to.
"""
self._base_url = b"http://%s:%d" % (hostname.encode("ascii"),
GEAR_PORT)
def _container_request(self, method, unit_name, operation=None, data=None):
"""Send HTTP request to gear.
:param bytes method: The HTTP method to send, e.g. ``b"GET"``.
:param unicode unit_name: The name of the unit.
:param operation: ``None``, or extra ``unicode`` path element to add to
the request URL path.
:param data: ``None``, or object with a body for the request that
can be serialized to JSON.
        :return: A ``Deferred`` that fires with a response object.
"""
path = b"/container/" + unit_name.encode("ascii")
if operation is not None:
path += b"/" + operation
return self._request(method, path, data=data)
def _request(self, method, path, data=None):
"""Send HTTP request to gear.
:param bytes method: The HTTP method to send, e.g. ``b"GET"``.
:param bytes path: Path to request.
:param data: ``None``, or object with a body for the request that
can be serialized to JSON.
        :return: A ``Deferred`` that fires with a response object.
"""
url = self._base_url + path
if data is not None:
data = json.dumps(data)
return request(method, url, data=data, persistent=False)
def _ensure_ok(self, response):
"""Make sure response indicates success.
Also reads the body to ensure connection is closed.
:param response: Response from treq request,
``twisted.web.iweb.IResponse`` provider.
:return: ``Deferred`` that errbacks with ``GearError`` if the response
is not successful (2xx HTTP response code).
"""
d = content(response)
# geard uses a variety of 2xx response codes. Filed treq issue
# about having "is this a success?" API:
# https://github.com/dreid/treq/issues/62
if response.code // 100 != 2:
d.addCallback(lambda data: fail(GearError(response.code, data)))
return d
def add(self, unit_name, image_name, ports=None, links=None,
environment=None):
"""
See ``IGearClient.add`` for base documentation.
Gear `NetworkLinks` are currently fixed to destination localhost. This
allows us to control the actual target of the link using proxy / nat
rules on the host machine without having to restart the gear unit.
XXX: If gear allowed us to reconfigure links this wouldn't be
necessary. See https://github.com/openshift/geard/issues/223
XXX: As long as we need to set the target as 127.0.0.1 its also worth
noting that gear will actually route the traffic to a non-loopback
address on the host. So if your service or NAT rule on the host is
configured for 127.0.0.1 only, it won't receive any traffic. See
https://github.com/openshift/geard/issues/224
XXX: If an environment is supplied, ``gear`` will create an environment
file with the given ID. But it will not remove that environment file
when this unit is removed or when there are no longer any references to
the environment ID. See https://github.com/ClusterHQ/flocker/issues/585
"""
if ports is None:
ports = []
if links is None:
links = []
data = {
u"Image": image_name, u"Started": True, u'Ports': [],
u'NetworkLinks': []}
for port in ports:
data['Ports'].append(
{u'Internal': port.internal_port,
u'External': port.external_port})
for link in links:
data['NetworkLinks'].append(
{u'FromHost': u'127.0.0.1',
u'FromPort': link.internal_port,
u'ToHost': u'127.0.0.1',
u'ToPort': link.external_port}
)
if environment is not None:
data['Environment'] = environment.to_dict()
checked = self.exists(unit_name)
checked.addCallback(
lambda exists: fail(AlreadyExists(unit_name)) if exists else None)
checked.addCallback(
lambda _: self._container_request(b"PUT", unit_name, data=data))
checked.addCallback(self._ensure_ok)
return checked
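    # Hypothetical usage sketch (not from the original source): installing a
    # unit with one exposed port, using the PortMap record defined at the
    # bottom of this module.
    #
    #   client = GearClient(u"node1.example.com")
    #   d = client.add(u"mysite", u"nginx",
    #                  ports=[PortMap(internal_port=80, external_port=8080)])
    #   # d fires once gear accepts the request; per IGearClient.add, the
    #   # application inside the container may still be starting up.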
def exists(self, unit_name):
d = self.list()
def got_units(units):
return unit_name in [unit.name for unit in units]
d.addCallback(got_units)
return d
def remove(self, unit_name):
d = self._container_request(b"PUT", unit_name, operation=b"stopped")
d.addCallback(self._ensure_ok)
def check_if_stopped(_=None):
listing = self.list()
def got_listing(units):
matching_units = [unit for unit in units
if unit.name == unit_name]
if not matching_units:
return
unit = matching_units[0]
if unit.activation_state in (u"failed", u"inactive"):
return
return deferLater(reactor, 0.1, check_if_stopped)
listing.addCallback(got_listing)
return listing
d.addCallback(check_if_stopped)
d.addCallback(lambda _: self._container_request(b"DELETE", unit_name))
d.addCallback(self._ensure_ok)
return d
def list(self):
d = self._request(b"GET", b"/containers?all=1")
d.addCallback(content)
def got_body(data):
values = json.loads(data)[u"Containers"]
# XXX: GearClient.list should also return container_image
# information.
# See https://github.com/ClusterHQ/flocker/issues/207
# container_image=image_name,
return set([Unit(name=unit[u"Id"],
activation_state=unit[u"ActiveState"],
sub_state=unit[u"SubState"],
container_image=None)
for unit in values])
d.addCallback(got_body)
return d
@implementer(IGearClient)
class FakeGearClient(object):
"""In-memory fake that simulates talking to a gear daemon.
    The state of the simulated units is stored in memory.
:ivar dict _units: See ``units`` of ``__init__``\ .
"""
def __init__(self, units=None):
"""
:param dict units: A dictionary of canned ``Unit``\ s which will be
manipulated and returned by the methods of this ``FakeGearClient``.
:type units: ``dict`` mapping `unit_name` to ``Unit``\ .
"""
if units is None:
units = {}
self._units = units
def add(self, unit_name, image_name, ports=(), links=(), environment=None):
if unit_name in self._units:
return fail(AlreadyExists(unit_name))
self._units[unit_name] = Unit(
name=unit_name,
container_image=image_name,
ports=ports,
links=links,
environment=environment,
activation_state=u'active'
)
return succeed(None)
def exists(self, unit_name):
return succeed(unit_name in self._units)
def remove(self, unit_name):
if unit_name in self._units:
del self._units[unit_name]
return succeed(None)
def list(self):
# XXX: This is a hack so that functional and unit tests that use
# GearClient.list can pass until the real GearClient.list can also
# return container_image information, ports and links.
# See https://github.com/ClusterHQ/flocker/issues/207
incomplete_units = set()
for unit in self._units.values():
incomplete_units.add(
Unit(name=unit.name, activation_state=unit.activation_state))
return succeed(incomplete_units)
@attributes(['internal_port', 'external_port'])
class PortMap(object):
"""
A record representing the mapping between a port exposed internally by a
docker container and the corresponding external port on the host.
:ivar int internal_port: The port number exposed by the container.
:ivar int external_port: The port number exposed by the host.
"""
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import re
import socket
import sys
import types
import uuid
import eventlet
import greenlet
from oslo.config import cfg
from staccato.openstack.common import excutils
from staccato.openstack.common.gettextutils import _
from staccato.openstack.common import importutils
from staccato.openstack.common import jsonutils
from staccato.openstack.common import processutils as utils
from staccato.openstack.common.rpc import common as rpc_common
zmq = importutils.try_import('eventlet.green.zmq')
# Re-exported here for convenience; these are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException
zmq_opts = [
cfg.StrOpt('rpc_zmq_bind_address', default='*',
help='ZeroMQ bind address. Should be a wildcard (*), '
'an ethernet interface, or IP. '
'The "host" option should point or resolve to this '
'address.'),
# The module.Class to use for matchmaking.
cfg.StrOpt(
'rpc_zmq_matchmaker',
default=('staccato.openstack.common.rpc.'
'matchmaker.MatchMakerLocalhost'),
help='MatchMaker driver',
),
# The following port is unassigned by IANA as of 2012-05-21
cfg.IntOpt('rpc_zmq_port', default=9501,
help='ZeroMQ receiver listening port'),
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'),
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),
cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
help='Name of this node. Must be a valid hostname, FQDN, or '
'IP address. Must match "host" option, if running Nova.')
]
CONF = cfg.CONF
CONF.register_opts(zmq_opts)
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memoized matchmaker object
def _serialize(data):
"""
Serialization wrapper
We prefer using JSON, but it cannot encode all types.
    Logs an error and re-raises if a developer passes us data that JSON
    cannot encode.
"""
try:
return jsonutils.dumps(data, ensure_ascii=True)
except TypeError:
with excutils.save_and_reraise_exception():
LOG.error(_("JSON serialization failed."))
def _deserialize(data):
"""
Deserialization wrapper
"""
LOG.debug(_("Deserializing: %s"), data)
return jsonutils.loads(data)
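# Hypothetical usage sketch: the two wrappers round-trip JSON-compatible
# data; anything JSON cannot encode is logged and re-raised by _serialize.
#
#   _deserialize(_serialize({'a': 1}))  # => {u'a': 1}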
class ZmqSocket(object):
"""
A tiny wrapper around ZeroMQ to simplify the send/recv protocol
and connection management.
Can be used as a Context (supports the 'with' statement).
"""
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
self.sock = _get_ctxt().socket(zmq_type)
self.addr = addr
self.type = zmq_type
self.subscriptions = []
# Support failures on sending/receiving on wrong socket type.
self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
self.can_sub = zmq_type in (zmq.SUB, )
# Support list, str, & None for subscribe arg (cast to list)
do_sub = {
list: subscribe,
str: [subscribe],
type(None): []
}[type(subscribe)]
for f in do_sub:
self.subscribe(f)
str_data = {'addr': addr, 'type': self.socket_s(),
'subscribe': subscribe, 'bind': bind}
LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
LOG.debug(_("-> bind: %(bind)s"), str_data)
try:
if bind:
self.sock.bind(addr)
else:
self.sock.connect(addr)
except Exception:
raise RPCException(_("Could not open socket."))
def socket_s(self):
"""Get socket type as string."""
t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
'DEALER')
return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
def subscribe(self, msg_filter):
"""Subscribe."""
if not self.can_sub:
raise RPCException("Cannot subscribe on this socket.")
LOG.debug(_("Subscribing to %s"), msg_filter)
try:
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
except Exception:
return
self.subscriptions.append(msg_filter)
def unsubscribe(self, msg_filter):
"""Unsubscribe."""
if msg_filter not in self.subscriptions:
return
self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
self.subscriptions.remove(msg_filter)
def close(self):
if self.sock is None or self.sock.closed:
return
# We must unsubscribe, or we'll leak descriptors.
if self.subscriptions:
for f in self.subscriptions:
try:
self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
except Exception:
pass
self.subscriptions = []
try:
# Default is to linger
self.sock.close()
except Exception:
# While this is a bad thing to happen,
# it would be much worse if some of the code calling this
# were to fail. For now, lets log, and later evaluate
# if we can safely raise here.
LOG.error("ZeroMQ socket could not be closed.")
self.sock = None
def recv(self):
if not self.can_recv:
raise RPCException(_("You cannot recv on this socket."))
return self.sock.recv_multipart()
def send(self, data):
if not self.can_send:
raise RPCException(_("You cannot send on this socket."))
self.sock.send_multipart(data)
class ZmqClient(object):
"""Client for ZMQ sockets."""
def __init__(self, addr, socket_type=None, bind=False):
if socket_type is None:
socket_type = zmq.PUSH
self.outq = ZmqSocket(addr, socket_type, bind=bind)
def cast(self, msg_id, topic, data, envelope=False):
msg_id = msg_id or 0
if not envelope:
self.outq.send(map(bytes,
(msg_id, topic, 'cast', _serialize(data))))
return
rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items())
self.outq.send(map(bytes,
(msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
def close(self):
self.outq.close()
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.replies = []
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['replies'] = self.replies
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False):
if ending:
return
self.replies.append(reply)
@classmethod
def marshal(self, ctx):
ctx_data = ctx.to_dict()
return _serialize(ctx_data)
@classmethod
def unmarshal(self, data):
return RpcContext.from_dict(_deserialize(data))
class InternalContext(object):
"""Used by ConsumerBase as a private context for - methods."""
def __init__(self, proxy):
self.proxy = proxy
self.msg_waiter = None
def _get_response(self, ctx, proxy, topic, data):
"""Process a curried message and cast the result to topic."""
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
data.setdefault('version', None)
data.setdefault('args', {})
try:
result = proxy.dispatch(
ctx, data['version'], data['method'],
data.get('namespace'), **data['args'])
return ConsumerBase.normalize_reply(result, ctx.replies)
except greenlet.GreenletExit:
# ignore these since they are just from shutdowns
pass
except rpc_common.ClientException as e:
LOG.debug(_("Expected exception during message handling (%s)") %
e._exc_info[1])
return {'exc':
rpc_common.serialize_remote_exception(e._exc_info,
log_failure=False)}
except Exception:
LOG.error(_("Exception during message handling"))
return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())}
def reply(self, ctx, proxy,
msg_id=None, context=None, topic=None, msg=None):
"""Reply to a casted call."""
# NOTE(ewindisch): context kwarg exists for Grizzly compat.
# this may be able to be removed earlier than
# 'I' if ConsumerBase.process were refactored.
if type(msg) is list:
payload = msg[-1]
else:
payload = msg
response = ConsumerBase.normalize_reply(
self._get_response(ctx, proxy, topic, payload),
ctx.replies)
LOG.debug(_("Sending reply"))
_multi_send(_cast, ctx, topic, {
'method': '-process_reply',
'args': {
'msg_id': msg_id, # Include for Folsom compat.
'response': response
}
}, _msg_id=msg_id)
class ConsumerBase(object):
"""Base Consumer."""
def __init__(self):
self.private_ctx = InternalContext(None)
@classmethod
def normalize_reply(self, result, replies):
#TODO(ewindisch): re-evaluate and document this method.
if isinstance(result, types.GeneratorType):
return list(result)
elif replies:
return replies
else:
return [result]
def process(self, proxy, ctx, data):
data.setdefault('version', None)
data.setdefault('args', {})
        # Methods whose names start with '-' are processed internally
        # ('-' is not a valid character in a user-supplied method name).
method = data.get('method')
if not method:
LOG.error(_("RPC message did not include method."))
return
# Internal method
# uses internal context for safety.
if method == '-reply':
self.private_ctx.reply(ctx, proxy, **data['args'])
return
proxy.dispatch(ctx, data['version'],
data['method'], data.get('namespace'), **data['args'])
class ZmqBaseReactor(ConsumerBase):
"""
A consumer class implementing a
centralized casting broker (PULL-PUSH)
for RoundRobin requests.
"""
def __init__(self, conf):
super(ZmqBaseReactor, self).__init__()
self.mapping = {}
self.proxies = {}
self.threads = []
self.sockets = []
self.subscribe = {}
self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
zmq_type_out=None, in_bind=True, out_bind=True,
subscribe=None):
LOG.info(_("Registering reactor"))
if zmq_type_in not in (zmq.PULL, zmq.SUB):
raise RPCException("Bad input socktype")
# Items push in.
inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
subscribe=subscribe)
self.proxies[inq] = proxy
self.sockets.append(inq)
LOG.info(_("In reactor registered"))
if not out_addr:
return
if zmq_type_out not in (zmq.PUSH, zmq.PUB):
raise RPCException("Bad output socktype")
# Items push out.
outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)
self.mapping[inq] = outq
self.mapping[outq] = inq
self.sockets.append(outq)
LOG.info(_("Out reactor registered"))
def consume_in_thread(self):
def _consume(sock):
LOG.info(_("Consuming socket"))
while True:
self.consume(sock)
for k in self.proxies.keys():
self.threads.append(
self.pool.spawn(_consume, k)
)
def wait(self):
for t in self.threads:
t.wait()
def close(self):
for s in self.sockets:
s.close()
for t in self.threads:
t.kill()
class ZmqProxy(ZmqBaseReactor):
"""
A consumer class implementing a
topic-based proxy, forwarding to
IPC sockets.
"""
def __init__(self, conf):
super(ZmqProxy, self).__init__(conf)
pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))
self.topic_proxy = {}
def consume(self, sock):
ipc_dir = CONF.rpc_zmq_ipc_dir
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
topic = data[1]
LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))
if topic.startswith('fanout~'):
sock_type = zmq.PUB
topic = topic.split('.', 1)[0]
elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
else:
sock_type = zmq.PUSH
if topic not in self.topic_proxy:
def publisher(waiter):
LOG.info(_("Creating proxy for topic: %s"), topic)
try:
# The topic is received over the network,
# don't trust this input.
if self.badchars.search(topic) is not None:
emsg = _("Topic contained dangerous characters.")
LOG.warn(emsg)
raise RPCException(emsg)
out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
(ipc_dir, topic),
sock_type, bind=True)
except RPCException:
waiter.send_exception(*sys.exc_info())
return
self.topic_proxy[topic] = eventlet.queue.LightQueue(
CONF.rpc_zmq_topic_backlog)
self.sockets.append(out_sock)
# It takes some time for a pub socket to open,
# before we can have any faith in doing a send() to it.
if sock_type == zmq.PUB:
eventlet.sleep(.5)
waiter.send(True)
                while True:
data = self.topic_proxy[topic].get()
out_sock.send(data)
LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
{'data': data})
wait_sock_creation = eventlet.event.Event()
eventlet.spawn(publisher, wait_sock_creation)
try:
wait_sock_creation.wait()
except RPCException:
LOG.error(_("Topic socket file creation failed."))
return
try:
self.topic_proxy[topic].put_nowait(data)
LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
{'data': data})
except eventlet.queue.Full:
LOG.error(_("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})
def consume_in_thread(self):
"""Runs the ZmqProxy service"""
ipc_dir = CONF.rpc_zmq_ipc_dir
consume_in = "tcp://%s:%s" % \
(CONF.rpc_zmq_bind_address,
CONF.rpc_zmq_port)
consumption_proxy = InternalContext(None)
if not os.path.isdir(ipc_dir):
try:
utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
ipc_dir, run_as_root=True)
utils.execute('chmod', '750', ipc_dir, run_as_root=True)
except utils.ProcessExecutionError:
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create IPC directory %s") %
(ipc_dir, ))
try:
self.register(consumption_proxy,
consume_in,
zmq.PULL,
out_bind=True)
except zmq.ZMQError:
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
super(ZmqProxy, self).consume_in_thread()
def unflatten_envelope(packenv):
"""Unflattens the RPC envelope.
Takes a list and returns a dictionary.
i.e. [1,2,3,4] => {1: 2, 3: 4}
"""
i = iter(packenv)
h = {}
try:
while True:
k = i.next()
h[k] = i.next()
except StopIteration:
return h
class ZmqReactor(ZmqBaseReactor):
"""
A consumer class implementing a
consumer for messages. Can also be
used as a 1:1 proxy
"""
def __init__(self, conf):
super(ZmqReactor, self).__init__(conf)
def consume(self, sock):
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
if sock in self.mapping:
LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
'data': data})
self.mapping[sock].send(data)
return
proxy = self.proxies[sock]
if data[2] == 'cast': # Legacy protocol
packenv = data[3]
ctx, msg = _deserialize(packenv)
request = rpc_common.deserialize_msg(msg)
ctx = RpcContext.unmarshal(ctx)
elif data[2] == 'impl_zmq_v2':
packenv = data[4:]
msg = unflatten_envelope(packenv)
request = rpc_common.deserialize_msg(msg)
# Unmarshal only after verifying the message.
ctx = RpcContext.unmarshal(data[3])
else:
LOG.error(_("ZMQ Envelope version unsupported or unknown."))
return
self.pool.spawn_n(self.process, proxy, ctx, request)
class Connection(rpc_common.Connection):
"""Manages connections and threads."""
def __init__(self, conf):
self.topics = []
self.reactor = ZmqReactor(conf)
def create_consumer(self, topic, proxy, fanout=False):
# Register with matchmaker.
_get_matchmaker().register(topic, CONF.rpc_zmq_host)
# Subscription scenarios
if fanout:
sock_type = zmq.SUB
            subscribe = fanout if isinstance(fanout, str) else ''
topic = 'fanout~' + topic.split('.', 1)[0]
else:
sock_type = zmq.PULL
subscribe = None
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
if topic in self.topics:
LOG.info(_("Skipping topic registration. Already registered."))
return
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
(CONF.rpc_zmq_ipc_dir, topic)
LOG.debug(_("Consumer is a zmq.%s"),
['PULL', 'SUB'][sock_type == zmq.SUB])
self.reactor.register(proxy, inaddr, sock_type,
subscribe=subscribe, in_bind=False)
self.topics.append(topic)
def close(self):
_get_matchmaker().stop_heartbeat()
for topic in self.topics:
_get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
self.reactor.close()
self.topics = []
def wait(self):
self.reactor.wait()
def consume_in_thread(self):
_get_matchmaker().start_heartbeat()
self.reactor.consume_in_thread()
def _cast(addr, context, topic, msg, timeout=None, envelope=False,
_msg_id=None):
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
with Timeout(timeout_cast, exception=rpc_common.Timeout):
try:
conn = ZmqClient(addr)
# assumes cast can't return an exception
conn.cast(_msg_id, topic, payload, envelope)
except zmq.ZMQError:
raise RPCException("Cast failed. ZMQ Socket Exception")
finally:
if 'conn' in vars():
conn.close()
def _call(addr, context, topic, msg, timeout=None,
envelope=False):
# timeout_response is how long we wait for a response
timeout = timeout or CONF.rpc_response_timeout
# The msg_id is used to track replies.
msg_id = uuid.uuid4().hex
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
LOG.debug(_("Creating payload"))
# Curry the original request into a reply method.
mcontext = RpcContext.marshal(context)
payload = {
'method': '-reply',
'args': {
'msg_id': msg_id,
'topic': reply_topic,
# TODO(ewindisch): safe to remove mcontext in I.
'msg': [mcontext, msg]
}
}
LOG.debug(_("Creating queue socket for reply waiter"))
# Messages arriving async.
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
with Timeout(timeout, exception=rpc_common.Timeout):
try:
msg_waiter = ZmqSocket(
"ipc://%s/zmq_topic_zmq_replies.%s" %
(CONF.rpc_zmq_ipc_dir,
CONF.rpc_zmq_host),
zmq.SUB, subscribe=msg_id, bind=False
)
LOG.debug(_("Sending cast"))
_cast(addr, context, topic, payload, envelope)
LOG.debug(_("Cast sent; Waiting reply"))
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug(_("Received message: %s"), msg)
LOG.debug(_("Unpacking response"))
if msg[2] == 'cast': # Legacy version
raw_msg = _deserialize(msg[-1])[-1]
elif msg[2] == 'impl_zmq_v2':
rpc_envelope = unflatten_envelope(msg[4:])
raw_msg = rpc_common.deserialize_msg(rpc_envelope)
else:
raise rpc_common.UnsupportedRpcEnvelopeVersion(
_("Unsupported or unknown ZMQ envelope returned."))
responses = raw_msg['args']['response']
# ZMQError trumps the Timeout error.
except zmq.ZMQError:
raise RPCException("ZMQ Socket Error")
except (IndexError, KeyError):
raise RPCException(_("RPC Message Invalid."))
finally:
if 'msg_waiter' in vars():
msg_waiter.close()
# It seems we don't need to do all of the following,
# but perhaps it would be useful for multicall?
# One effect of this is that we're checking all
# responses for Exceptions.
for resp in responses:
if isinstance(resp, types.DictType) and 'exc' in resp:
raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
return responses[-1]
def _multi_send(method, context, topic, msg, timeout=None,
envelope=False, _msg_id=None):
"""
    Wraps the sending of messages: dispatches to the matchmaker and sends
    the message to all relevant hosts.
"""
conf = CONF
LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
queues = _get_matchmaker().queues(topic)
LOG.debug(_("Sending message(s) to: %s"), queues)
# Don't stack if we have no matchmaker results
if not queues:
LOG.warn(_("No matchmaker results. Not casting."))
# While not strictly a timeout, callers know how to handle
# this exception and a timeout isn't too big a lie.
raise rpc_common.Timeout(_("No match from matchmaker."))
# This supports brokerless fanout (addresses > 1)
for queue in queues:
(_topic, ip_addr) = queue
_addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
if method.__name__ == '_cast':
eventlet.spawn_n(method, _addr, context,
_topic, msg, timeout, envelope,
_msg_id)
return
return method(_addr, context, _topic, msg, timeout,
envelope)
def create_connection(conf, new=True):
return Connection(conf)
def multicall(conf, *args, **kwargs):
"""Multiple calls."""
return _multi_send(_call, *args, **kwargs)
def call(conf, *args, **kwargs):
"""Send a message, expect a response."""
data = _multi_send(_call, *args, **kwargs)
return data[-1]
def cast(conf, *args, **kwargs):
"""Send a message expecting no reply."""
_multi_send(_cast, *args, **kwargs)
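# Hypothetical usage sketch (not from the original source): the module-level
# entry points mirror the other rpc drivers; the topic and message below are
# made up.
#
#   conn = create_connection(CONF)
#   conn.create_consumer('mytopic', proxy)
#   conn.consume_in_thread()
#   cast(CONF, context, 'mytopic', {'method': 'do_work', 'args': {}})
#   result = call(CONF, context, 'mytopic', {'method': 'ping', 'args': {}})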
def fanout_cast(conf, context, topic, msg, **kwargs):
"""Send a message to all listening and expect no reply."""
    # NOTE(ewindisch): fanout~ is used because it avoids splitting on .
# and acts as a non-subtle hint to the matchmaker and ZmqProxy.
_multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
def notify(conf, context, topic, msg, envelope):
"""
Send notification event.
Notifications are sent to topic-priority.
This differs from the AMQP drivers which send to topic.priority.
"""
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
topic = topic.replace('.', '-')
cast(conf, context, topic, msg, envelope=envelope)
def cleanup():
"""Clean up resources in use by implementation."""
global ZMQ_CTX
if ZMQ_CTX:
ZMQ_CTX.term()
ZMQ_CTX = None
global matchmaker
matchmaker = None
def _get_ctxt():
if not zmq:
raise ImportError("Failed to import eventlet.green.zmq")
global ZMQ_CTX
if not ZMQ_CTX:
ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
return ZMQ_CTX
def _get_matchmaker(*args, **kwargs):
global matchmaker
if not matchmaker:
matchmaker = importutils.import_object(
CONF.rpc_zmq_matchmaker, *args, **kwargs)
return matchmaker
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import stubout
import unittest
import webob
from nova import flags
from nova.api import openstack
from nova.tests.api.openstack import fakes
import nova.wsgi
FLAGS = flags.FLAGS
def return_create_instance_metadata_max(context, server_id, metadata):
return stub_max_server_metadata()
def return_create_instance_metadata(context, server_id, metadata):
return stub_server_metadata()
def return_server_metadata(context, server_id):
return stub_server_metadata()
def return_empty_server_metadata(context, server_id):
return {}
def delete_server_metadata(context, server_id, key):
pass
def stub_server_metadata():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
return metadata
def stub_max_server_metadata():
metadata = {"metadata": {}}
for num in range(FLAGS.quota_metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
class ServerMetaDataTest(unittest.TestCase):
def setUp(self):
super(ServerMetaDataTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_auth(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
def tearDown(self):
self.stubs.UnsetAll()
super(ServerMetaDataTest, self).tearDown()
def test_index(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
return_server_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('value1', res_dict['metadata']['key1'])
def test_index_no_data(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
return_empty_server_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual(0, len(res_dict['metadata']))
def test_show(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
return_server_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta/key5')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('value5', res_dict['key5'])
def test_show_meta_not_found(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
return_empty_server_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta/key6')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
def test_delete(self):
self.stubs.Set(nova.db.api, 'instance_metadata_delete',
delete_server_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta/key5')
req.environ['api.version'] = '1.1'
req.method = 'DELETE'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
def test_create(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta')
req.environ['api.version'] = '1.1'
req.method = 'POST'
req.body = '{"metadata": {"key1": "value1"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('value1', res_dict['metadata']['key1'])
def test_update_item(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta/key1')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
req.body = '{"key1": "value1"}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
res_dict = json.loads(res.body)
self.assertEqual('value1', res_dict['key1'])
def test_update_item_too_many_keys(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta/key1')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
req.body = '{"key1": "value1", "key2": "value2"}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta/bad')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
req.body = '{"key1": "value1"}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_too_many_metadata_items_on_create(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
data = {"metadata": {}}
for num in range(FLAGS.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
        json_string = json.dumps(data)
req = webob.Request.blank('/v1.1/servers/1/meta')
req.environ['api.version'] = '1.1'
req.method = 'POST'
req.body = json_string
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
    def test_too_many_metadata_items_on_update_item(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata_max)
req = webob.Request.blank('/v1.1/servers/1/meta/key1')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
req.body = '{"a new key": "a new value"}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Original implementation by Rene de Jong. Updated by Sascha Bischoff.
# pylint: disable=E1101
import logging
import os
import re
import shutil
import socket
import subprocess
import sys
import tarfile
import time
from pexpect import EOF, TIMEOUT, pxssh
from wlauto import settings, Parameter
from wlauto.core.resource import NO_ONE
from wlauto.common.resources import Executable
from wlauto.core import signal as sig
from wlauto.exceptions import DeviceError
from wlauto.utils import ssh, types
class BaseGem5Device(object):
"""
Base implementation for a gem5-based device
This class is used as the base class for OS-specific devices such as the
    Gem5LinuxDevice and the Gem5AndroidDevice. The majority of the gem5-specific
functionality is included here.
Note: When inheriting from this class, make sure to inherit from this class
prior to inheriting from the OS-specific class, i.e. LinuxDevice, to ensure
that the methods are correctly overridden.
"""
# gem5 can be very slow. Hence, we use some very long timeouts!
delay = 3600
long_delay = 3 * delay
ready_timeout = long_delay
default_timeout = delay
platform = None
path_module = 'posixpath'
parameters = [
Parameter('gem5_binary', kind=str, default='./build/ARM/gem5.fast',
mandatory=False, description="Command used to execute gem5. "
"Adjust according to needs."),
Parameter('gem5_args', kind=types.arguments, mandatory=True,
description="Command line passed to the gem5 simulation. This"
" command line is used to set up the simulated system, and "
"should be the same as used for a standard gem5 simulation "
"without workload automation. Note that this is simulation "
"script specific and will hence need to be tailored to each "
"particular use case."),
Parameter('gem5_vio_args', kind=types.arguments, mandatory=True,
constraint=lambda x: "{}" in str(x),
description="gem5 VirtIO command line used to enable the "
"VirtIO device in the simulated system. At the very least, "
"the root parameter of the VirtIO9PDiod device must be "
"exposed on the command line. Please set this root mount to "
"{}, as it will be replaced with the directory used by "
"Workload Automation at runtime."),
Parameter('temp_dir', kind=str, default='/tmp',
description="Temporary directory used to pass files into the "
"gem5 simulation. Workload Automation will automatically "
"create a directory in this folder, and will remove it again "
"once the simulation completes."),
Parameter('checkpoint', kind=bool, default=False,
mandatory=False, description="This parameter "
"tells Workload Automation to create a checkpoint of the "
"simulated system once the guest system has finished booting."
" This checkpoint can then be used at a later stage by other "
"WA runs to avoid booting the guest system a second time. Set"
" to True to take a checkpoint of the simulated system post "
"boot."),
Parameter('run_delay', kind=int, default=0, mandatory=False,
constraint=lambda x: x >= 0,
description="This sets the time that the "
"system should sleep in the simulated system prior to "
"running and workloads or taking checkpoints. This allows "
"the system to quieten down prior to running the workloads. "
"When this is combined with the checkpoint_post_boot"
" option, it allows the checkpoint to be created post-sleep,"
" and therefore the set of workloads resuming from this "
"checkpoint will not be required to sleep.")
]
@property
def is_rooted(self): # pylint: disable=R0201
# gem5 is always rooted
return True
# pylint: disable=E0203
def __init__(self):
self.logger = logging.getLogger('gem5Device')
# The gem5 subprocess
self.gem5 = None
self.gem5_port = -1
self.gem5outdir = os.path.join(settings.output_directory, "gem5")
self.m5_path = 'm5'
self.stdout_file = None
self.stderr_file = None
self.stderr_filename = None
self.sckt = None
# Find the first one that does not exist. Ensures that we do not re-use
# the directory used by someone else.
for i in xrange(sys.maxint):
directory = os.path.join(self.temp_dir, "wa_{}".format(i))
try:
os.stat(directory)
continue
except OSError:
break
self.temp_dir = directory
self.logger.debug("Using {} as the temporary directory.".format(self.temp_dir))
# Start the gem5 simulation when WA starts a run using a signal.
sig.connect(self.init_gem5, sig.RUN_START)
def validate(self):
# Assemble the virtio args
self.gem5_vio_args = str(self.gem5_vio_args).format(self.temp_dir) # pylint: disable=W0201
self.logger.debug("gem5 VirtIO command: {}".format(self.gem5_vio_args))
def init_gem5(self, _):
"""
Start gem5, find out the telnet port and connect to the simulation.
        We first create the temporary directory used by VirtIO to pass files
        into the simulation, as well as the gem5 output directory. We then
        create files for the standard output and error of the gem5 process,
        and finally start the gem5 process itself.
"""
self.logger.info("Creating temporary directory: {}".format(self.temp_dir))
os.mkdir(self.temp_dir)
os.mkdir(self.gem5outdir)
# We need to redirect the standard output and standard error for the
# gem5 process to a file so that we can debug when things go wrong.
f = os.path.join(self.gem5outdir, 'stdout')
self.stdout_file = open(f, 'w')
f = os.path.join(self.gem5outdir, 'stderr')
self.stderr_file = open(f, 'w')
# We need to keep this so we can check which port to use for the telnet
# connection.
self.stderr_filename = f
self.start_gem5()
def start_gem5(self):
"""
Starts the gem5 simulator, and parses the output to get the telnet port.
"""
self.logger.info("Starting the gem5 simulator")
command_line = "{} --outdir={}/gem5 {} {}".format(self.gem5_binary,
settings.output_directory,
self.gem5_args,
self.gem5_vio_args)
self.logger.debug("gem5 command line: {}".format(command_line))
self.gem5 = subprocess.Popen(command_line.split(),
stdout=self.stdout_file,
stderr=self.stderr_file)
while self.gem5_port == -1:
# Check that gem5 is running!
if self.gem5.poll():
raise DeviceError("The gem5 process has crashed with error code {}!".format(self.gem5.poll()))
# Open the stderr file
f = open(self.stderr_filename, 'r')
for line in f:
m = re.search(r"Listening\ for\ system\ connection\ on\ port\ (?P<port>\d+)", line)
if m:
port = int(m.group('port'))
if port >= 3456 and port < 5900:
self.gem5_port = port
f.close()
break
else:
time.sleep(1)
f.close()
def connect(self): # pylint: disable=R0912,W0201
"""
Connect to the gem5 simulation and wait for Android to boot. Then,
create checkpoints, and mount the VirtIO device.
"""
self.connect_gem5()
self.wait_for_boot()
if self.run_delay:
self.logger.info("Sleeping for {} seconds in the guest".format(self.run_delay))
self.gem5_shell("sleep {}".format(self.run_delay))
if self.checkpoint:
self.checkpoint_gem5()
self.mount_virtio()
self.logger.info("Creating the working directory in the simulated system")
self.gem5_shell('mkdir -p {}'.format(self.working_directory))
self._is_ready = True # pylint: disable=W0201
def wait_for_boot(self):
pass
def connect_gem5(self): # pylint: disable=R0912
"""
Connect to the telnet port of the gem5 simulation.
We connect, and wait for the prompt to be found. We do not use a timeout
for this, and wait for the prompt in a while loop as the gem5 simulation
can take many hours to reach a prompt when booting the system. We also
inject some newlines periodically to try and force gem5 to show a
prompt. Once the prompt has been found, we replace it with a unique
prompt to ensure that we are able to match it properly. We also disable
the echo as this simplifies parsing the output when executing commands
on the device.
"""
self.logger.info("Connecting to the gem5 simulation on port {}".format(self.gem5_port))
host = socket.gethostname()
port = self.gem5_port
# Connect to the gem5 telnet port. Use a short timeout here.
attempts = 0
while attempts < 10:
attempts += 1
try:
self.sckt = ssh.TelnetConnection()
self.sckt.login(host, 'None', port=port, auto_prompt_reset=False,
login_timeout=10)
break
except pxssh.ExceptionPxssh:
pass
else:
self.gem5.kill()
raise DeviceError("Failed to connect to the gem5 telnet session.")
self.logger.info("Connected! Waiting for prompt...")
# We need to find the prompt. It might be different if we are resuming
# from a checkpoint. Therefore, we test multiple options here.
prompt_found = False
while not prompt_found:
try:
self.login_to_device()
except TIMEOUT:
pass
try:
# Try and force a prompt to be shown
self.sckt.send('\n')
self.sckt.expect([r'# ', self.sckt.UNIQUE_PROMPT, r'\[PEXPECT\][\\\$\#]+ '], timeout=60)
prompt_found = True
except TIMEOUT:
pass
self.logger.info("Setting unique prompt...")
self.sckt.set_unique_prompt()
self.sckt.prompt()
self.logger.info("Prompt found and replaced with a unique string")
# We check that the prompt is what we think it should be. If not, we
# need to update the regex we use to match.
self.find_prompt()
self.sckt.setecho(False)
self.sync_gem5_shell()
self.resize_shell()
def get_properties(self, context): # pylint: disable=R0801
""" Get the property files from the device """
for propfile in self.property_files:
try:
normname = propfile.lstrip(self.path.sep).replace(self.path.sep, '.')
outfile = os.path.join(context.host_working_directory, normname)
if self.is_file(propfile):
self.execute('cat {} > {}'.format(propfile, normname))
self.pull_file(normname, outfile)
elif self.is_directory(propfile):
self.get_directory(context, propfile)
continue
else:
continue
except DeviceError:
# We pull these files "opportunistically", so if a pull fails
# (e.g. we don't have permissions to read the file), just note
# it quietly (not as an error/warning) and move on.
self.logger.debug('Could not pull property file "{}"'.format(propfile))
return {}
def get_directory(self, context, directory):
""" Pull a directory from the device """
normname = directory.lstrip(self.path.sep).replace(self.path.sep, '.')
outdir = os.path.join(context.host_working_directory, normname)
temp_file = os.path.join(context.host_working_directory, "{}.tar".format(normname))
# Check that the folder exists
self.gem5_shell("ls -la {}".format(directory))
# Compress the folder
try:
self.gem5_shell("{} tar -cvf {}.tar {}".format(self.busybox, normname, directory))
except DeviceError:
self.logger.debug("Failed to run tar command on device! Not pulling {}".format(directory))
return
        self.pull_file("{}.tar".format(normname), temp_file)
f = tarfile.open(temp_file, 'r')
os.mkdir(outdir)
f.extractall(outdir)
os.remove(temp_file)
def get_pids_of(self, process_name):
""" Returns a list of PIDs of all processes with the specified name. """
result = self.gem5_shell('ps | {} grep {}'.format(self.busybox, process_name),
check_exit_code=False).strip()
if result and 'not found' not in result and len(result.split('\n')) > 2:
return [int(x.split()[1]) for x in result.split('\n')]
else:
return []
def find_prompt(self):
prompt = r'\[PEXPECT\][\\\$\#]+ '
synced = False
while not synced:
self.sckt.send('\n')
i = self.sckt.expect([prompt, self.sckt.UNIQUE_PROMPT, r'[\$\#] '], timeout=self.delay)
if i == 0:
synced = True
elif i == 1:
prompt = self.sckt.UNIQUE_PROMPT
synced = True
else:
prompt = re.sub(r'\$', r'\\\$', self.sckt.before.strip() + self.sckt.after.strip())
prompt = re.sub(r'\#', r'\\\#', prompt)
                prompt = re.sub(r'\[', r'\\\[', prompt)
                prompt = re.sub(r'\]', r'\\\]', prompt)
self.sckt.PROMPT = prompt
def close(self):
if self._logcat_poller:
self._logcat_poller.stop()
def reset(self):
self.logger.warn("Attempt to restart the gem5 device. This is not "
"supported!")
# pylint: disable=unused-argument
def push_file(self, source, dest, **kwargs):
"""
Push a file to the gem5 device using VirtIO
The file to push to the device is copied to the temporary directory on
the host, before being copied within the simulation to the destination.
Checks, in the form of 'ls' with error code checking, are performed to
ensure that the file is copied to the destination.
"""
filename = os.path.basename(source)
self.logger.debug("Pushing {} to device.".format(source))
self.logger.debug("temp_dir: {}".format(self.temp_dir))
self.logger.debug("dest: {}".format(dest))
self.logger.debug("filename: {}".format(filename))
# We need to copy the file to copy to the temporary directory
self.move_to_temp_dir(source)
# Back to the gem5 world
self.gem5_shell("ls -al /mnt/obb/{}".format(filename))
if self.busybox:
self.gem5_shell("{} cp /mnt/obb/{} {}".format(self.busybox, filename, dest))
else:
self.gem5_shell("cat /mnt/obb/{} > {}".format(filename, dest))
self.gem5_shell("sync")
self.gem5_shell("ls -al {}".format(dest))
self.gem5_shell("ls -al /mnt/obb/")
self.logger.debug("Push complete.")
# pylint: disable=unused-argument
def pull_file(self, source, dest, **kwargs):
"""
Pull a file from the gem5 device using m5 writefile
The file is copied to the local directory within the guest as the m5
writefile command assumes that the file is local. The file is then
written out to the host system using writefile, prior to being moved to
the destination on the host.
"""
filename = os.path.basename(source)
self.logger.debug("pull_file {} {}".format(source, filename))
# We don't check the exit code here because it is non-zero if the source
# and destination are the same. The ls below will cause an error if the
# file was not where we expected it to be.
self.gem5_shell("{} cp {} {}".format(self.busybox, source, filename),
check_exit_code=False)
self.gem5_shell("sync")
self.gem5_shell("ls -la {}".format(filename))
self.logger.debug('Finished the copy in the simulator')
self.gem5_util("writefile {}".format(filename))
if 'cpu' not in filename:
while not os.path.exists(os.path.join(self.gem5outdir, filename)):
time.sleep(1)
# Perform the local move
shutil.move(os.path.join(self.gem5outdir, filename), dest)
self.logger.debug("Pull complete.")
# pylint: disable=unused-argument
def delete_file(self, filepath, **kwargs):
""" Delete a file on the device """
self._check_ready()
self.gem5_shell("rm '{}'".format(filepath))
def file_exists(self, filepath):
""" Check if a file exists """
self._check_ready()
output = self.gem5_shell('if [ -e \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath))
try:
if int(output):
return True
except ValueError:
# If we cannot process the output, assume that there is no file
pass
return False
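    # Hypothetical usage sketch (not from the original source): guard a pull
    # on the guest file actually existing.
    #
    #   if device.file_exists('/data/results.txt'):
    #       device.pull_file('/data/results.txt', './results.txt')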
def disconnect(self):
"""
Close and disconnect from the gem5 simulation. Additionally, we remove
the temporary directory used to pass files into the simulation.
"""
self.logger.info("Gracefully terminating the gem5 simulation.")
try:
self.gem5_util("exit")
self.gem5.wait()
except EOF:
pass
self.logger.info("Removing the temporary directory")
try:
shutil.rmtree(self.temp_dir)
except OSError:
self.logger.warn("Failed to remove the temporary directory!")
# gem5 might be slow. Hence, we need to make the ping timeout very long.
def ping(self):
self.logger.debug("Pinging gem5 to see if it is still alive")
        self.gem5_shell('ls /', timeout=self.long_delay)
# Additional Android-specific methods.
def forward_port(self, _): # pylint: disable=R0201
raise DeviceError('we do not need forwarding')
# gem5 should dump out a framebuffer. We can use this if it exists. Failing
# that, fall back to the parent class implementation.
def capture_screen(self, filepath):
file_list = os.listdir(self.gem5outdir)
screen_caps = []
for f in file_list:
if '.bmp' in f:
screen_caps.append(f)
        if len(screen_caps) == 1:
            # We have exactly one screen capture from gem5: convert it to a
            # PNG and use it. Otherwise, fall back to the slower, built-in
            # method.
try:
import Image
gem5_image = os.path.join(self.gem5outdir, screen_caps[0])
temp_image = os.path.join(self.gem5outdir, "file.png")
im = Image.open(gem5_image)
im.save(temp_image, "PNG")
shutil.copy(temp_image, filepath)
os.remove(temp_image)
self.logger.debug("capture_screen: using gem5 screencap")
return True
except (shutil.Error, ImportError, IOError):
pass
return False
# pylint: disable=W0613
def execute(self, command, timeout=1000, check_exit_code=True, background=False,
as_root=False, busybox=False, **kwargs):
self._check_ready()
if as_root and not self.is_rooted:
raise DeviceError('Attempting to execute "{}" as root on unrooted device.'.format(command))
if busybox:
if not self.is_rooted:
raise DeviceError('Attempting to execute "{}" with busybox. '.format(command) +
'Busybox can only be deployed to rooted devices.')
command = ' '.join([self.busybox, command])
if background:
self.logger.debug("Attempt to execute in background. Not supported "
"in gem5, hence ignored.")
return self.gem5_shell(command, as_root=as_root)
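# Illustrative usage (hypothetical command): on a rooted image,
#   execute('ls /data', as_root=True, busybox=True)
# runs "<busybox> ls /data" through gem5_shell; background=True is accepted
# but ignored, as noted above.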
# Internal methods: do not use outside of the class.
def _check_ready(self):
"""
Check if the device is ready.
As this is gem5, we just assume that the device is ready once we have
connected to the gem5 simulation, and updated the prompt.
"""
if not self._is_ready:
raise DeviceError('Device not ready.')
def gem5_shell(self, command, as_root=False, timeout=None, check_exit_code=True, sync=True): # pylint: disable=R0912
"""
Execute a command in the gem5 shell
This wraps the telnet connection to gem5 and processes the raw output.
This method waits for the shell to return, and then tries to separate
the command's output from the echoed command itself. If this fails, a
warning is logged, but we continue with the potentially incorrect output.
The exit code is also checked by default, and non-zero exit codes will
raise a DeviceError.
"""
conn = self.sckt
if sync:
self.sync_gem5_shell()
self.logger.debug("gem5_shell command: {}".format(command))
# Send the actual command
conn.send("{}\n".format(command))
# Wait for the response. We just sit here and wait for the prompt to
# appear, as gem5 might take a long time to provide the output. This
# avoids timeout issues.
command_index = -1
while command_index == -1:
if conn.prompt():
output = re.sub(r' \r([^\n])', r'\1', conn.before)
output = re.sub(r'[\b]', r'', output)
# Deal with line wrapping
output = re.sub(r'[\r].+?<', r'', output)
command_index = output.find(command)
# If we have -1, then we cannot match the command, but the
# prompt has returned. Hence, we have a bit of an issue. We
# warn, and return the whole output.
if command_index == -1:
self.logger.warn("gem5_shell: Unable to match command in "
"command output. Expect parsing errors!")
command_index = 0
output = output[command_index + len(command):].strip()
# It is possible that gem5 will echo the command. Therefore, we need to
# remove that too!
command_index = output.find(command)
if command_index != -1:
output = output[command_index + len(command):].strip()
self.logger.debug("gem5_shell output: {}".format(output))
# We get a second prompt. Hence, we need to eat one to make sure that we
# stay in sync. If we do not do this, we risk getting out of sync for
# slower simulations.
self.sckt.expect([self.sckt.UNIQUE_PROMPT, self.sckt.PROMPT], timeout=self.delay)
if check_exit_code:
exit_code_text = self.gem5_shell('echo $?', as_root=as_root,
timeout=timeout, check_exit_code=False,
sync=False)
try:
exit_code = int(exit_code_text.split()[0])
if exit_code:
message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}'
raise DeviceError(message.format(exit_code, command, output))
except (ValueError, IndexError):
self.logger.warning('Could not get exit code for "{}",\ngot: "{}"'.format(command, exit_code_text))
return output
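# For readability: the method above strips the echoed command (possibly
# twice), eats the extra prompt, and, when check_exit_code is True, issues
# "echo $?" to validate the exit code. A typical call might look like
# (hypothetical command):
#   output = self.gem5_shell('uname -a')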
def gem5_util(self, command):
""" Execute a gem5 utility command using the m5 binary on the device """
self.gem5_shell('{} {}'.format(self.m5_path, command))
def sync_gem5_shell(self):
"""
Synchronise with the gem5 shell.
Write some unique text to the gem5 device to allow us to synchronise
with the shell output. We actually get two prompts so we need to match
both of these.
"""
self.logger.debug("Sending Sync")
self.sckt.send("echo \*\*sync\*\*\n")
self.sckt.expect(r"\*\*sync\*\*", timeout=self.delay)
self.sckt.expect([self.sckt.UNIQUE_PROMPT, self.sckt.PROMPT], timeout=self.delay)
self.sckt.expect([self.sckt.UNIQUE_PROMPT, self.sckt.PROMPT], timeout=self.delay)
def resize_shell(self):
"""
Resize the shell to avoid line wrapping issues.
"""
# Try to avoid line wrapping as much as possible. Don't check the error
# codes from these commands because some of them WILL fail.
self.gem5_shell('stty columns 1024', check_exit_code=False)
self.gem5_shell('{} stty columns 1024'.format(self.busybox), check_exit_code=False)
self.gem5_shell('stty cols 1024', check_exit_code=False)
self.gem5_shell('{} stty cols 1024'.format(self.busybox), check_exit_code=False)
self.gem5_shell('reset', check_exit_code=False)
def move_to_temp_dir(self, source):
"""
Move a file to the temporary directory on the host for copying to the
gem5 device
"""
command = "cp {} {}".format(source, self.temp_dir)
self.logger.debug("Local copy command: {}".format(command))
subprocess.call(command.split())
subprocess.call("sync".split())
def checkpoint_gem5(self, end_simulation=False):
""" Checkpoint the gem5 simulation, storing all system state """
self.logger.info("Taking a post-boot checkpoint")
self.gem5_util("checkpoint")
if end_simulation:
self.disconnect()
def mount_virtio(self):
"""
Mount the VirtIO device in the simulated system.
"""
self.logger.info("Mounting VirtIO device in simulated system")
self.gem5_shell('mkdir -p /mnt/obb')
mount_command = "mount -t 9p -o trans=virtio,version=9p2000.L,aname={} gem5 /mnt/obb".format(self.temp_dir)
self.gem5_shell(mount_command)
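# Note: in a 9p mount the aname option names the exported tree requested
# from the server; here it is expected to match the temporary directory
# shared with gem5 on the host side.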
def deploy_m5(self, context, force=False):
"""
Deploys the m5 binary to the device and returns the path to the binary
on the device.
:param force: by default, if the binary is already present on the
device, it will not be deployed again. Setting force to
``True`` overrides that behaviour and ensures that the
binary is always copied. Defaults to ``False``.
:returns: The on-device path to the m5 binary.
"""
on_device_executable = self.path.join(self.binaries_directory, 'm5')
if not force and self.file_exists(on_device_executable):
# We want to check the version of the binary. We cannot directly
# check this because the m5 binary itself is unversioned. We also
# need to make sure not to check the error code as "m5 --help"
# returns a non-zero error code.
output = self.gem5_shell('m5 --help', check_exit_code=False)
if "writefile" in output:
self.logger.debug("Using the m5 binary on the device...")
self.m5_path = on_device_executable
return on_device_executable
else:
self.logger.debug("m5 on device does not support writefile!")
host_file = context.resolver.get(Executable(NO_ONE, self.abi, 'm5'))
self.logger.info("Installing the m5 binary to the device...")
self.m5_path = self.install(host_file)
return self.m5_path
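# Illustrative usage sketch (hypothetical variable names): during device
# initialisation one would typically call
#   m5_path = device.deploy_m5(context)
# which either reuses an on-device m5 that already supports "writefile" or
# installs the host-resolved binary and records its path in self.m5_path.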
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the openhtf.exe module."""
import threading
import time
import unittest
import mock
import openhtf
from openhtf import plugs
from openhtf import util
from openhtf.core import phase_descriptor
from openhtf.core import phase_executor
from openhtf.core import phase_group
from openhtf.core import test_descriptor
from openhtf.core import test_executor
from openhtf.core import test_state
from openhtf.core.test_record import Outcome
from openhtf.util import conf
from openhtf.util import logs
from openhtf.util import timeouts
# Default logging to debug level.
logs.CLI_LOGGING_VERBOSITY = 2
class UnittestPlug(plugs.BasePlug):
return_continue_count = 4
def __init__(self):
self.count = 0
def setup_cap(self):
print('Set up the plugs instance.')
def tear_down_cap(self):
print('Tear down the plugs instance.')
def do_stuff(self):
print('Plugs-specific functionality.')
def increment(self):
self.count += 1
return self.count >= self.return_continue_count
class MoreRepeatsUnittestPlug(UnittestPlug):
return_continue_count = 100
class FailedPlugError(Exception):
"""Exception for the failed plug."""
FAIL_PLUG_MESSAGE = 'Failed'
class FailPlug(plugs.BasePlug):
def __init__(self):
raise FailedPlugError(FAIL_PLUG_MESSAGE)
@openhtf.PhaseOptions()
def start_phase(test):
test.dut_id = 'DUT ID'
@openhtf.PhaseOptions()
def phase_one(test, test_plug):
del test # Unused.
del test_plug # Unused.
time.sleep(0.01)
print('phase_one completed')
@plugs.plug(test_plug=UnittestPlug)
def phase_two(test, test_plug):
del test # Unused.
del test_plug # Unused.
time.sleep(0.02)
print('phase_two completed')
@openhtf.PhaseOptions(repeat_limit=4)
@plugs.plug(test_plug=UnittestPlug.placeholder)
def phase_repeat(test, test_plug):
del test # Unused.
time.sleep(0.01)
ret = test_plug.increment()
print('phase_repeat completed for %s time' % test_plug.count)
return openhtf.PhaseResult.CONTINUE if ret else openhtf.PhaseResult.REPEAT
@openhtf.PhaseOptions(run_if=lambda: False)
def phase_skip_from_run_if(test):
del test # Unused.
@openhtf.PhaseOptions()
def phase_return_skip(test):
del test # Unused.
return openhtf.PhaseResult.SKIP
@openhtf.PhaseOptions()
def phase_return_fail_and_continue(test):
del test # Unused.
return openhtf.PhaseResult.FAIL_AND_CONTINUE
@plugs.plug(fail=FailPlug)
def fail_plug_phase(fail):
del fail
def blank_phase():
pass
class TeardownError(Exception):
pass
def teardown_fail():
raise TeardownError()
def _abort_executor_in_thread(executor_abort):
# If we were to stop the executor directly from within the phase, the phase
# would eventually be killed via KillableThread, which raises
# ThreadTerminationError inside this function, i.e. inside whichever frame
# is currently executing executor.stop(). That interrupts the shutdown of
# the executor at a random point in time. To make this deterministic, we
# keep the phase alive for as long as the executor is running, which really
# just means that the wait() call is the one that receives the error.
inner_ev = threading.Event()
def abort_executor():
executor_abort()
inner_ev.set()
threading.Thread(target=abort_executor).start()
inner_ev.wait(1)
class TestExecutorTest(unittest.TestCase):
class TestDummyExceptionError(Exception):
"""Exception to be thrown by failure_phase."""
def setUp(self):
self.test_plug_type = UnittestPlug
def test_failures(self):
"""Tests that specified exception will cause FAIL not ERROR."""
@openhtf.PhaseOptions()
def failure_phase(test):
del test # Unused.
raise self.TestDummyExceptionError
# Configure test to throw exception midrun, and check that this causes
# Outcome = ERROR.
ev = threading.Event()
group = phase_group.PhaseGroup(
main=[failure_phase],
teardown=[lambda: ev.set()], # pylint: disable=unnecessary-lambda
)
test = openhtf.Test(group)
test.configure(
default_dut_id='dut',
)
executor = test_executor.TestExecutor(
test.descriptor, 'uid', start_phase, test._test_options)
executor.start()
executor.wait()
record = executor.test_state.test_record
self.assertEqual(record.outcome, Outcome.ERROR)
# Same as above, but now specify that the TestDummyExceptionError should
# instead be a FAIL outcome.
test.configure(
failure_exceptions=[self.TestDummyExceptionError]
)
executor = test_executor.TestExecutor(
test.descriptor, 'uid', start_phase, test._test_options)
executor.start()
executor.wait()
record = executor.test_state.test_record
self.assertEqual(record.outcome, Outcome.FAIL)
def test_plug_map(self):
test = openhtf.Test(phase_one, phase_two)
self.assertIn(self.test_plug_type, test.descriptor.plug_types)
# Mock test execution.
def test_test_executor(self):
mock_starter = mock.Mock(spec=test_executor.TestExecutor)
mock_starter.start()
mock_starter.wait()
mock_starter.abort()
def test_class_string(self):
check_list = ['PhaseExecutorThread', 'phase_one']
phase_thread = phase_executor.PhaseExecutorThread(phase_one, ' ')
name = str(phase_thread)
for item in check_list:
self.assertIn(item, name)
@conf.save_and_restore(cancel_timeout_s=1)
def test_cancel_start(self):
@openhtf.PhaseOptions()
def cancel_phase(test):
test.dut_id = 'DUT ID'
# We have 'executor' because we're inside the test method's scope.
# We have to run it in a thread to avoid getting a nasty series of
# confusing errors:
_abort_executor_in_thread(executor.abort)
ev = threading.Event()
group = phase_group.PhaseGroup(
teardown=[lambda: ev.set()], # pylint: disable=unnecessary-lambda
)
test = openhtf.Test(group)
test.configure(
default_dut_id='dut',
)
# Cancel during test start phase.
executor = test_executor.TestExecutor(
test.descriptor,
'uid',
cancel_phase,
test._test_options
)
executor.start()
executor.wait()
record = executor.test_state.test_record
self.assertEqual(record.phases[0].name, cancel_phase.name)
# The test will end at essentially the same time it starts because the
# test never actually started; we canceled it inside test_start, resulting
# in a short, vacuous run. Start and end times should be no more than a
# few milliseconds apart in that case.
self.assertLess(record.end_time_millis - record.start_time_millis, 4)
self.assertLessEqual(record.end_time_millis, util.time_millis())
# Teardown function should not be executed.
self.assertFalse(ev.wait(3))
def test_cancel_phase(self):
@openhtf.PhaseOptions()
def cancel_phase():
# See above cancel_phase for explanations.
_abort_executor_in_thread(executor.abort)
ev = threading.Event()
group = phase_group.PhaseGroup(main=[cancel_phase],
teardown=[lambda: ev.set()]) # pylint: disable=unnecessary-lambda
test = openhtf.Test(group)
test.configure(
default_dut_id='dut',
)
executor = test_executor.TestExecutor(
test.descriptor, 'uid', start_phase, test._test_options)
executor.start()
executor.wait()
record = executor.test_state.test_record
self.assertEqual(record.phases[0].name, start_phase.name)
self.assertLessEqual(record.start_time_millis, util.time_millis())
self.assertLessEqual(record.start_time_millis, record.end_time_millis)
self.assertLessEqual(record.end_time_millis, util.time_millis())
# Teardown function should be executed.
self.assertTrue(ev.wait(1))
executor.close()
def test_cancel_twice_phase(self):
def abort_twice():
executor.abort()
teardown_running.wait()
executor.abort()
@openhtf.PhaseOptions()
def cancel_twice_phase():
# See above cancel_phase for explanations.
_abort_executor_in_thread(abort_twice)
@openhtf.PhaseOptions()
def teardown_phase():
teardown_running.set()
# Sleeping for the entire duration has a race condition with cancellation.
timeout = timeouts.PolledTimeout(1)
while not timeout.has_expired():
time.sleep(0.01)
ev.set()
@openhtf.PhaseOptions()
def teardown2_phase():
ev2.set()
teardown_running = threading.Event()
ev = threading.Event()
ev2 = threading.Event()
group = phase_group.PhaseGroup(main=[cancel_twice_phase],
teardown=[teardown_phase, teardown2_phase])
test = openhtf.Test(group)
test.configure(
default_dut_id='dut',
)
executor = test_executor.TestExecutor(
test.descriptor, 'uid', start_phase, test._test_options)
executor.start()
executor.wait()
record = executor.test_state.test_record
self.assertEqual(record.phases[0].name, start_phase.name)
self.assertLessEqual(record.start_time_millis, util.time_millis())
self.assertLessEqual(record.start_time_millis, record.end_time_millis)
self.assertLessEqual(record.end_time_millis, util.time_millis())
# Teardown function should *NOT* be executed.
self.assertFalse(ev.is_set())
self.assertFalse(ev2.is_set())
executor.close()
def test_failure_during_plug_init(self):
ev = threading.Event()
group = phase_group.PhaseGroup(
main=[fail_plug_phase],
teardown=[lambda: ev.set()], # pylint: disable=unnecessary-lambda
)
test = openhtf.Test(group)
test.configure(
default_dut_id='dut',
)
executor = test_executor.TestExecutor(
test.descriptor, 'uid', None, test._test_options)
executor.start()
executor.wait()
record = executor.test_state.test_record
self.assertEqual(record.outcome, Outcome.ERROR)
self.assertEqual(record.outcome_details[0].code, FailedPlugError.__name__)
self.assertEqual(record.outcome_details[0].description, FAIL_PLUG_MESSAGE)
# Teardown function should *NOT* be executed.
self.assertFalse(ev.is_set())
executor.close()
def test_failure_during_start_phase_plug_init(self):
def never_gonna_run_phase():
ev2.set()
ev = threading.Event()
ev2 = threading.Event()
group = phase_group.PhaseGroup(
main=[never_gonna_run_phase],
teardown=[lambda: ev.set()], # pylint: disable=unnecessary-lambda
)
test = openhtf.Test(group)
test.configure(
default_dut_id='dut',
)
executor = test_executor.TestExecutor(
test.descriptor,
'uid',
fail_plug_phase,
test._test_options)
executor.start()
executor.wait()
record = executor.test_state.test_record
self.assertEqual(record.outcome, Outcome.ERROR)
self.assertEqual(record.outcome_details[0].code, FailedPlugError.__name__)
self.assertEqual(record.outcome_details[0].description, FAIL_PLUG_MESSAGE)
# Teardown function should *NOT* be executed.
self.assertFalse(ev.is_set())
self.assertFalse(ev2.is_set())
def test_error_during_teardown(self):
group = phase_group.PhaseGroup(
main=[blank_phase], teardown=[teardown_fail])
test = openhtf.Test(group)
test.configure(
default_dut_id='dut',
)
executor = test_executor.TestExecutor(
test.descriptor, 'uid', start_phase, test._test_options)
executor.start()
executor.wait()
record = executor.test_state.test_record
self.assertEqual(record.outcome, Outcome.ERROR)
self.assertEqual(record.outcome_details[0].code, TeardownError.__name__)
executor.close()
def test_log_during_teardown(self):
message = 'hello'
def teardown_log(test):
test.logger.info(message)
group = phase_group.PhaseGroup(
main=[blank_phase], teardown=[teardown_log])
test = openhtf.Test(group)
test.configure(
default_dut_id='dut',
)
executor = test_executor.TestExecutor(
test.descriptor, 'uid', start_phase, test._test_options)
executor.start()
executor.wait()
record = executor.test_state.test_record
self.assertEqual(record.outcome, Outcome.PASS)
log_records = [log_record for log_record in record.log_records
if log_record.message == message]
self.assertTrue(log_records)
executor.close()
class TestExecutorHandlePhaseTest(unittest.TestCase):
def setUp(self):
super(TestExecutorHandlePhaseTest, self).setUp()
self.test_state = mock.MagicMock(
spec=test_state.TestState,
plug_manager=plugs.PlugManager(),
execution_uid='01234567890',
state_logger=mock.MagicMock())
self.phase_exec = mock.MagicMock(
spec=phase_executor.PhaseExecutor)
self.test_exec = test_executor.TestExecutor(None, 'uid', None,
test_descriptor.TestOptions())
self.test_exec.test_state = self.test_state
self.test_exec._phase_exec = self.phase_exec
patcher = mock.patch.object(self.test_exec, '_execute_phase_group')
self.mock_execute_phase_group = patcher.start()
def testPhaseGroup_NotTerminal(self):
self.mock_execute_phase_group.return_value = False
group = phase_group.PhaseGroup(name='test')
self.assertFalse(self.test_exec._handle_phase(group))
self.mock_execute_phase_group.assert_called_once_with(group)
def testPhaseGroup_Terminal(self):
self.mock_execute_phase_group.return_value = True
group = phase_group.PhaseGroup(name='test')
self.assertTrue(self.test_exec._handle_phase(group))
self.mock_execute_phase_group.assert_called_once_with(group)
def testPhase_NotTerminal(self):
phase = phase_descriptor.PhaseDescriptor(blank_phase)
self.phase_exec.execute_phase.return_value = (
phase_executor.PhaseExecutionOutcome(
phase_descriptor.PhaseResult.CONTINUE))
self.assertFalse(self.test_exec._handle_phase(phase))
self.mock_execute_phase_group.assert_not_called()
self.phase_exec.execute_phase.assert_called_once_with(phase)
self.assertIsNone(self.test_exec._last_outcome)
def testPhase_NotTerminal_PreviousLastOutcome(self):
phase = phase_descriptor.PhaseDescriptor(blank_phase)
set_outcome = phase_executor.PhaseExecutionOutcome(None)
self.test_exec._last_outcome = set_outcome
self.phase_exec.execute_phase.return_value = (
phase_executor.PhaseExecutionOutcome(
phase_descriptor.PhaseResult.CONTINUE))
self.assertFalse(self.test_exec._handle_phase(phase))
self.mock_execute_phase_group.assert_not_called()
self.phase_exec.execute_phase.assert_called_once_with(phase)
self.assertIs(set_outcome, self.test_exec._last_outcome)
def testPhase_Terminal_SetLastOutcome(self):
phase = phase_descriptor.PhaseDescriptor(blank_phase)
outcome = phase_executor.PhaseExecutionOutcome(
phase_descriptor.PhaseResult.STOP)
self.phase_exec.execute_phase.return_value = outcome
self.assertTrue(self.test_exec._handle_phase(phase))
self.mock_execute_phase_group.assert_not_called()
self.phase_exec.execute_phase.assert_called_once_with(phase)
self.assertIs(outcome, self.test_exec._last_outcome)
def testPhase_Terminal_PreviousLastOutcome(self):
phase = phase_descriptor.PhaseDescriptor(blank_phase)
set_outcome = phase_executor.PhaseExecutionOutcome(None)
self.test_exec._last_outcome = set_outcome
outcome = phase_executor.PhaseExecutionOutcome(
phase_descriptor.PhaseResult.STOP)
self.phase_exec.execute_phase.return_value = outcome
self.assertTrue(self.test_exec._handle_phase(phase))
self.mock_execute_phase_group.assert_not_called()
self.phase_exec.execute_phase.assert_called_once_with(phase)
self.assertIs(set_outcome, self.test_exec._last_outcome)
class TestExecutorExecutePhasesTest(unittest.TestCase):
def setUp(self):
super(TestExecutorExecutePhasesTest, self).setUp()
self.test_state = mock.MagicMock(
spec=test_state.TestState,
plug_manager=plugs.PlugManager(),
execution_uid='01234567890',
state_logger=mock.MagicMock())
self.test_exec = test_executor.TestExecutor(None, 'uid', None,
test_descriptor.TestOptions())
self.test_exec.test_state = self.test_state
patcher = mock.patch.object(self.test_exec, '_handle_phase')
self.mock_handle_phase = patcher.start()
def testExecuteAbortable_NoPhases(self):
self.assertFalse(self.test_exec._execute_abortable_phases(
'main', (), 'group'))
self.mock_handle_phase.assert_not_called()
def testExecuteAbortable_Normal(self):
self.mock_handle_phase.side_effect = [False]
self.assertFalse(self.test_exec._execute_abortable_phases(
'main', ('normal',), 'group'))
self.mock_handle_phase.assert_called_once_with('normal')
def testExecuteAbortable_AbortedPrior(self):
self.test_exec.abort()
self.assertTrue(self.test_exec._execute_abortable_phases(
'main', ('not-run',), 'group'))
self.mock_handle_phase.assert_not_called()
def testExecuteAbortable_AbortedDuring(self):
self.mock_handle_phase.side_effect = lambda x: self.test_exec.abort()
self.assertTrue(self.test_exec._execute_abortable_phases(
'main', ('abort', 'not-run'), 'group'))
self.mock_handle_phase.assert_called_once_with('abort')
def testExecuteAbortable_Terminal(self):
self.mock_handle_phase.side_effect = [False, True]
self.assertTrue(self.test_exec._execute_abortable_phases(
'main', ('normal', 'abort', 'not_run'), 'group'))
self.assertEqual([mock.call('normal'), mock.call('abort')],
self.mock_handle_phase.call_args_list)
def testExecuteTeardown_Empty(self):
self.assertFalse(self.test_exec._execute_teardown_phases((), 'group'))
self.mock_handle_phase.assert_not_called()
def testExecuteTeardown_Normal(self):
self.mock_handle_phase.side_effect = [False]
self.assertFalse(self.test_exec._execute_teardown_phases(
('normal',), 'group'))
self.mock_handle_phase.assert_called_once_with('normal')
def testExecuteTeardown_AbortPrior(self):
self.test_exec.abort()
self.mock_handle_phase.side_effect = [False]
self.assertFalse(self.test_exec._execute_teardown_phases(
('normal',), 'group'))
self.mock_handle_phase.assert_called_once_with('normal')
def testExecuteTeardown_AbortedDuring(self):
def handle_phase(fake_phase):
if fake_phase == 'abort':
self.test_exec.abort()
return False
self.mock_handle_phase.side_effect = handle_phase
self.assertFalse(self.test_exec._execute_teardown_phases(
('abort', 'still-run'), 'group'))
self.mock_handle_phase.assert_has_calls(
[mock.call('abort'), mock.call('still-run')])
def testExecuteTeardown_Terminal(self):
def handle_phase(fake_phase):
if fake_phase == 'error':
return True
return False
self.mock_handle_phase.side_effect = handle_phase
self.assertTrue(self.test_exec._execute_teardown_phases(
('error', 'still-run'), 'group'))
self.mock_handle_phase.assert_has_calls(
[mock.call('error'), mock.call('still-run')])
class TestExecutorExecutePhaseGroupTest(unittest.TestCase):
def setUp(self):
super(TestExecutorExecutePhaseGroupTest, self).setUp()
self.test_state = mock.MagicMock(
spec=test_state.TestState,
plug_manager=plugs.PlugManager(),
execution_uid='01234567890',
state_logger=mock.MagicMock())
self.test_exec = test_executor.TestExecutor(None, 'uid', None,
test_descriptor.TestOptions())
self.test_exec.test_state = self.test_state
patcher = mock.patch.object(self.test_exec, '_execute_abortable_phases')
self.mock_execute_abortable = patcher.start()
patcher = mock.patch.object(self.test_exec, '_execute_teardown_phases')
self.mock_execute_teardown = patcher.start()
def setup():
pass
self._setup = setup
def main():
pass
self._main = main
@openhtf.PhaseOptions(timeout_s=30)
def teardown():
pass
self._teardown = teardown
self.group = phase_group.PhaseGroup(
setup=[setup], main=[main], teardown=[teardown], name='group')
def testStopDuringSetup(self):
self.mock_execute_abortable.return_value = True
self.assertTrue(self.test_exec._execute_phase_group(self.group))
self.mock_execute_abortable.assert_called_once_with(
'setup', (self._setup,), 'group')
self.mock_execute_teardown.assert_not_called()
def testStopDuringMain(self):
self.mock_execute_abortable.side_effect = [False, True]
self.mock_execute_teardown.return_value = False
self.assertTrue(self.test_exec._execute_phase_group(self.group))
self.mock_execute_abortable.assert_has_calls([
mock.call('setup', (self._setup,), 'group'),
mock.call('main', (self._main,), 'group'),
])
self.mock_execute_teardown.assert_called_once_with(
(self._teardown,), 'group')
def testStopDuringTeardown(self):
self.mock_execute_abortable.return_value = False
self.mock_execute_teardown.return_value = True
self.assertTrue(self.test_exec._execute_phase_group(self.group))
self.mock_execute_abortable.assert_has_calls([
mock.call('setup', (self._setup,), 'group'),
mock.call('main', (self._main,), 'group'),
])
self.mock_execute_teardown.assert_called_once_with(
(self._teardown,), 'group')
def testNoStop(self):
self.mock_execute_abortable.return_value = False
self.mock_execute_teardown.return_value = False
self.assertFalse(self.test_exec._execute_phase_group(self.group))
self.mock_execute_abortable.assert_has_calls([
mock.call('setup', (self._setup,), 'group'),
mock.call('main', (self._main,), 'group'),
])
self.mock_execute_teardown.assert_called_once_with(
(self._teardown,), 'group')
class PhaseExecutorTest(unittest.TestCase):
def setUp(self):
super(PhaseExecutorTest, self).setUp()
self.test_state = mock.MagicMock(
spec=test_state.TestState,
plug_manager=plugs.PlugManager(),
execution_uid='01234567890',
state_logger=mock.MagicMock())
self.test_state.plug_manager.initialize_plugs([
UnittestPlug, MoreRepeatsUnittestPlug])
self.phase_executor = phase_executor.PhaseExecutor(self.test_state)
def test_execute_continue_phase(self):
result = self.phase_executor.execute_phase(phase_two)
self.assertEqual(openhtf.PhaseResult.CONTINUE, result.phase_result)
def test_execute_repeat_okay_phase(self):
result = self.phase_executor.execute_phase(
phase_repeat.with_plugs(test_plug=UnittestPlug))
self.assertEqual(openhtf.PhaseResult.CONTINUE, result.phase_result)
def test_execute_repeat_limited_phase(self):
result = self.phase_executor.execute_phase(
phase_repeat.with_plugs(test_plug=MoreRepeatsUnittestPlug))
self.assertEqual(openhtf.PhaseResult.STOP, result.phase_result)
def test_execute_run_if_false(self):
result = self.phase_executor.execute_phase(phase_skip_from_run_if)
self.assertEqual(openhtf.PhaseResult.SKIP, result.phase_result)
def test_execute_phase_return_skip(self):
result = self.phase_executor.execute_phase(phase_return_skip)
self.assertEqual(openhtf.PhaseResult.SKIP, result.phase_result)
def test_execute_phase_return_fail_and_continue(self):
result = self.phase_executor.execute_phase(phase_return_fail_and_continue)
self.assertEqual(openhtf.PhaseResult.FAIL_AND_CONTINUE, result.phase_result)
|
|
"""Config flow for Apple TV integration."""
from ipaddress import ip_address
import logging
from random import randrange
from pyatv import exceptions, pair, scan
from pyatv.const import Protocol
from pyatv.convert import protocol_str
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_ADDRESS,
CONF_NAME,
CONF_PIN,
CONF_PROTOCOL,
CONF_TYPE,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import AbortFlow
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import CONF_CREDENTIALS, CONF_IDENTIFIER, CONF_START_OFF
from .const import DOMAIN # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
DEVICE_INPUT = "device_input"
INPUT_PIN_SCHEMA = vol.Schema({vol.Required(CONF_PIN, default=None): int})
DEFAULT_START_OFF = False
PROTOCOL_PRIORITY = [Protocol.MRP, Protocol.DMAP, Protocol.AirPlay]
async def device_scan(identifier, loop, cache=None):
"""Scan for a specific device using identifier as filter."""
def _filter_device(dev):
if identifier is None:
return True
if identifier == str(dev.address):
return True
if identifier == dev.name:
return True
return any(service.identifier == identifier for service in dev.services)
def _host_filter():
try:
return [ip_address(identifier)]
except ValueError:
return None
if cache:
matches = [atv for atv in cache if _filter_device(atv)]
if matches:
return cache, matches[0]
for hosts in [_host_filter(), None]:
scan_result = await scan(loop, timeout=3, hosts=hosts)
matches = [atv for atv in scan_result if _filter_device(atv)]
if matches:
return scan_result, matches[0]
return scan_result, None
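# Summary of the contract above: device_scan always returns a
# (devices, match) tuple, where devices is either the cached scan result or
# a fresh one, and match is the first device accepted by _filter_device, or
# None when nothing matched.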
def is_valid_credentials(credentials):
"""Verify that credentials are valid for establishing a connection."""
return (
credentials.get(Protocol.MRP.value) is not None
or credentials.get(Protocol.DMAP.value) is not None
)
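# Example of the check above (illustrative credential values):
#   is_valid_credentials({Protocol.MRP.value: "mrp-creds"})      -> True
#   is_valid_credentials({Protocol.AirPlay.value: "ap-creds"})   -> False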
class AppleTVConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Apple TV."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get options flow for this handler."""
return AppleTVOptionsFlow(config_entry)
def __init__(self):
"""Initialize a new AppleTVConfigFlow."""
self.target_device = None
self.scan_result = None
self.atv = None
self.protocol = None
self.pairing = None
self.credentials = {} # Protocol -> credentials
async def async_step_reauth(self, info):
"""Handle initial step when updating invalid credentials."""
await self.async_set_unique_id(info[CONF_IDENTIFIER])
self.target_device = info[CONF_IDENTIFIER]
self.context["title_placeholders"] = {"name": info[CONF_NAME]}
self.context["identifier"] = self.unique_id
return await self.async_step_reconfigure()
async def async_step_reconfigure(self, user_input=None):
"""Inform user that reconfiguration is about to start."""
if user_input is not None:
return await self.async_find_device_wrapper(
self.async_begin_pairing, allow_exist=True
)
return self.async_show_form(step_id="reconfigure")
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
# Be helpful to the user and look for devices
if self.scan_result is None:
self.scan_result, _ = await device_scan(None, self.hass.loop)
errors = {}
default_suggestion = self._prefill_identifier()
if user_input is not None:
self.target_device = user_input[DEVICE_INPUT]
try:
await self.async_find_device()
except DeviceNotFound:
errors["base"] = "no_devices_found"
except DeviceAlreadyConfigured:
errors["base"] = "already_configured"
except exceptions.NoServiceError:
errors["base"] = "no_usable_service"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
await self.async_set_unique_id(
self.atv.identifier, raise_on_progress=False
)
return await self.async_step_confirm()
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{vol.Required(DEVICE_INPUT, default=default_suggestion): str}
),
errors=errors,
description_placeholders={"devices": self._devices_str()},
)
async def async_step_zeroconf(self, discovery_info):
"""Handle device found via zeroconf."""
service_type = discovery_info[CONF_TYPE]
properties = discovery_info["properties"]
if service_type == "_mediaremotetv._tcp.local.":
identifier = properties["UniqueIdentifier"]
name = properties["Name"]
elif service_type == "_touch-able._tcp.local.":
identifier = discovery_info["name"].split(".")[0]
name = properties["CtlN"]
else:
return self.async_abort(reason="unknown")
await self.async_set_unique_id(identifier)
self._abort_if_unique_id_configured()
self.context["identifier"] = self.unique_id
self.context["title_placeholders"] = {"name": name}
self.target_device = identifier
return await self.async_find_device_wrapper(self.async_step_confirm)
async def async_find_device_wrapper(self, next_func, allow_exist=False):
"""Find a specific device and call another function when done.
This function will do error handling and bail out when an error
occurs.
"""
try:
await self.async_find_device(allow_exist)
except DeviceNotFound:
return self.async_abort(reason="no_devices_found")
except DeviceAlreadyConfigured:
return self.async_abort(reason="already_configured")
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
return self.async_abort(reason="unknown")
return await next_func()
async def async_find_device(self, allow_exist=False):
"""Scan for the selected device to discover services."""
self.scan_result, self.atv = await device_scan(
self.target_device, self.hass.loop, cache=self.scan_result
)
if not self.atv:
raise DeviceNotFound()
self.protocol = self.atv.main_service().protocol
if not allow_exist:
for identifier in self.atv.all_identifiers:
if identifier in self._async_current_ids():
raise DeviceAlreadyConfigured()
# If credentials were found, save them
for service in self.atv.services:
if service.credentials:
self.credentials[service.protocol.value] = service.credentials
async def async_step_confirm(self, user_input=None):
"""Handle user-confirmation of discovered node."""
if user_input is not None:
return await self.async_begin_pairing()
return self.async_show_form(
step_id="confirm", description_placeholders={"name": self.atv.name}
)
async def async_begin_pairing(self):
"""Start pairing process for the next available protocol."""
self.protocol = self._next_protocol_to_pair()
# Dispose previous pairing sessions
if self.pairing is not None:
await self.pairing.close()
self.pairing = None
# If no protocols are left to pair, create the config entry and finish
if not self.protocol:
await self.async_set_unique_id(self.atv.main_service().identifier)
return self._async_get_entry(
self.atv.main_service().protocol,
self.atv.name,
self.credentials,
self.atv.address,
)
# Initiate the pairing process
abort_reason = None
session = async_get_clientsession(self.hass)
self.pairing = await pair(
self.atv, self.protocol, self.hass.loop, session=session
)
try:
await self.pairing.begin()
except exceptions.ConnectionFailedError:
return await self.async_step_service_problem()
except exceptions.BackOffError:
abort_reason = "backoff"
except exceptions.PairingError:
_LOGGER.exception("Authentication problem")
abort_reason = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
abort_reason = "unknown"
if abort_reason:
if self.pairing:
await self.pairing.close()
return self.async_abort(reason=abort_reason)
# Choose step depending on if PIN is required from user or not
if self.pairing.device_provides_pin:
return await self.async_step_pair_with_pin()
return await self.async_step_pair_no_pin()
async def async_step_pair_with_pin(self, user_input=None):
"""Handle pairing step where a PIN is required from the user."""
errors = {}
if user_input is not None:
try:
self.pairing.pin(user_input[CONF_PIN])
await self.pairing.finish()
self.credentials[self.protocol.value] = self.pairing.service.credentials
return await self.async_begin_pairing()
except exceptions.PairingError:
_LOGGER.exception("Authentication problem")
errors["base"] = "invalid_auth"
except AbortFlow:
raise
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
return self.async_show_form(
step_id="pair_with_pin",
data_schema=INPUT_PIN_SCHEMA,
errors=errors,
description_placeholders={"protocol": protocol_str(self.protocol)},
)
async def async_step_pair_no_pin(self, user_input=None):
"""Handle step where user has to enter a PIN on the device."""
if user_input is not None:
await self.pairing.finish()
if self.pairing.has_paired:
self.credentials[self.protocol.value] = self.pairing.service.credentials
return await self.async_begin_pairing()
await self.pairing.close()
return self.async_abort(reason="device_did_not_pair")
pin = randrange(1000, stop=10000)
self.pairing.pin(pin)
return self.async_show_form(
step_id="pair_no_pin",
description_placeholders={
"protocol": protocol_str(self.protocol),
"pin": pin,
},
)
async def async_step_service_problem(self, user_input=None):
"""Inform user that a service will not be added."""
if user_input is not None:
self.credentials[self.protocol.value] = None
return await self.async_begin_pairing()
return self.async_show_form(
step_id="service_problem",
description_placeholders={"protocol": protocol_str(self.protocol)},
)
def _async_get_entry(self, protocol, name, credentials, address):
if not is_valid_credentials(credentials):
return self.async_abort(reason="invalid_config")
data = {
CONF_PROTOCOL: protocol.value,
CONF_NAME: name,
CONF_CREDENTIALS: credentials,
CONF_ADDRESS: str(address),
}
self._abort_if_unique_id_configured(reload_on_update=False, updates=data)
return self.async_create_entry(title=name, data=data)
def _next_protocol_to_pair(self):
def _needs_pairing(protocol):
if self.atv.get_service(protocol) is None:
return False
return protocol.value not in self.credentials
for protocol in PROTOCOL_PRIORITY:
if _needs_pairing(protocol):
return protocol
return None
def _devices_str(self):
return ", ".join(
[
f"`{atv.name} ({atv.address})`"
for atv in self.scan_result
if atv.identifier not in self._async_current_ids()
]
)
def _prefill_identifier(self):
# Return the address of one discovered device that has not been configured yet
for atv in self.scan_result:
if atv.identifier not in self._async_current_ids():
return str(atv.address)
return ""
class AppleTVOptionsFlow(config_entries.OptionsFlow):
"""Handle Apple TV options."""
def __init__(self, config_entry):
"""Initialize Apple TV options flow."""
self.config_entry = config_entry
self.options = dict(config_entry.options)
async def async_step_init(self, user_input=None):
"""Manage the Apple TV options."""
if user_input is not None:
self.options[CONF_START_OFF] = user_input[CONF_START_OFF]
return self.async_create_entry(title="", data=self.options)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_START_OFF,
default=self.config_entry.options.get(
CONF_START_OFF, DEFAULT_START_OFF
),
): bool,
}
),
)
class DeviceNotFound(HomeAssistantError):
"""Error to indicate device could not be found."""
class DeviceAlreadyConfigured(HomeAssistantError):
"""Error to indicate device is already configured."""
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2013 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from datetime import datetime
import trac.tests.compat
from trac.test import EnvironmentStub, Mock
from trac.util.datefmt import to_utimestamp, utc
from trac.versioncontrol import Repository, Changeset, Node, NoSuchChangeset
from trac.versioncontrol.cache import CachedRepository
import unittest
class CacheTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub()
self.log = self.env.log
self.env.db_transaction.executemany(
"INSERT INTO repository (id, name, value) VALUES (%s, %s, %s)",
[(1, 'name', 'test-repos'),
(1, 'youngest_rev', '')])
def tearDown(self):
self.env.reset_db()
# Helpers
def get_repos(self, get_changeset=None, youngest_rev=1):
if get_changeset is None:
def no_changeset(rev):
raise NoSuchChangeset(rev)
get_changeset = no_changeset
return Mock(Repository, 'test-repos', {'name': 'test-repos', 'id': 1},
self.log,
get_changeset=get_changeset,
get_oldest_rev=lambda: 0,
get_youngest_rev=lambda: youngest_rev,
normalize_rev=lambda x: get_changeset(x).rev,
next_rev=(lambda x: x + 1 if int(x) < youngest_rev else None))
def preset_cache(self, *args):
"""Each arg is a (rev tuple, changes list of tuples) pair."""
with self.env.db_transaction as db:
for rev, changes in args:
db("""INSERT INTO revision (repos, rev, time, author, message)
VALUES (1,%s,%s,%s,%s)
""", rev)
if changes:
db.executemany("""
INSERT INTO node_change (repos, rev, path, node_type,
change_type, base_path,
base_rev)
VALUES (1, %s, %s, %s, %s, %s, %s)
""", [(rev[0],) + change for change in changes])
db("""UPDATE repository SET value=%s
WHERE id=1 AND name='youngest_rev'
""", (args[-1][0][0],))
# Tests
def test_repr(self):
repos = self.get_repos()
cache = CachedRepository(self.env, repos, self.log)
self.assertEqual("<CachedRepository 1 'test-repos' '/'>",
repr(cache))
def test_initial_sync_with_empty_repos(self):
repos = self.get_repos()
cache = CachedRepository(self.env, repos, self.log)
cache.sync()
with self.env.db_query as db:
self.assertEqual([], db(
"SELECT rev, time, author, message FROM revision"))
self.assertEqual(0, db("SELECT COUNT(*) FROM node_change")[0][0])
def test_initial_sync(self):
t1 = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
t2 = datetime(2002, 1, 1, 1, 1, 1, 0, utc)
repos = self.get_repos(get_changeset=lambda x: changesets[int(x)],
youngest_rev=1)
changes = [('trunk', Node.DIRECTORY, Changeset.ADD, None, None),
('trunk/README', Node.FILE, Changeset.ADD, None, None)]
changesets = [Mock(Changeset, repos, 0, '', '', t1,
get_changes=lambda: []),
Mock(Changeset, repos, 1, 'Import', 'joe', t2,
get_changes=lambda: iter(changes))]
cache = CachedRepository(self.env, repos, self.log)
cache.sync()
with self.env.db_query as db:
rows = db("SELECT rev, time, author, message FROM revision")
self.assertEqual(len(rows), 2)
self.assertEqual(('0', to_utimestamp(t1), '', ''), rows[0])
self.assertEqual(('1', to_utimestamp(t2), 'joe', 'Import'),
rows[1])
rows = db("""
SELECT rev, path, node_type, change_type, base_path, base_rev
FROM node_change""")
self.assertEqual(len(rows), 2)
self.assertEqual(('1', 'trunk', 'D', 'A', None, None), rows[0])
self.assertEqual(('1', 'trunk/README', 'F', 'A', None, None),
rows[1])
def test_update_sync(self):
t1 = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
t2 = datetime(2002, 1, 1, 1, 1, 1, 0, utc)
t3 = datetime(2003, 1, 1, 1, 1, 1, 0, utc)
self.preset_cache(
(('0', to_utimestamp(t1), '', ''), []),
(('1', to_utimestamp(t2), 'joe', 'Import'),
[('trunk', 'D', 'A', None, None),
('trunk/README', 'F', 'A', None, None)]),
)
repos = self.get_repos(get_changeset=lambda x: changesets[int(x)],
youngest_rev=2)
changes = [('trunk/README', Node.FILE, Changeset.EDIT, 'trunk/README',
1)]
changesets = [
None,
Mock(Changeset, repos, 1, '', '', t2, get_changes=lambda: []),
Mock(Changeset, repos, 2, 'Update', 'joe', t3,
get_changes=lambda: iter(changes))
]
cache = CachedRepository(self.env, repos, self.log)
cache.sync()
with self.env.db_query as db:
self.assertEqual([(to_utimestamp(t3), 'joe', 'Update')],
db("SELECT time, author, message FROM revision WHERE rev='2'"))
self.assertEqual([('trunk/README', 'F', 'E', 'trunk/README',
'1')],
db("""SELECT path, node_type, change_type, base_path,
base_rev
FROM node_change WHERE rev='2'"""))
def test_clean_sync(self):
t1 = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
t2 = datetime(2002, 1, 1, 1, 1, 1, 0, utc)
t3 = datetime(2003, 1, 1, 1, 1, 1, 0, utc)
self.preset_cache(
(('0', to_utimestamp(t1), '', ''), []),
(('1', to_utimestamp(t2), 'joe', 'Import'),
[('trunk', 'D', 'A', None, None),
('trunk/README', 'F', 'A', None, None)]),
)
repos = self.get_repos(get_changeset=lambda x: changesets[int(x)],
youngest_rev=2)
changes1 = [('trunk', Node.DIRECTORY, Changeset.ADD, None, None),
('trunk/README', Node.FILE, Changeset.ADD, None, None)]
changes2 = [('trunk/README', Node.FILE, Changeset.EDIT, 'trunk/README',
1)]
changesets = [
Mock(Changeset, repos, 0, '**empty**', 'joe', t1,
get_changes=lambda: []),
Mock(Changeset, repos, 1, 'Initial Import', 'joe', t2,
get_changes=lambda: iter(changes1)),
Mock(Changeset, repos, 2, 'Update', 'joe', t3,
get_changes=lambda: iter(changes2))
]
cache = CachedRepository(self.env, repos, self.log)
cache.sync(clean=True)
rows = self.env.db_query("""
SELECT time, author, message FROM revision ORDER BY rev
""")
self.assertEqual(3, len(rows))
self.assertEqual((to_utimestamp(t1), 'joe', '**empty**'), rows[0])
self.assertEqual((to_utimestamp(t2), 'joe', 'Initial Import'),
rows[1])
self.assertEqual((to_utimestamp(t3), 'joe', 'Update'), rows[2])
rows = self.env.db_query("""
SELECT rev, path, node_type, change_type, base_path, base_rev
FROM node_change ORDER BY rev, path""")
self.assertEqual(3, len(rows))
self.assertEqual(('1', 'trunk', 'D', 'A', None, None), rows[0])
self.assertEqual(('1', 'trunk/README', 'F', 'A', None, None), rows[1])
self.assertEqual(('2', 'trunk/README', 'F', 'E', 'trunk/README', '1'),
rows[2])
def test_sync_changeset(self):
t1 = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
t2 = datetime(2002, 1, 1, 1, 1, 1, 0, utc)
self.preset_cache(
(('0', to_utimestamp(t1), '', ''), []),
(('1', to_utimestamp(t2), 'joe', 'Import'),
[('trunk', 'D', 'A', None, None),
('trunk/README', 'F', 'A', None, None)]),
)
repos = self.get_repos(get_changeset=lambda x: changesets[int(x)],
youngest_rev=1)
changes1 = [('trunk', Node.DIRECTORY, Changeset.ADD, None, None),
('trunk/README', Node.FILE, Changeset.ADD, None, None)]
changesets = [
Mock(Changeset, repos, 0, '**empty**', 'joe', t1,
get_changes=lambda: []),
Mock(Changeset, repos, 1, 'Initial Import', 'joe', t2,
get_changes=lambda: iter(changes1)),
]
cache = CachedRepository(self.env, repos, self.log)
cache.sync_changeset(0)
rows = self.env.db_query(
"SELECT time, author, message FROM revision ORDER BY rev")
self.assertEqual(2, len(rows))
self.assertEqual((to_utimestamp(t1), 'joe', '**empty**'), rows[0])
self.assertEqual((to_utimestamp(t2), 'joe', 'Import'), rows[1])
def test_sync_changeset_if_not_exists(self):
t = [
datetime(2001, 1, 1, 1, 1, 1, 0, utc), # r0
datetime(2002, 1, 1, 1, 1, 1, 0, utc), # r1
datetime(2003, 1, 1, 1, 1, 1, 0, utc), # r2
datetime(2004, 1, 1, 1, 1, 1, 0, utc), # r3
]
self.preset_cache(
(('0', to_utimestamp(t[0]), 'joe', '**empty**'), []),
(('1', to_utimestamp(t[1]), 'joe', 'Import'),
[('trunk', 'D', 'A', None, None),
('trunk/README', 'F', 'A', None, None)]),
# r2 is intentionally missing from the cache
(('3', to_utimestamp(t[3]), 'joe', 'Add COPYING'),
[('trunk/COPYING', 'F', 'A', None, None)]),
)
repos = self.get_repos(get_changeset=lambda x: changesets[int(x)],
youngest_rev=3)
changes = [
None, # r0
[('trunk', Node.DIRECTORY, Changeset.ADD, None, None), # r1
('trunk/README', Node.FILE, Changeset.ADD, None, None)],
[('branches', Node.DIRECTORY, Changeset.ADD, None, None), # r2
('tags', Node.DIRECTORY, Changeset.ADD, None, None)],
[('trunk/COPYING', Node.FILE, Changeset.ADD, None, None)], # r3
]
changesets = [
Mock(Changeset, repos, 0, '**empty**', 'joe', t[0],
get_changes=lambda: []),
Mock(Changeset, repos, 1, 'Initial Import', 'joe', t[1],
get_changes=lambda: iter(changes[1])),
Mock(Changeset, repos, 2, 'Created directories', 'john', t[2],
get_changes=lambda: iter(changes[2])),
Mock(Changeset, repos, 3, 'Add COPYING', 'joe', t[3],
get_changes=lambda: iter(changes[3])),
]
cache = CachedRepository(self.env, repos, self.log)
self.assertRaises(NoSuchChangeset, cache.get_changeset, 2)
cache.sync()
self.assertRaises(NoSuchChangeset, cache.get_changeset, 2)
self.assertIsNone(cache.sync_changeset(2))
cset = cache.get_changeset(2)
self.assertEqual('john', cset.author)
self.assertEqual('Created directories', cset.message)
self.assertEqual(t[2], cset.date)
cset_changes = cset.get_changes()
self.assertEqual(('branches', Node.DIRECTORY, Changeset.ADD, None,
None),
cset_changes.next())
self.assertEqual(('tags', Node.DIRECTORY, Changeset.ADD, None, None),
cset_changes.next())
self.assertRaises(StopIteration, cset_changes.next)
rows = self.env.db_query(
"SELECT time,author,message FROM revision ORDER BY rev")
self.assertEqual(4, len(rows))
self.assertEqual((to_utimestamp(t[0]), 'joe', '**empty**'), rows[0])
self.assertEqual((to_utimestamp(t[1]), 'joe', 'Import'), rows[1])
self.assertEqual((to_utimestamp(t[2]), 'john', 'Created directories'),
rows[2])
self.assertEqual((to_utimestamp(t[3]), 'joe', 'Add COPYING'), rows[3])
def test_sync_changeset_with_string_rev(self): # ticket:11660
class MockCachedRepository(CachedRepository):
def db_rev(self, rev):
return '%010d' % rev
t1 = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
t2 = datetime(2002, 1, 1, 1, 1, 1, 0, utc)
repos = self.get_repos(get_changeset=lambda x: changesets[int(x)],
youngest_rev=1)
changesets = [
Mock(Changeset, repos, 0, 'empty', 'joe', t1,
get_changes=lambda: []),
Mock(Changeset, repos, 1, 'first', 'joe', t2,
get_changes=lambda: []),
]
cache = MockCachedRepository(self.env, repos, self.log)
cache.sync_changeset('0') # not cached yet
cache.sync_changeset(u'1') # not cached yet
rows = self.env.db_query(
"SELECT rev,author FROM revision ORDER BY rev")
self.assertEqual(2, len(rows))
self.assertEquals(('0000000000', 'joe'), rows[0])
self.assertEquals(('0000000001', 'joe'), rows[1])
cache.sync_changeset(u'0') # cached
cache.sync_changeset('1') # cached
rows = self.env.db_query(
"SELECT rev,author FROM revision ORDER BY rev")
self.assertEqual(2, len(rows))
self.assertEquals(('0000000000', 'joe'), rows[0])
self.assertEquals(('0000000001', 'joe'), rows[1])
def test_get_changes(self):
t1 = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
t2 = datetime(2002, 1, 1, 1, 1, 1, 0, utc)
self.preset_cache(
(('0', to_utimestamp(t1), '', ''), []),
(('1', to_utimestamp(t2), 'joe', 'Import'),
[('trunk', 'D', 'A', None, None),
('trunk/RDME', 'F', 'A', None, None)]),
)
repos = self.get_repos()
cache = CachedRepository(self.env, repos, self.log)
self.assertEqual('1', cache.youngest_rev)
changeset = cache.get_changeset(1)
self.assertEqual('joe', changeset.author)
self.assertEqual('Import', changeset.message)
self.assertEqual(t2, changeset.date)
changes = changeset.get_changes()
self.assertEqual(('trunk', Node.DIRECTORY, Changeset.ADD, None, None),
changes.next())
self.assertEqual(('trunk/RDME', Node.FILE, Changeset.ADD, None, None),
changes.next())
self.assertRaises(StopIteration, changes.next)
def suite():
return unittest.makeSuite(CacheTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
##########################################################################
#
# Copyright (c) 2020, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import imath
import IECore
import IECoreScene
import GafferTest
import GafferScene
import GafferSceneTest
class ClosestPointSamplerTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
# Build network to perform closest point queries
# from a plane, against a copy of the same plane
# converted to a points primitive. Closest points
# should be exact vertices.
plane = GafferScene.Plane()
plane["dimensions"].setValue( imath.V2f( 2 ) )
planeFilter = GafferScene.PathFilter()
planeFilter["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
planeTransform = GafferScene.Transform()
planeTransform["in"].setInput( plane["out"] )
planeTransform["filter"].setInput( planeFilter["out"] )
points = GafferScene.MeshToPoints()
points["in"].setInput( plane["out"] )
points["filter"].setInput( planeFilter["out"] )
pointsTransform = GafferScene.Transform()
pointsTransform["in"].setInput( points["out"] )
pointsTransform["filter"].setInput( planeFilter["out"] )
sampler = GafferScene.ClosestPointSampler()
sampler["in"].setInput( planeTransform["out"] )
sampler["source"].setInput( pointsTransform["out"] )
sampler["filter"].setInput( planeFilter["out"] )
sampler["sourceLocation"].setValue( "/plane" )
sampler["prefix"].setValue( "sampled:" )
self.assertScenesEqual( sampler["out"], plane["out"] )
# Identical transforms. Closest point should
# be the same as the query point.
sampler["primitiveVariables"].setValue( "P" )
self.assertSceneValid( sampler["out"] )
inMesh = sampler["in"].object( "/plane" )
outMesh = sampler["out"].object( "/plane" )
self.assertEqual( set( outMesh.keys() ), set( inMesh.keys() + [ "sampled:P" ] ) )
self.assertEqual( outMesh["sampled:P"], inMesh["P"] )
# Translate source off to one side. A single
# point is the closest for all query points.
pointsTransform["transform"]["translate"].setValue( imath.V3f( 5, 5, 0 ) )
outMesh = sampler["out"].object( "/plane" )
self.assertEqual(
outMesh["sampled:P"].data,
IECore.V3fVectorData(
[ imath.V3f( 4, 4, 0 ) ] * 4,
IECore.GeometricData.Interpretation.Point
)
)
# Translate the plane too. Sampled results should
# be adjusted so that they are relative to the local
# space of the plane.
planeTransform["transform"]["translate"].setValue( imath.V3f( -1, 0, 0 ) )
outMesh = sampler["out"].object( "/plane" )
self.assertEqual(
outMesh["sampled:P"].data,
IECore.V3fVectorData(
[ imath.V3f( 5, 4, 0 ) ] * 4,
IECore.GeometricData.Interpretation.Point
)
)
def testPrimitiveVariableTypes( self ) :
pointsPrimitive = IECoreScene.PointsPrimitive(
IECore.V3fVectorData( [ imath.V3f( 0 ) ] )
)
pointsPrimitive["vector"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.V3fVectorData(
[ imath.V3f( 1, 2, 3 ) ],
IECore.GeometricData.Interpretation.Vector
),
)
pointsPrimitive["normal"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.V3fVectorData(
[ imath.V3f( 4, 5, 6 ) ],
IECore.GeometricData.Interpretation.Normal
),
)
pointsPrimitive["point"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.V3fVectorData(
[ imath.V3f( 4, 5, 6 ) ],
IECore.GeometricData.Interpretation.Point
),
)
pointsPrimitive["uv"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.V2fVectorData(
[ imath.V2f( 0, 1 ) ],
IECore.GeometricData.Interpretation.UV
),
)
pointsPrimitive["Cs"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.Color3fVectorData( [ imath.Color3f( 0, 0, 1 ) ] ),
)
pointsPrimitive["float"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.FloatVectorData( [ 0.5 ] ),
)
pointsPrimitive["int"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ 10 ] ),
)
points = GafferScene.ObjectToScene()
points["object"].setValue( pointsPrimitive )
plane = GafferScene.Plane()
plane["transform"]["translate"]["x"].setValue( 1 )
planeFilter = GafferScene.PathFilter()
planeFilter["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
sampler = GafferScene.ClosestPointSampler()
sampler["in"].setInput( plane["out"] )
sampler["source"].setInput( points["out"] )
sampler["filter"].setInput( planeFilter["out"] )
sampler["sourceLocation"].setValue( "/object" )
sampler["primitiveVariables"].setValue( "*" )
sampler["prefix"].setValue( "sampled:" )
p = sampler["out"].object( "/plane" )
for name in pointsPrimitive.keys() :
primVar = pointsPrimitive[name]
sampledName = "sampled:" + name
self.assertIn( sampledName, p )
sampledPrimVar = p[sampledName]
self.assertIsInstance( sampledPrimVar.data, primVar.data.__class__ )
if hasattr( primVar.data, "getInterpretation" ) :
self.assertEqual( sampledPrimVar.data.getInterpretation(), primVar.data.getInterpretation() )
self.assertEqual( p["sampled:vector"].data[0], imath.V3f( 1, 2, 3 ) )
self.assertEqual( p["sampled:normal"].data[0], imath.V3f( 4, 5, 6 ) )
self.assertEqual( p["sampled:point"].data[0], imath.V3f( 3, 5, 6 ) )
self.assertEqual( p["sampled:uv"].data[0], imath.V2f( 0, 1 ) )
self.assertEqual( p["sampled:Cs"].data[0], imath.Color3f( 0, 0, 1 ) )
self.assertEqual( p["sampled:float"].data[0], 0.5 )
self.assertEqual( p["sampled:int"].data[0], 10 )
def testAdjustBounds( self ) :
plane = GafferScene.Plane()
planeFilter = GafferScene.PathFilter()
planeFilter["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
sphere = GafferScene.Sphere()
sampler = GafferScene.ClosestPointSampler()
sampler["in"].setInput( plane["out"] )
sampler["source"].setInput( sphere["out"] )
sampler["sourceLocation"].setValue( "/sphere" )
# Not filtered to anything, so we expect bounds to be passed through.
self.assertEqual( sampler["out"].boundHash( "/plane" ), sampler["in"].boundHash( "/plane" ) )
# Filtered to something, but no primitive variables specified.
# We expect bounds to be passed through.
sampler["filter"].setInput( planeFilter["out"] )
self.assertEqual( sampler["out"].boundHash( "/plane" ), sampler["in"].boundHash( "/plane" ) )
# P being sampled. We expect the bounds to change.
sampler["primitiveVariables"].setValue( "P" )
self.assertNotEqual( sampler["out"].boundHash( "/plane" ), sampler["in"].boundHash( "/plane" ) )
sampler["primitiveVariables"].setValue( "*" )
self.assertNotEqual( sampler["out"].boundHash( "/plane" ), sampler["in"].boundHash( "/plane" ) )
sampler["primitiveVariables"].setValue( "P uv" )
self.assertNotEqual( sampler["out"].boundHash( "/plane" ), sampler["in"].boundHash( "/plane" ) )
# Unless there is a prefix being applied, in which case we
# expect a pass through again.
sampler["prefix"].setValue( "sampled:" )
self.assertEqual( sampler["out"].boundHash( "/plane" ), sampler["in"].boundHash( "/plane" ) )
# And we should be able to explicitly disable bounds updates.
sampler["prefix"].setValue( "" )
self.assertNotEqual( sampler["out"].boundHash( "/plane" ), sampler["in"].boundHash( "/plane" ) )
sampler["adjustBounds"].setValue( False )
self.assertEqual( sampler["out"].boundHash( "/plane" ), sampler["in"].boundHash( "/plane" ) )
@GafferTest.TestRunner.PerformanceTestMethod()
def testPerformance( self ) :
sphere = GafferScene.Sphere()
plane = GafferScene.Plane()
plane["divisions"].setValue( imath.V2i( 1000, 200 ) )
planeFilter = GafferScene.PathFilter()
planeFilter["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
sampler = GafferScene.ClosestPointSampler()
sampler["in"].setInput( plane["out"] )
sampler["source"].setInput( sphere["out"] )
sampler["filter"].setInput( planeFilter["out"] )
sampler["sourceLocation"].setValue( "/sphere" )
sampler["primitiveVariables"].setValue( "uv" )
# Precache the input object so we don't include
# it in the performance measurement.
sampler["in"].object( "/plane" )
with GafferTest.TestRunner.PerformanceScope() :
sampler["out"].object( "/plane" )
def testPruneSourceLocation( self ) :
plane = GafferScene.Plane()
sphere = GafferScene.Sphere()
planeFilter = GafferScene.PathFilter()
planeFilter["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
sphereFilter = GafferScene.PathFilter()
sphereFilter["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
prune = GafferScene.Prune()
prune["in"].setInput( sphere["out"] )
sampler = GafferScene.ClosestPointSampler()
sampler["in"].setInput( plane["out"] )
sampler["source"].setInput( prune["out"] )
sampler["filter"].setInput( planeFilter["out"] )
sampler["sourceLocation"].setValue( "/sphere" )
sampler["primitiveVariables"].setValue( "P" )
sampler["prefix"].setValue( "sampled:" )
self.assertIn( "sampled:P", sampler["out"].object( "/plane" ) )
prune["filter"].setInput( sphereFilter["out"] )
self.assertNotIn( "sampled:P", sampler["out"].object( "/plane" ) )
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__authors__ = [
'"Leo (Chong Liu)" <HiddenPython@gmail.com>',
]
import datetime
import httplib
from google.appengine.api import users
from google.appengine.ext import db
from soc.logic.models.user import logic as user_logic
from soc.logic.models.sponsor import logic as sponsor_logic
from soc.logic.models.host import logic as host_logic
from soc.logic.models.timeline import logic as timeline_logic
from soc.modules.gsoc.logic.models.mentor import logic as mentor_logic
from soc.modules.gsoc.logic.models.org_admin import logic \
as gsoc_org_admin_logic
from soc.modules.gsoc.logic.models.organization import logic \
as gsoc_organization_logic
from soc.modules.gsoc.logic.models.program import logic as program_logic
from soc.modules.gsoc.logic.models.student import logic as student_logic
from soc.modules.gsoc.logic.models.student_project import logic \
as student_project_logic
from soc.modules.gsoc.logic.models.survey import project_logic \
as project_survey_logic
from soc.modules.gsoc.logic.models.survey import grading_logic \
as grading_survey_logic
from tests.test_utils import DjangoTestCase
from tests.test_utils import MailTestCase
from tests.test_utils import TaskQueueTestCase
class SurveysTasksTest(DjangoTestCase, TaskQueueTestCase, MailTestCase):
"""Tests related to soc.tasks.surveys.
"""
def setUp(self):
"""Set up required for the view tests.
"""
# Setup TaskQueueTestCase and MailTestCase first
super(SurveysTasksTest, self).setUp()
# Create a user for the founder of sponsor
email = "a_sponsor@example.com"
account = users.User(email=email)
link_id = 'a_sponsor_user'
name = 'A Sponsor User'
sponsor_user_properties = {
'account': account,
'link_id': link_id,
'name': name,
}
sponsor_user = user_logic.updateOrCreateFromFields(sponsor_user_properties)
# Create sponsor
link_id = 'a_sponsor'
name = 'A Sponsor'
founder = 'a_founder'
phone = '01234567'
contact_postalcode = 'A postalcode'
description = 'A description'
contact_country = 'United States'
short_name = 'AS'
contact_city = 'A city'
home_page = 'http://www.asponsor.com'
email = 'email@asponsor.com'
sponsor_properties = {
'link_id': link_id,
'name': name,
'short_name': short_name,
'founder': sponsor_user,
'phone': phone,
'description': description,
'contact_country': contact_country,
'contact_city': 'A City',
'contact_street': 'A Street',
'contact_postalcode': contact_postalcode,
'home_page': home_page,
'email': email,
'status': 'active',
}
sponsor = sponsor_logic.updateOrCreateFromFields(sponsor_properties)
# Create a timeline for a program
timeline_properties = {
'link_id': 'a_program',
'scope_path': 'a_sponsor',
'scope': sponsor,
}
timeline = timeline_logic.updateOrCreateFromFields(timeline_properties)
# Create a program for a_sponsor
program_properties = {
'key_name': 'a_sponsor/a_program',
'link_id': 'a_program',
'scope': sponsor,
'scope_path': 'a_sponsor',
'name': 'A Program 2010',
'short_name': 'AP2010',
'group_label': 'AP',
'description': 'This is the program for AP2010.',
'apps_tasks_limit': 42,
'slots': 42,
'allocations_visible': True,
'timeline': timeline,
'status': 'visible',
}
# GSoC program logic does not work: error in updatePredefinedOrgTags
from soc.modules.gsoc.models.program import GSoCProgram
program = GSoCProgram(**program_properties)
program.put()
self.program = program
# Create an organization for a_program
organization_properties = {
'link_id': 'an_org',
'name': 'An Organization',
'short_name': 'AO',
'scope_path': 'a_sponsor/a_program',
'scope': program,
'founder': sponsor_user,
'home_page': 'http://www.an_org.com',
'phone': '1-555-2222',
'description': 'An Organization',
'license_name': 'Apache License',
'ideas': 'http://www.an_org.com/ideas',
'contact_country': contact_country,
'contact_city': 'A City',
'contact_street': 'A Street',
'contact_postalcode': contact_postalcode,
'home_page': home_page,
'email': email,
'status': 'active',
}
organization = gsoc_organization_logic.updateOrCreateFromFields(
organization_properties)
# Create a user for all roles except sponsor
email = "a_role_user@example.com"
account = users.User(email=email)
link_id = 'a_role_user'
name = 'A Role User'
properties = {
'account': account,
'link_id': link_id,
'name': name,
}
key_name = user_logic.getKeyNameFromFields(properties)
role_user = user_logic.updateOrCreateFromKeyName(properties, key_name)
# Create an admin for an_org
gsoc_org_admin_properties = {
'link_id': 'an_org_admin',
'given_name': 'A',
'surname': 'Orgadmin',
'scope_path': organization.scope_path + '/' + organization.link_id,
'scope': organization,
'program': program,
'phone': '1-555-2222',
'email': 'an_org_admin@email.com',
'res_country': 'United States',
'res_city': 'A City',
'res_street': 'A Street',
'res_postalcode': '12345',
'birth_date': db.DateProperty.now(),
'user': role_user,
}
gsoc_org_admin_logic.updateOrCreateFromFields(gsoc_org_admin_properties)
# Create a mentor for an_org
mentor_properties = gsoc_org_admin_properties.copy()
mentor_properties.update({
'link_id': 'a_mentor',
'given_name': 'A',
'surname': 'Mentor',
'email': 'a_mentor@email.com',
})
mentor = mentor_logic.updateOrCreateFromFields(mentor_properties)
self.mentor = mentor
# Create students for a_program
student_properties = gsoc_org_admin_properties.copy()
student_properties.update({
'scope_path': program.scope_path + '/' + program.link_id,
'scope': program,
'program': program,
'given_name': 'A',
'surname': 'Student',
'major': 'A Major',
'name_on_documents': 'A Name on Documents',
'publish_location': True,
'blog': 'http://www.ablog.com/',
'home_page': 'http://www.ahomepage.com/',
'photo_url': 'http://www.astudent.com/aphoto.png',
'expected_graduation': 2011,
'school_country': 'United States',
'school_name': 'A School',
'tshirt_size': 'XS',
'tshirt_style': 'male',
'degree': 'Undergraduate',
'phone': '1650253000',
'can_we_contact_you': True,
'program_knowledge': 'I heard about this program through a friend.'
})
# Create projects for a_program, an_org and a_mentor
project_properties = {
'scope_path': organization.scope_path + '/' + organization.link_id,
'scope': organization,
'title': 'test project',
'abstract': 'test abstract',
'status': 'accepted',
'mentor': mentor,
'program': program,
}
    # Zero-pad the students' link_ids so that their lexicographic order
    # matches their order in the list, ensuring the last student is
    # handled last.
size = 10
self.num_projects = size + 1
num_digits = 0
while True:
size /= 10
num_digits += 1
if size == 0:
break
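    # With self.num_projects == 11, num_digits ends up as 2, so the
    # generated link_ids run from 'student00' to 'student10' (and likewise
    # 'project00' to 'project10'), which sort in numeric order.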
students, projects = [], []
for i in xrange(self.num_projects):
student_link_id = ('student%0'+str(num_digits)+'d') % i
student_properties.update({
'link_id': student_link_id,
'email': student_link_id + '@email.com',
})
student = student_logic.updateOrCreateFromFields(student_properties)
students.append(student)
project_link_id = ('project%0'+str(num_digits)+'d') % i
project_properties.update({
'link_id': project_link_id,
'student': student,
})
project = student_project_logic.updateOrCreateFromFields(
project_properties)
projects.append(project)
self.students = students
self.projects = projects
# Create a project survey for a_program
link_id = 'a_project_survey'
fields = {'SelectionQ': [u'SelectionQ Option 2 for %s' % link_id,
u'SelectionQ Option 1 for %s' % link_id
],
'PickMultipleQ': ['PickMultipleQ Checkbox 1 for %s' % link_id,
'PickMultipleQ Checkbox 2 for %s' % link_id,
],
'LongQ': 'LongQ Custom Prompt for %s' % link_id,
'ShortQ': 'ShortQ Custom Prompt for %s' % link_id,
}
schema = {u'PickMultipleQ': {'index': 5, 'type': 'pick_multi',
'render': 'multi_checkbox'},
u'LongQ': {'index': 2, 'type': 'long_answer'},
u'ShortQ': {'index': 3, 'type': 'short_answer'},
u'SelectionQ': {'index': 4, 'type': 'selection',
'render': 'single_select'}
}
survey_properties = {
'link_id': link_id,
'scope_path': program.scope_path + '/' + program.link_id,
'scope': None,
'prefix': 'program',
'author': role_user,
'title': 'A Project Survey',
'short_name': 'APS',
'modified_by': role_user,
'is_featured': True,
'fields': fields,
'schema': schema,
'deadline': datetime.datetime.now() + datetime.timedelta(10),
'taking_access': 'student',
}
project_survey = project_survey_logic.updateOrCreateFromFields(
survey_properties)
self.project_survey = project_survey
# Create a grading survey for a_program
link_id = 'a_grading_survey'
survey_properties.update({
'link_id': link_id,
'title': 'A Grading Survey',
'short_name': 'AGS',
'taking_access': 'mentor',
})
grading_survey = grading_survey_logic.updateOrCreateFromFields(
survey_properties)
self.grading_survey = grading_survey
def testSpawnSurveyReminderForProjectThroughPostWithoutCorrectXsrfToken(self):
"""Tests that spawning reminders without a correct XSRF token is forbidden.
    Without a correct XSRF token, the attempt to spawn reminders for a
project survey is forbidden.
"""
    url = '/tasks/surveys/projects/send_reminder/spawn'
postdata = {'program_key': self.program.key().name(),
'project_key': self.projects[0].key().name(),
'survey_key': self.project_survey.key().name(),
'survey_type': 'project'}
response = self.client.post(url, postdata)
self.assertEqual(response.status_code, httplib.FORBIDDEN)
def testSpawnSurveyReminderForProjectThroughPostWithCorrectXsrfToken(self):
"""Tests that survey reminders are spawned with a correct XSRF token.
With a correct XSRF token, survey reminders are spawned for all projects.
"""
url = '/tasks/surveys/projects/send_reminder/spawn'
postdata = {'program_key': self.program.key().name(),
'project_key': self.projects[0].key().name(),
'survey_key': self.project_survey.key().name(),
'survey_type': 'project'}
xsrf_token = self.getXsrfToken(url, data=postdata)
postdata.update(xsrf_token=xsrf_token)
response = self.client.post(url, postdata)
self.assertEqual(response.status_code, httplib.OK)
task_url = '/tasks/surveys/projects/send_reminder/send'
queue_names = ['mail']
# The first project is not spawned
self.assertTasksInQueue(n=self.num_projects-1, url=task_url,
queue_names=queue_names)
def testSendProjectSurveyReminderForProjectThroughPostWithoutCorrectXsrfToken(
self):
"""Tests that sending reminders without a correct XSRF token is forbidden.
    Without a correct XSRF token, the attempt to send project survey
reminders is forbidden.
"""
entities = user_logic.getForFields()
count_before = len(entities)
url = '/tasks/surveys/projects/send_reminder/send'
postdata = {'project_key': self.projects[0].key().name(),
'survey_key': self.project_survey.key().name(),
'survey_type': 'project'}
response = self.client.post(url, postdata)
self.assertEqual(response.status_code, httplib.FORBIDDEN)
def testSendProjectSurveyReminderForProjectThroughPostWithCorrectXsrfToken(
self):
"""Tests that project survey reminders are sent out with correct XSRF token.
With a correct XSRF token, project survey reminders for a project
are sent to its student.
"""
entities = user_logic.getForFields()
count_before = len(entities)
url = '/tasks/surveys/projects/send_reminder/send'
postdata = {'project_key': self.projects[0].key().name(),
'survey_key': self.project_survey.key().name(),
'survey_type': 'project'}
xsrf_token = self.getXsrfToken(url, data=postdata)
postdata.update(xsrf_token=xsrf_token)
response = self.client.post(url, postdata)
self.assertEqual(response.status_code, httplib.OK)
self.assertEmailSent(to=self.students[0].email, html='survey')
def testSendGradingSurveyReminderForProjectThroughPostWithoutCorrectXsrfToken(
self):
"""Tests that sending grading reminders is forbidden without correct token.
Without a correct XSRF token, the attempt to send grading survey
reminders is forbidden.
"""
entities = user_logic.getForFields()
count_before = len(entities)
url = '/tasks/surveys/projects/send_reminder/send'
postdata = {'project_key': self.projects[0].key().name(),
'survey_key': self.grading_survey.key().name(),
'survey_type': 'grading'}
response = self.client.post(url, postdata)
self.assertEqual(response.status_code, httplib.FORBIDDEN)
def testSendGradingSurveyReminderForProjectThroughPostWithCorrectXsrfToken(
self):
"""Tests that grading survey reminders are sent out with correct XSRF token.
    With a correct XSRF token, grading survey reminders for a project
are sent to its mentor.
"""
entities = user_logic.getForFields()
count_before = len(entities)
url = '/tasks/surveys/projects/send_reminder/send'
postdata = {'project_key': self.projects[0].key().name(),
'survey_key': self.grading_survey.key().name(),
'survey_type': 'grading'}
xsrf_token = self.getXsrfToken(url, data=postdata)
postdata.update(xsrf_token=xsrf_token)
response = self.client.post(url, postdata)
self.assertEqual(response.status_code, httplib.OK)
self.assertEmailSent(to=self.mentor.email, html='survey')
|
|
# Copyright 2011,2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This component manages connections to OpenFlow 1.0 switches.
Because many POX applications use OpenFlow, this component gets some
special treatment, and an attempt is made to load it automatically if
any other component references it during initialization.
"""
from pox.core import core
import pox
import pox.lib.util
from pox.lib.addresses import EthAddr
from pox.lib.revent.revent import EventMixin
import datetime
import time
from pox.lib.socketcapture import CaptureSocket
import pox.openflow.debug
from pox.openflow.util import make_type_to_unpacker_table
from pox.openflow import *
log = core.getLogger()
import socket
import select
# List where the index is an OpenFlow message type (OFPT_xxx), and
# the values are unpack functions that unpack the wire format of that
# type into a message object.
unpackers = make_type_to_unpacker_table()
try:
PIPE_BUF = select.PIPE_BUF
except:
try:
# Try to get it from where PyPy (sometimes) has it
import IN
PIPE_BUF = IN.PIPE_BUF
except:
# (Hopefully) reasonable default
PIPE_BUF = 512
import pox.openflow.libopenflow_01 as of
import threading
import os
import sys
from errno import EAGAIN, ECONNRESET, EADDRINUSE, EADDRNOTAVAIL, EMFILE
import traceback
# handlers for stats replies
def handle_OFPST_DESC (con, parts):
msg = parts[0].body
e = con.ofnexus.raiseEventNoErrors(SwitchDescReceived,con,parts[0],msg)
if e is None or e.halt != True:
con.raiseEventNoErrors(SwitchDescReceived, con, parts[0], msg)
def handle_OFPST_FLOW (con, parts):
msg = []
for part in parts:
msg.extend(part.body)
e = con.ofnexus.raiseEventNoErrors(FlowStatsReceived, con, parts, msg)
if e is None or e.halt != True:
con.raiseEventNoErrors(FlowStatsReceived, con, parts, msg)
def handle_OFPST_AGGREGATE (con, parts):
msg = parts[0].body
e = con.ofnexus.raiseEventNoErrors(AggregateFlowStatsReceived, con,
parts[0], msg)
if e is None or e.halt != True:
con.raiseEventNoErrors(AggregateFlowStatsReceived, con, parts[0], msg)
def handle_OFPST_TABLE (con, parts):
msg = []
for part in parts:
msg.extend(part.body)
e = con.ofnexus.raiseEventNoErrors(TableStatsReceived, con, parts, msg)
if e is None or e.halt != True:
con.raiseEventNoErrors(TableStatsReceived, con, parts, msg)
def handle_OFPST_PORT (con, parts):
msg = []
for part in parts:
msg.extend(part.body)
e = con.ofnexus.raiseEventNoErrors(PortStatsReceived, con, parts, msg)
if e is None or e.halt != True:
con.raiseEventNoErrors(PortStatsReceived, con, parts, msg)
def handle_OFPST_QUEUE (con, parts):
msg = []
for part in parts:
msg.extend(part.body)
e = con.ofnexus.raiseEventNoErrors(QueueStatsReceived, con, parts, msg)
if e is None or e.halt != True:
con.raiseEventNoErrors(QueueStatsReceived, con, parts, msg)
class OpenFlowHandlers (object):
"""
A superclass for a thing which handles incoming OpenFlow messages
The only public part of the interface is that it should have a "handlers"
attribute which is a list where the index is an OFPT and the value is a
function to call for that type with the parameters (connection, msg). Oh,
and the add_handler() method to add a handler.
The default implementation assumes these handler functions are all methods
with the names "handle_<TYPE>" and resolves those into the handlers list
on init.
"""
def __init__ (self):
# A list, where the index is an OFPT, and the value is a function to
# call for that type
self.handlers = []
self._build_table()
def handle_default (self, con, msg):
pass
def add_handler (self, msg_type, handler):
if msg_type >= len(self.handlers):
missing = msg_type - len(self.handlers) + 1
self.handlers.extend([self.handle_default] * missing)
self.handlers[msg_type] = handler
def _build_table (self):
try:
super(OpenFlowHandlers, self)._build_table()
except:
pass
# Set up handlers for incoming OpenFlow messages
# That is, self.ofp_handlers[OFPT_FOO] = self.handle_foo
for fname in dir(self):
h = getattr(self, fname)
if not fname.startswith('handle_'): continue
fname = fname.split('_',1)[1]
if not fname == fname.upper(): continue
assert callable(h)
of_type = of.ofp_type_rev_map.get('OFPT_' + fname)
if of_type is None:
log.error("No OF message type for %s", fname)
continue
from_switch = getattr(of._message_type_to_class.get(of_type),
'_from_switch', False)
      assert from_switch, "%s is not a switch-to-controller message" % (fname,)
self.add_handler(of_type, h)
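# A minimal sketch (illustrative only, not part of POX proper) of a handler
# class relying on the handle_<TYPE> naming convention described above.
# On instantiation, _build_table() resolves handle_ECHO_REPLY into
# self.handlers[of.OFPT_ECHO_REPLY].
class _ExampleEchoHandlers (OpenFlowHandlers):
  def handle_ECHO_REPLY (self, con, msg):
    # OFPT_ECHO_REPLY is a switch-to-controller message, so the
    # _from_switch check in _build_table() accepts it.
    log.debug("%s: echo reply (xid=%s)", con, msg.xid)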
class DefaultOpenFlowHandlers (OpenFlowHandlers):
"""
Basic OpenFlow message handling functionality
There is generally a single instance of this class which is shared by all
Connections.
"""
@staticmethod
def handle_STATS_REPLY (con, msg):
e = con.ofnexus.raiseEventNoErrors(RawStatsReply, con, msg)
if e is None or e.halt != True:
con.raiseEventNoErrors(RawStatsReply, con, msg)
con._incoming_stats_reply(msg)
@staticmethod
def handle_PORT_STATUS (con, msg): #A
if msg.reason == of.OFPPR_DELETE:
con.ports._forget(msg.desc)
else:
con.ports._update(msg.desc)
e = con.ofnexus.raiseEventNoErrors(PortStatus, con, msg)
if e is None or e.halt != True:
con.raiseEventNoErrors(PortStatus, con, msg)
@staticmethod
def handle_PORT_STATS (con,msg):
e = con.ofnexus.raiseEventNoErrors(PortStats, con, msg)
    if e is None or e.halt != True:
      con.raiseEventNoErrors(PortStats, con, msg)
@staticmethod
def handle_PACKET_IN (con, msg): #A
e = con.ofnexus.raiseEventNoErrors(PacketIn, con, msg)
if e is None or e.halt != True:
con.raiseEventNoErrors(PacketIn, con, msg)
@staticmethod
def handle_ERROR (con, msg): #A
err = ErrorIn(con, msg)
e = con.ofnexus.raiseEventNoErrors(err)
if e is None or e.halt != True:
con.raiseEventNoErrors(err)
if err.should_log:
log.error(str(con) + " OpenFlow Error:\n" +
msg.show(str(con) + " Error: ").strip())
@staticmethod
def handle_BARRIER_REPLY (con, msg):
e = con.ofnexus.raiseEventNoErrors(BarrierIn, con, msg)
if e is None or e.halt != True:
con.raiseEventNoErrors(BarrierIn, con, msg)
@staticmethod
def handle_VENDOR (con, msg):
log.info("Vendor msg: " + str(msg))
@staticmethod
def handle_HELLO (con, msg): #S
#con.msg("HELLO wire protocol " + hex(msg.version))
# Send a features request
msg = of.ofp_features_request()
con.send(msg)
@staticmethod
def handle_ECHO_REPLY (con, msg):
#con.msg("Got echo reply")
pass
@staticmethod
def handle_ECHO_REQUEST (con, msg): #S
reply = msg
reply.header_type = of.OFPT_ECHO_REPLY
con.send(reply)
@staticmethod
def handle_FLOW_REMOVED (con, msg): #A
e = con.ofnexus.raiseEventNoErrors(FlowRemoved, con, msg)
if e is None or e.halt != True:
con.raiseEventNoErrors(FlowRemoved, con, msg)
@staticmethod
def handle_FEATURES_REPLY (con, msg):
con.features = msg
con.original_ports._ports = set(msg.ports)
con.ports._reset()
con.dpid = msg.datapath_id # Check this
con.ofnexus._connect(con) #FIXME: Should this be here?
e = con.ofnexus.raiseEventNoErrors(FeaturesReceived, con, msg)
if e is None or e.halt != True:
con.raiseEventNoErrors(FeaturesReceived, con, msg)
@staticmethod
def handle_GET_CONFIG_REPLY (con, msg):
e = con.ofnexus.raiseEventNoErrors(ConfigurationReceived, con, msg)
if e is None or e.halt != True:
con.raiseEventNoErrors(ConfigurationReceived, con, msg)
@staticmethod
def handle_QUEUE_GET_CONFIG_REPLY (con, msg):
#TODO
pass
# Default handlers for connections in connected state
_default_handlers = DefaultOpenFlowHandlers()
class HandshakeOpenFlowHandlers (OpenFlowHandlers):
"""
OpenFlow message handling for the handshake state
"""
# If False, don't send a switch desc request when connecting
request_description = True
def __init__ (self):
self._features_request_sent = False
self._barrier = None
super(HandshakeOpenFlowHandlers, self).__init__()
def handle_BARRIER_REPLY (self, con, msg):
if not self._barrier: return
if msg.xid != self._barrier.xid:
con.dpid = None
con.err("failed connect")
con.disconnect()
else:
self._finish_connecting(con)
def handle_ERROR (self, con, msg): #A
if not self._barrier: return
if msg.xid != self._barrier.xid: return
if msg.type != of.OFPET_BAD_REQUEST: return
if msg.code != of.OFPBRC_BAD_TYPE: return
# Okay, so this is probably an HP switch that doesn't support barriers
# (ugh). We'll just assume that things are okay.
self._finish_connecting(con)
def handle_HELLO (self, con, msg): #S
# Send features and switch desc requests
if not self._features_request_sent:
self._features_request_sent = True
fr = of.ofp_features_request()
if self.request_description:
ss = of.ofp_stats_request()
ss.body = of.ofp_desc_stats_request()
con.send(fr.pack() + ss.pack())
else:
con.send(fr)
@staticmethod
def handle_ECHO_REQUEST (con, msg): #S
reply = msg
reply.header_type = of.OFPT_ECHO_REPLY
con.send(reply)
@staticmethod
def handle_STATS_REPLY (con, msg):
if msg.body and isinstance(msg.body, of.ofp_desc_stats_reply):
con.description = msg.body
def handle_FEATURES_REPLY (self, con, msg):
    connecting = con.connect_time is None
con.features = msg
con.original_ports._ports = set(msg.ports)
con.ports._reset()
con.dpid = msg.datapath_id
nexus = core.OpenFlowConnectionArbiter.getNexus(con)
if nexus is None:
# Cancel connection
con.info("No OpenFlow nexus for " +
pox.lib.util.dpidToStr(msg.datapath_id))
con.disconnect()
return
con.ofnexus = nexus
con.ofnexus._connect(con)
#TODO: Add a timeout for finish_connecting
if con.ofnexus.miss_send_len is not None:
con.send(of.ofp_set_config(miss_send_len =
con.ofnexus.miss_send_len))
if con.ofnexus.clear_flows_on_connect:
con.send(of.ofp_flow_mod(match=of.ofp_match(),command=of.OFPFC_DELETE))
self._barrier = of.ofp_barrier_request()
con.send(self._barrier)
# To support old versions of cbench, just finish connecting here.
#self._finish_connecting(con)
def _finish_connecting (self, con):
con.info("connected")
con.connect_time = time.time()
con.handlers = _default_handlers.handlers
con.ofnexus.raiseEventNoErrors(ConnectionHandshakeComplete, con)
e = con.ofnexus.raiseEventNoErrors(ConnectionUp, con, con.features)
if e is None or e.halt != True:
con.raiseEventNoErrors(ConnectionUp, con, con.features)
if con.features:
e = con.ofnexus.raiseEventNoErrors(FeaturesReceived, con, con.features)
if e is None or e.halt != True:
con.raiseEventNoErrors(FeaturesReceived, con, con.features)
statsHandlerMap = {
of.OFPST_DESC : handle_OFPST_DESC,
of.OFPST_FLOW : handle_OFPST_FLOW,
of.OFPST_AGGREGATE : handle_OFPST_AGGREGATE,
of.OFPST_TABLE : handle_OFPST_TABLE,
of.OFPST_PORT : handle_OFPST_PORT,
of.OFPST_QUEUE : handle_OFPST_QUEUE,
}
# Deferred sending should be unusual, so don't worry too much about
# efficiency
class DeferredSender (threading.Thread):
"""
Class that handles sending when a socket write didn't complete
"""
def __init__ (self):
threading.Thread.__init__(self)
core.addListeners(self)
self._dataForConnection = {}
self._lock = threading.RLock()
self._waker = pox.lib.util.makePinger()
self.sending = False
self.start()
def _handle_GoingDownEvent (self, event):
self._waker.ping()
def _sliceup (self, data):
"""
Takes an array of data bytes, and slices into elements of
PIPE_BUF bytes each
"""
out = []
while len(data) > PIPE_BUF:
out.append(data[0:PIPE_BUF])
data = data[PIPE_BUF:]
if len(data) > 0:
out.append(data)
return out
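  # For example, with PIPE_BUF == 512, a 1300-byte buffer is sliced into
  # chunks of 512, 512, and 276 bytes.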
def send (self, con, data):
with self._lock:
self.sending = True
data = self._sliceup(data)
if con not in self._dataForConnection:
self._dataForConnection[con] = data
else:
self._dataForConnection[con].extend(data)
self._waker.ping()
def kill (self, con):
with self._lock:
try:
del self._dataForConnection[con]
except:
pass
self._waker.ping()
def run (self):
while core.running:
with self._lock:
cons = self._dataForConnection.keys()
rlist, wlist, elist = select.select([self._waker], cons, cons, 5)
if not core.running: break
with self._lock:
if len(rlist) > 0:
self._waker.pongAll()
for con in elist:
try:
del self._dataForConnection[con]
except:
pass
for con in wlist:
try:
alldata = self._dataForConnection[con]
while len(alldata):
data = alldata[0]
try:
l = con.sock.send(data)
if l != len(data):
alldata[0] = data[l:]
break
del alldata[0]
except socket.error as (errno, strerror):
if errno != EAGAIN:
con.msg("DeferredSender/Socket error: " + strerror)
con.disconnect()
del self._dataForConnection[con]
break
except:
con.msg("Unknown error doing deferred sending")
break
if len(alldata) == 0:
try:
del self._dataForConnection[con]
if len(self._dataForConnection) == 0:
self.sending = False
break
except:
pass
except:
try:
del self._dataForConnection[con]
except:
pass
class DummyOFNexus (object):
def raiseEventNoErrors (self, event, *args, **kw):
log.warning("%s raised on dummy OpenFlow nexus" % event)
def raiseEvent (self, event, *args, **kw):
log.warning("%s raised on dummy OpenFlow nexus" % event)
def _disconnect (self, dpid):
log.warning("%s disconnected on dummy OpenFlow nexus",
pox.lib.util.dpidToStr(dpid))
_dummyOFNexus = DummyOFNexus()
"""
class FileCloser (object):
def __init__ (self):
from weakref import WeakSet
self.items = WeakSet()
core.addListeners(self)
import atexit
atexit.register(self._handle_DownEvent, None)
def _handle_DownEvent (self, event):
for item in self.items:
try:
item.close()
except Exception:
log.exception("Couldn't close a file while shutting down")
self.items.clear()
_itemcloser = FileCloser()
"""
class OFCaptureSocket (CaptureSocket):
"""
Captures OpenFlow data to a pcap file
"""
def __init__ (self, *args, **kw):
super(OFCaptureSocket,self).__init__(*args, **kw)
self._rbuf = bytes()
self._sbuf = bytes()
self._enabled = True
#_itemcloser.items.add(self)
def _recv_out (self, buf):
if not self._enabled: return
self._rbuf += buf
l = len(self._rbuf)
while l > 4:
if ord(self._rbuf[0]) != of.OFP_VERSION:
log.error("Bad OpenFlow version while trying to capture trace")
self._enabled = False
break
packet_length = ord(self._rbuf[2]) << 8 | ord(self._rbuf[3])
if packet_length > l: break
try:
self._writer.write(False, self._rbuf[:packet_length])
except Exception:
log.exception("Exception while writing controller trace")
self._enabled = False
self._rbuf = self._rbuf[packet_length:]
l = len(self._rbuf)
def _send_out (self, buf, r):
if not self._enabled: return
self._sbuf += buf
l = len(self._sbuf)
while l > 4:
if ord(self._sbuf[0]) != of.OFP_VERSION:
log.error("Bad OpenFlow version while trying to capture trace")
self._enabled = False
break
packet_length = ord(self._sbuf[2]) << 8 | ord(self._sbuf[3])
if packet_length > l: break
try:
self._writer.write(True, self._sbuf[:packet_length])
except Exception:
log.exception("Exception while writing controller trace")
self._enabled = False
self._sbuf = self._sbuf[packet_length:]
l = len(self._sbuf)
class PortCollection (object):
"""
Keeps track of lists of ports and provides nice indexing.
One of the complexities of this class is due to how we get port information
from OpenFlow. We get an initial set of ports during handshake. We then
get updates after that. We actually want to keep the original info around,
but we *usually* are only interested in the "up to date" version with
all the "delta" updates applied. Thus, this collection can "chain" to a
parent collection. The original ports are stored in one collection, and
deltas are applied to a child. It's usually this child which is queried.
If a port is removed from a child, the child *masks* it. If the entry were
simply removed from the child, then when a user queries for it, we might
walk down the chain and find it in a parent which isn't what we want.
NOTE: It's possible this could be simpler by inheriting from UserDict,
but I couldn't swear without looking at UserDict in some detail,
so I just implemented a lot of stuff by hand.
"""
def __init__ (self):
self._ports = set() # Set of ofp_phy_ports
self._masks = set() # port_nos of ports which have been removed
self._chain = None # A parent port collection
def _reset (self):
self._ports.clear()
self._masks.clear()
def _forget (self, port):
# Note that all we really need here is the port_no. We pass an entire
# ofp_phy_port anyway for consistency with _update(), though this could
# be re-evaluated if there's ever another caller of _forget().
self._masks.add(port.port_no)
self._ports = set([p for p in self._ports if p.port_no != port.port_no])
def _update (self, port):
self._masks.discard(port.port_no)
self._ports = set([p for p in self._ports if p.port_no != port.port_no])
self._ports.add(port)
def __str__ (self):
if len(self) == 0:
return "<Ports: Empty>"
l = ["%s:%i"%(p.name,p.port_no) for p in sorted(self.values())]
return "<Ports: %s>" % (", ".join(l),)
def __len__ (self):
return len(self.keys())
def __getitem__ (self, index):
if isinstance(index, (int,long)):
for p in self._ports:
if p.port_no == index:
return p
elif isinstance(index, EthAddr):
for p in self._ports:
if p.hw_addr == index:
return p
else:
for p in self._ports:
if p.name == index:
return p
if self._chain:
p = self._chain[index]
if p.port_no not in self._masks:
return p
raise IndexError("No key %s" % (index,))
def keys (self):
if self._chain:
k = set(self._chain.keys())
k.difference_update(self._masks)
else:
k = set()
k.update([p.port_no for p in self._ports])
return list(k)
def __iter__ (self):
return iter(self.keys())
def iterkeys (self):
return iter(self.keys())
def __contains__ (self, index):
try:
self[index]
return True
except Exception:
pass
return False
def values (self):
return [self[k] for k in self.keys()]
def items (self):
return [(k,self[k]) for k in self.keys()]
def itervalues (self):
return iter(self.values())
def iteritems (self):
return iter(self.items())
def has_key (self, k):
return k in self
def get (self, k, default=None):
try:
return self[k]
except IndexError:
return default
  def copy (self):
    r = PortCollection()
    r._ports = set(self.values())
    return r
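# A minimal sketch (illustrative only; not used anywhere in POX) of the
# chaining and masking behaviour described in the PortCollection docstring.
def _port_collection_example ():
  original = PortCollection()  # e.g. the ports from a FEATURES_REPLY
  live = PortCollection()      # the "up to date" view with deltas applied
  live._chain = original
  # Removing a port from 'live' records its port_no in live._masks, so a
  # lookup does not fall through the chain to the stale copy still held
  # by 'original'.
  return live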
class Connection (EventMixin):
"""
A Connection object represents a single TCP session with an
openflow-enabled switch.
If the switch reconnects, a new connection object is instantiated.
"""
_eventMixin_events = set([
ConnectionUp,
ConnectionDown,
PortStatus,
PacketIn,
ErrorIn,
BarrierIn,
RawStatsReply,
SwitchDescReceived,
FlowStatsReceived,
AggregateFlowStatsReceived,
TableStatsReceived,
PortStatsReceived,
QueueStatsReceived,
FlowRemoved,
FeaturesReceived,
ConfigurationReceived,
PortStats,
])
# Globally unique identifier for the Connection instance
ID = 0
_aborted_connections = 0
def msg (self, m):
#print str(self), m
log.debug(str(self) + " " + str(m))
def err (self, m):
#print str(self), m
log.error(str(self) + " " + str(m))
  def info (self, m):
    #print str(self), m
    log.info(str(self) + " " + str(m))
def __init__ (self, sock):
self._previous_stats = []
self.ofnexus = _dummyOFNexus
self.sock = sock
self.buf = b''
Connection.ID += 1
self.ID = Connection.ID
# DPID of connected switch. None before connection is complete.
self.dpid = None
# Switch features reply. Set during handshake.
self.features = None
# Switch desc stats reply. Set during handshake ordinarily, but may
# be None.
self.description = None
self.disconnected = False
self.disconnection_raised = False
self.connect_time = None
self.idle_time = time.time()
self.send(of.ofp_hello())
self.original_ports = PortCollection()
self.ports = PortCollection()
self.ports._chain = self.original_ports
#TODO: set a time that makes sure we actually establish a connection by
# some timeout
self.unpackers = unpackers
self.handlers = HandshakeOpenFlowHandlers().handlers
@property
def eth_addr (self):
dpid = self.dpid
if self.dpid is None:
raise RuntimeError("eth_addr not available")
return EthAddr("%012x" % (dpid & 0xffFFffFFffFF,))
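  # For example, a dpid of 0x1 yields EthAddr("00:00:00:00:00:01"); only
  # the low 48 bits of the dpid are used.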
def fileno (self):
return self.sock.fileno()
def close (self):
self.disconnect('closed')
try:
self.sock.close()
except:
pass
def _do_abort_message (self):
"""
Log a message about aborted (no DPID) disconnects
"""
assert Connection._aborted_connections > 0
msg = str(Connection._aborted_connections) + " connection"
if Connection._aborted_connections != 1: msg += "s"
msg += " aborted"
log.debug(msg)
Connection._aborted_connections = 0
def disconnect (self, msg = 'disconnected', defer_event = False):
"""
disconnect this Connection (usually not invoked manually).
"""
if self.disconnected:
self.msg("already disconnected")
if self.dpid is None:
# If we never got a DPID, log later (coalesce the messages)
Connection._aborted_connections += 1
if Connection._aborted_connections == 1:
core.callDelayed(20, self._do_abort_message)
else:
self.info(msg)
self.disconnected = True
try:
self.ofnexus._disconnect(self.dpid)
except:
pass
if self.dpid is not None:
if not self.disconnection_raised and not defer_event:
self.disconnection_raised = True
self.ofnexus.raiseEventNoErrors(ConnectionDown, self)
self.raiseEventNoErrors(ConnectionDown, self)
try:
#deferredSender.kill(self)
pass
except:
pass
try:
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
try:
pass
#TODO disconnect notification
except:
pass
def send (self, data):
"""
Send data to the switch.
Data should probably either be raw bytes in OpenFlow wire format, or
an OpenFlow controller-to-switch message object from libopenflow.
"""
if self.disconnected: return
if type(data) is not bytes:
# There's actually no reason the data has to be an instance of
# ofp_header, but this check is likely to catch a lot of bugs,
# so we check it anyway.
assert isinstance(data, of.ofp_header)
data = data.pack()
if deferredSender.sending:
log.debug("deferred sender is sending!")
deferredSender.send(self, data)
return
try:
l = self.sock.send(data)
if l != len(data):
self.msg("Didn't send complete buffer.")
data = data[l:]
deferredSender.send(self, data)
except socket.error as (errno, strerror):
if errno == EAGAIN:
self.msg("Out of send buffer space. " +
"Consider increasing SO_SNDBUF.")
deferredSender.send(self, data)
else:
self.msg("Socket error: " + strerror)
self.disconnect(defer_event=True)
def read (self):
"""
Read data from this connection. Generally this is just called by the
main OpenFlow loop below.
Note: This function will block if data is not available.
"""
try:
d = self.sock.recv(2048)
except:
return False
if len(d) == 0:
return False
self.buf += d
buf_len = len(self.buf)
offset = 0
while buf_len - offset >= 8: # 8 bytes is minimum OF message size
# We pull the first four bytes of the OpenFlow header off by hand
# (using ord) to find the version/length/type so that we can
# correctly call libopenflow to unpack it.
ofp_type = ord(self.buf[offset+1])
if ord(self.buf[offset]) != of.OFP_VERSION:
if ofp_type == of.OFPT_HELLO:
# We let this through and hope the other side switches down.
pass
else:
log.warning("Bad OpenFlow version (0x%02x) on connection %s"
% (ord(self.buf[offset]), self))
return False # Throw connection away
msg_length = ord(self.buf[offset+2]) << 8 | ord(self.buf[offset+3])
if buf_len - offset < msg_length: break
new_offset,msg = self.unpackers[ofp_type](self.buf, offset)
assert new_offset - offset == msg_length
offset = new_offset
try:
h = self.handlers[ofp_type]
h(self, msg)
except:
log.exception("%s: Exception while handling OpenFlow message:\n" +
"%s %s", self,self,
("\n" + str(self) + " ").join(str(msg).split('\n')))
continue
if offset != 0:
self.buf = self.buf[offset:]
return True
def _incoming_stats_reply (self, ofp):
# This assumes that you don't receive multiple stats replies
# to different requests out of order/interspersed.
if not ofp.is_last_reply:
if ofp.type not in [of.OFPST_FLOW, of.OFPST_TABLE,
of.OFPST_PORT, of.OFPST_QUEUE]:
log.error("Don't know how to aggregate stats message of type " +
str(ofp.type))
self._previous_stats = []
return
if len(self._previous_stats) != 0:
if ((ofp.xid == self._previous_stats[0].xid) and
(ofp.type == self._previous_stats[0].type)):
self._previous_stats.append(ofp)
else:
        log.error("Was expecting continued stats of type %i with xid %i, "
                  "but got type %i with xid %i"
                  % (self._previous_stats[0].type,
                     self._previous_stats[0].xid,
                     ofp.type, ofp.xid))
self._previous_stats = [ofp]
else:
self._previous_stats = [ofp]
if ofp.is_last_reply:
handler = statsHandlerMap.get(self._previous_stats[0].type, None)
s = self._previous_stats
self._previous_stats = []
if handler is None:
        log.warn("No handler for stats of type " + str(s[0].type))
return
handler(self, s)
def __str__ (self):
#return "[Con " + str(self.ID) + "/" + str(self.dpid) + "]"
if self.dpid is None:
d = str(self.dpid)
else:
d = pox.lib.util.dpidToStr(self.dpid)
return "[%s %i]" % (d, self.ID)
def wrap_socket (new_sock):
fname = datetime.datetime.now().strftime("%Y-%m-%d-%I%M%p")
fname += "_" + new_sock.getpeername()[0].replace(".", "_")
fname += "_" + `new_sock.getpeername()[1]` + ".pcap"
pcapfile = file(fname, "w")
try:
new_sock = OFCaptureSocket(new_sock, pcapfile,
local_addrs=(None,None,6633))
except Exception:
import traceback
traceback.print_exc()
pass
return new_sock
from pox.lib.recoco.recoco import *
class OpenFlow_01_Task (Task):
"""
The main recoco thread for listening to openflow messages
"""
def __init__ (self, port = 6633, address = '0.0.0.0',
ssl_key = None, ssl_cert = None, ssl_ca_cert = None):
"""
Initialize
This listener will be for SSL connections if the SSL params are specified
"""
Task.__init__(self)
self.port = int(port)
self.address = address
self.started = False
self.ssl_key = ssl_key
self.ssl_cert = ssl_cert
self.ssl_ca_cert = ssl_ca_cert
if self.ssl_key or self.ssl_cert or ssl_ca_cert:
global ssl
ssl = None
try:
import ssl as sslmodule
ssl = sslmodule
except:
raise RuntimeError("SSL is not available")
core.addListener(pox.core.GoingUpEvent, self._handle_GoingUpEvent)
def _handle_GoingUpEvent (self, event):
self.start()
def start (self):
if self.started:
return
self.started = True
return super(OpenFlow_01_Task,self).start()
def run (self):
# List of open sockets/connections to select on
sockets = []
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
listener.bind((self.address, self.port))
except socket.error as (errno, strerror):
log.error("Error %i while binding %s:%s: %s",
errno, self.address, self.port, strerror)
if errno == EADDRNOTAVAIL:
log.error(" You may be specifying a local address which is "
"not assigned to any interface.")
elif errno == EADDRINUSE:
log.error(" You may have another controller running.")
log.error(" Use openflow.of_01 --port=<port> to run POX on "
"another port.")
return
listener.listen(16)
listener.setblocking(0)
sockets.append(listener)
log.debug("Listening on %s:%s" %
(self.address, self.port))
con = None
while core.running:
try:
while True:
con = None
rlist, wlist, elist = yield Select(sockets, [], sockets, 5)
if len(rlist) == 0 and len(wlist) == 0 and len(elist) == 0:
if not core.running: break
for con in elist:
if con is listener:
raise RuntimeError("Error on listener socket")
else:
try:
con.close()
except:
pass
try:
sockets.remove(con)
except:
pass
timestamp = time.time()
for con in rlist:
if con is listener:
new_sock = listener.accept()[0]
if self.ssl_key or self.ssl_cert or self.ssl_ca_cert:
cert_reqs = ssl.CERT_REQUIRED
if self.ssl_ca_cert is None:
cert_reqs = ssl.CERT_NONE
new_sock = ssl.wrap_socket(new_sock, server_side=True,
keyfile = self.ssl_key, certfile = self.ssl_cert,
ca_certs = self.ssl_ca_cert, cert_reqs = cert_reqs,
do_handshake_on_connect = False,
suppress_ragged_eofs = True)
#FIXME: We currently do a blocking handshake so that SSL errors
# can't occur out of the blue later. This isn't a good
# thing, but getting around it will take some effort.
try:
new_sock.setblocking(1)
new_sock.do_handshake()
except ssl.SSLError as exc:
if exc.errno == 8 and "EOF occurred" in exc.strerror:
# Annoying, but just ignore
pass
else:
#log.exception("SSL negotiation failed")
log.warn("SSL negotiation failed: " + str(exc))
continue
if pox.openflow.debug.pcap_traces:
new_sock = wrap_socket(new_sock)
new_sock.setblocking(0)
# Note that instantiating a Connection object fires a
# ConnectionUp event (after negotation has completed)
newcon = Connection(new_sock)
sockets.append( newcon )
#print str(newcon) + " connected"
else:
con.idle_time = timestamp
if con.read() is False:
con.close()
sockets.remove(con)
except KeyboardInterrupt:
break
except:
def log_tb ():
log.exception("Exception reading connection " + str(con))
do_break = False # Break OpenFlow loop?
do_close = True # Close this socket?
sock_error = None
if sys.exc_info()[0] is socket.error:
sock_error = sys.exc_info()[1][0]
if con is listener:
do_close = False
if sock_error == ECONNRESET:
con.info("Connection reset")
elif sock_error == EMFILE:
log.error("Couldn't accept connection: out of file descriptors.")
else:
do_close = True
log_tb()
log.error("Exception on OpenFlow listener. Aborting.")
do_break = True
else:
# Normal socket
if sock_error == ECONNRESET:
con.info("Connection reset")
else:
log_tb()
if do_close:
try:
con.close()
except:
pass
try:
sockets.remove(con)
except:
pass
if do_break:
# Leave the OpenFlow loop
break
log.debug("No longer listening for connections")
#pox.core.quit()
# Used by the Connection class
deferredSender = None
def launch (port=6633, address="0.0.0.0", name=None,
private_key=None, certificate=None, ca_cert=None,
__INSTANCE__=None):
"""
Start a listener for OpenFlow connections
  If you want to enable SSL, pass private_key/certificate/ca_cert in a
  reasonable combination, pointing to appropriate key/cert files. These have
  the same meanings as with Open vSwitch's old test controller, but they are
  more flexible (e.g., ca-cert can be skipped).
"""
if name is None:
basename = "of_01"
counter = 1
name = basename
while core.hasComponent(name):
counter += 1
name = "%s-%s" % (basename, counter)
if core.hasComponent(name):
log.warn("of_01 '%s' already started", name)
return None
global deferredSender
if not deferredSender:
deferredSender = DeferredSender()
if of._logger is None:
of._logger = core.getLogger('libopenflow_01')
l = OpenFlow_01_Task(port = int(port), address = address,
ssl_key = private_key, ssl_cert = certificate,
ssl_ca_cert = ca_cert)
core.register(name, l)
return l
|
|
import collections
import os
import sys
import numpy
try:
from PIL import Image
available = True
except ImportError as e:
available = False
_import_error = e
import chainer
from chainer.dataset.convert import concat_examples
from chainer.dataset import download
from chainer import function
from chainer.functions.activation.relu import relu
from chainer.functions.activation.softmax import softmax
from chainer.functions.array.reshape import reshape
from chainer.functions.math.sum import sum
from chainer.functions.pooling.average_pooling_2d import average_pooling_2d
from chainer.functions.pooling.max_pooling_2d import max_pooling_2d
from chainer.initializers import constant
from chainer.initializers import normal
from chainer import link
from chainer.links.connection.convolution_2d import Convolution2D
from chainer.links.connection.linear import Linear
from chainer.links.normalization.batch_normalization import BatchNormalization
from chainer.serializers import npz
from chainer.utils import argument
from chainer.utils import imgproc
from chainer.variable import Variable
class ResNetLayers(link.Chain):
"""A pre-trained CNN model provided by MSRA.
When you specify the path of the pre-trained chainer model serialized as
a ``.npz`` file in the constructor, this chain model automatically
initializes all the parameters with it.
This model would be useful when you want to extract a semantic feature
vector per image, or fine-tune the model on a different dataset.
Note that unlike ``VGG16Layers``, it does not automatically download a
pre-trained caffemodel. This caffemodel can be downloaded at
`GitHub <https://github.com/KaimingHe/deep-residual-networks>`_.
If you want to manually convert the pre-trained caffemodel to a chainer
model that can be specified in the constructor,
please use ``convert_caffemodel_to_npz`` classmethod instead.
    See: K. He et al., `Deep Residual Learning for Image Recognition
<https://arxiv.org/abs/1512.03385>`_
Args:
pretrained_model (str): the destination of the pre-trained
chainer model serialized as a ``.npz`` file.
If this argument is specified as ``auto``,
it automatically loads and converts the caffemodel from
            ``$CHAINER_DATASET_ROOT/pfnet/chainer/models/ResNet-{n_layers}-model.caffemodel``,
where ``$CHAINER_DATASET_ROOT`` is set as
``$HOME/.chainer/dataset`` unless you specify another value
by modifying the environment variable and {n_layers} is replaced
with the specified number of layers given as the first argument to
this constructor. Note that in this case the converted chainer
model is stored on the same directory and automatically used from
the next time.
            If this argument is specified as ``None``, the parameters are
            not initialized from a pre-trained model; instead, the default
            initializer used in the original paper is applied, i.e.,
            ``chainer.initializers.HeNormal(scale=1.0)``.
n_layers (int): The number of layers of this model. It should be either
50, 101, or 152.
downsample_fb (bool): If this argument is specified as ``False``,
it performs downsampling by placing stride 2
on the 1x1 convolutional layers (the original MSRA ResNet).
If this argument is specified as ``True``, it performs downsampling
by placing stride 2 on the 3x3 convolutional layers
(Facebook ResNet).
Attributes:
available_layers (list of str): The list of available layer names
used by ``forward`` and ``extract`` methods.
"""
def __init__(self, pretrained_model, n_layers, downsample_fb=False):
super(ResNetLayers, self).__init__()
if pretrained_model:
# As a sampling process is time-consuming,
# we employ a zero initializer for faster computation.
conv_kwargs = {'initialW': constant.Zero()}
else:
# employ default initializers used in the original paper
conv_kwargs = {'initialW': normal.HeNormal(scale=1.0)}
kwargs = conv_kwargs.copy()
kwargs['downsample_fb'] = downsample_fb
if n_layers == 50:
block = [3, 4, 6, 3]
elif n_layers == 101:
block = [3, 4, 23, 3]
elif n_layers == 152:
block = [3, 8, 36, 3]
else:
raise ValueError('The n_layers argument should be either 50, 101,'
' or 152, but {} was given.'.format(n_layers))
with self.init_scope():
self.conv1 = Convolution2D(3, 64, 7, 2, 3, **conv_kwargs)
self.bn1 = BatchNormalization(64)
self.res2 = BuildingBlock(block[0], 64, 64, 256, 1, **kwargs)
self.res3 = BuildingBlock(block[1], 256, 128, 512, 2, **kwargs)
self.res4 = BuildingBlock(block[2], 512, 256, 1024, 2, **kwargs)
self.res5 = BuildingBlock(block[3], 1024, 512, 2048, 2, **kwargs)
self.fc6 = Linear(2048, 1000)
if pretrained_model and pretrained_model.endswith('.caffemodel'):
_retrieve(n_layers, 'ResNet-{}-model.npz'.format(n_layers),
pretrained_model, self)
elif pretrained_model:
npz.load_npz(pretrained_model, self)
@property
def functions(self):
return collections.OrderedDict([
('conv1', [self.conv1, self.bn1, relu]),
('pool1', [lambda x: max_pooling_2d(x, ksize=3, stride=2)]),
('res2', [self.res2]),
('res3', [self.res3]),
('res4', [self.res4]),
('res5', [self.res5]),
('pool5', [_global_average_pooling_2d]),
('fc6', [self.fc6]),
('prob', [softmax]),
])
@property
def available_layers(self):
return list(self.functions.keys())
@classmethod
def convert_caffemodel_to_npz(cls, path_caffemodel, path_npz, n_layers=50):
"""Converts a pre-trained caffemodel to a chainer model.
Args:
path_caffemodel (str): Path of the pre-trained caffemodel.
path_npz (str): Path of the converted chainer model.
"""
# As CaffeFunction uses shortcut symbols,
# we import CaffeFunction here.
from chainer.links.caffe.caffe_function import CaffeFunction
caffemodel = CaffeFunction(path_caffemodel)
chainermodel = cls(pretrained_model=None, n_layers=n_layers)
if n_layers == 50:
_transfer_resnet50(caffemodel, chainermodel)
elif n_layers == 101:
_transfer_resnet101(caffemodel, chainermodel)
elif n_layers == 152:
_transfer_resnet152(caffemodel, chainermodel)
else:
raise ValueError('The n_layers argument should be either 50, 101,'
' or 152, but {} was given.'.format(n_layers))
npz.save_npz(path_npz, chainermodel, compression=False)
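    # A brief usage sketch of the conversion above (file paths are
    # hypothetical):
    #
    #     ResNetLayers.convert_caffemodel_to_npz(
    #         'ResNet-50-model.caffemodel', 'ResNet-50-model.npz', n_layers=50)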
def forward(self, x, layers=None, **kwargs):
"""forward(self, x, layers=['prob'])
Computes all the feature maps specified by ``layers``.
Args:
x (~chainer.Variable): Input variable. It should be prepared by
``prepare`` function.
layers (list of str): The list of layer names you want to extract.
Returns:
            Dictionary of ~chainer.Variable: A dictionary whose keys are
            layer names and whose values are the corresponding feature
            map variables.
"""
if layers is None:
layers = ['prob']
if kwargs:
argument.check_unexpected_kwargs(
kwargs, test='test argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
h = x
activations = {}
target_layers = set(layers)
for key, funcs in self.functions.items():
if len(target_layers) == 0:
break
for func in funcs:
h = func(h)
if key in target_layers:
activations[key] = h
target_layers.remove(key)
return activations
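    # A brief sketch (hypothetical) of pulling several intermediate
    # activations in a single pass; ``x`` is assumed to have been built
    # with the ``prepare`` function:
    #
    #     feats = model.forward(x, layers=['res4', 'pool5'])
    #     h_res4, h_pool5 = feats['res4'], feats['pool5']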
def extract(self, images, layers=None, size=(224, 224), **kwargs):
"""extract(self, images, layers=['pool5'], size=(224, 224))
Extracts all the feature maps of given images.
        The difference from directly executing ``forward`` is that
it directly accepts images as an input and automatically
transforms them to a proper variable. That is,
it is also interpreted as a shortcut method that implicitly calls
``prepare`` and ``forward`` functions.
Unlike ``predict`` method, this method does not override
``chainer.config.train`` and ``chainer.config.enable_backprop``
configuration. If you want to extract features without updating
model parameters, you need to manually set configuration when
calling this method as follows:
.. code-block:: python
# model is an instance of ResNetLayers (50 or 101 or 152 layers)
with chainer.using_config('train', False):
with chainer.using_config('enable_backprop', False):
feature = model.extract([image])
Args:
images (iterable of PIL.Image or numpy.ndarray): Input images.
layers (list of str): The list of layer names you want to extract.
            size (pair of ints): The resolution of resized images used as
                an input of CNN. The given images are not resized if this
                argument is ``None``, but in that case the resolutions of
                all the images must be the same.
Returns:
            Dictionary of ~chainer.Variable: A dictionary whose keys are
            layer names and whose values are the corresponding feature
            map variables.
"""
if layers is None:
layers = ['pool5']
if kwargs:
argument.check_unexpected_kwargs(
kwargs, test='test argument is not supported anymore. '
'Use chainer.using_config',
volatile='volatile argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
x = concat_examples([prepare(img, size=size) for img in images])
x = Variable(self.xp.asarray(x))
return self(x, layers=layers)
def predict(self, images, oversample=True):
"""Computes all the probabilities of given images.
Args:
images (iterable of PIL.Image or numpy.ndarray): Input images.
When you specify a color image as a :class:`numpy.ndarray`,
make sure that color order is RGB.
oversample (bool): If ``True``, it averages results across
center, corners, and mirrors. Otherwise, it uses only the
center.
Returns:
~chainer.Variable: Output that contains the class probabilities
of given images.
"""
x = concat_examples([prepare(img, size=(256, 256)) for img in images])
if oversample:
x = imgproc.oversample(x, crop_dims=(224, 224))
else:
x = x[:, :, 16:240, 16:240]
# Use no_backprop_mode to reduce memory consumption
with function.no_backprop_mode(), chainer.using_config('train', False):
x = Variable(self.xp.asarray(x))
y = self(x, layers=['prob'])['prob']
if oversample:
n = len(y) // 10
y_shape = y.shape[1:]
y = reshape(y, (n, 10) + y_shape)
y = sum(y, axis=1) / 10
return y
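# Usage sketch (hypothetical file name; assumes Pillow is installed and a
# converted ``.npz`` model is available, per the class docstrings below):
#
#     from PIL import Image
#     model = ResNet50Layers(pretrained_model='auto')
#     img = Image.open('cat.png')
#     with chainer.using_config('train', False):
#         prob = model.predict([img], oversample=True)   # class probabilities
#         pool5 = model.extract([img])['pool5']          # 2048-d feature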
class ResNet50Layers(ResNetLayers):
"""A pre-trained CNN model with 50 layers provided by MSRA.
When you specify the path of the pre-trained chainer model serialized as
a ``.npz`` file in the constructor, this chain model automatically
initializes all the parameters with it.
This model would be useful when you want to extract a semantic feature
vector per image, or fine-tune the model on a different dataset.
Note that unlike ``VGG16Layers``, it does not automatically download a
pre-trained caffemodel. This caffemodel can be downloaded at
`GitHub <https://github.com/KaimingHe/deep-residual-networks>`_.
If you want to manually convert the pre-trained caffemodel to a chainer
model that can be specified in the constructor,
please use ``convert_caffemodel_to_npz`` classmethod instead.
    ResNet50 has 25,557,096 trainable parameters, which is 43% and 58% fewer
    than ResNet101 and ResNet152, respectively. On the other hand, its top-5
    classification accuracy on the ImageNet dataset drops only 0.7% and 1.1%
    relative to ResNet101 and ResNet152, respectively. ResNet50 may therefore
    offer the best balance between accuracy and model size. It is sufficient
    for many use cases, but some advanced models for object detection or
    semantic segmentation use deeper ResNets as their building blocks, so the
    deeper variants are provided here to make reproduction easier.
See: K. He et. al., `Deep Residual Learning for Image Recognition
<https://arxiv.org/abs/1512.03385>`_
Args:
pretrained_model (str): the destination of the pre-trained
chainer model serialized as a ``.npz`` file.
If this argument is specified as ``auto``,
it automatically loads and converts the caffemodel from
``$CHAINER_DATASET_ROOT/pfnet/chainer/models/ResNet-50-model.caffemodel``,
where ``$CHAINER_DATASET_ROOT`` is set as
``$HOME/.chainer/dataset`` unless you specify another value
by modifying the environment variable. Note that in this case the
            converted chainer model is stored in the same directory and
            automatically used from then on.
If this argument is specified as ``None``, all the parameters
are not initialized by the pre-trained model, but the default
initializer used in the original paper, i.e.,
``chainer.initializers.HeNormal(scale=1.0)``.
downsample_fb (bool): If this argument is specified as ``False``,
it performs downsampling by placing stride 2
on the 1x1 convolutional layers (the original MSRA ResNet).
If this argument is specified as ``True``, it performs downsampling
by placing stride 2 on the 3x3 convolutional layers
(Facebook ResNet).
Attributes:
available_layers (list of str): The list of available layer names
used by ``forward`` and ``extract`` methods.
"""
def __init__(self, pretrained_model='auto', downsample_fb=False):
if pretrained_model == 'auto':
pretrained_model = 'ResNet-50-model.caffemodel'
super(ResNet50Layers, self).__init__(
pretrained_model, 50, downsample_fb)
class ResNet101Layers(ResNetLayers):
"""A pre-trained CNN model with 101 layers provided by MSRA.
When you specify the path of the pre-trained chainer model serialized as
a ``.npz`` file in the constructor, this chain model automatically
initializes all the parameters with it.
This model would be useful when you want to extract a semantic feature
vector per image, or fine-tune the model on a different dataset.
Note that unlike ``VGG16Layers``, it does not automatically download a
pre-trained caffemodel. This caffemodel can be downloaded at
`GitHub <https://github.com/KaimingHe/deep-residual-networks>`_.
If you want to manually convert the pre-trained caffemodel to a chainer
model that can be specified in the constructor,
please use ``convert_caffemodel_to_npz`` classmethod instead.
    ResNet101 has 44,549,224 trainable parameters, which is 26% fewer than
    ResNet152, while its top-5 classification accuracy on the ImageNet
    dataset drops only 0.4% relative to ResNet152. For many cases, ResNet50
    may still offer the best balance between accuracy and model size.
See: K. He et. al., `Deep Residual Learning for Image Recognition
<https://arxiv.org/abs/1512.03385>`_
Args:
pretrained_model (str): the destination of the pre-trained
chainer model serialized as a ``.npz`` file.
If this argument is specified as ``auto``,
it automatically loads and converts the caffemodel from
``$CHAINER_DATASET_ROOT/pfnet/chainer/models/ResNet-101-model.caffemodel``,
where ``$CHAINER_DATASET_ROOT`` is set as
``$HOME/.chainer/dataset`` unless you specify another value
by modifying the environment variable. Note that in this case the
            converted chainer model is stored in the same directory and
            automatically used from then on.
If this argument is specified as ``None``, all the parameters
are not initialized by the pre-trained model, but the default
initializer used in the original paper, i.e.,
``chainer.initializers.HeNormal(scale=1.0)``.
downsample_fb (bool): If this argument is specified as ``False``,
it performs downsampling by placing stride 2
on the 1x1 convolutional layers (the original MSRA ResNet).
If this argument is specified as ``True``, it performs downsampling
by placing stride 2 on the 3x3 convolutional layers
(Facebook ResNet).
Attributes:
available_layers (list of str): The list of available layer names
used by ``forward`` and ``extract`` methods.
"""
def __init__(self, pretrained_model='auto', downsample_fb=False):
if pretrained_model == 'auto':
pretrained_model = 'ResNet-101-model.caffemodel'
super(ResNet101Layers, self).__init__(
pretrained_model, 101, downsample_fb)
class ResNet152Layers(ResNetLayers):
"""A pre-trained CNN model with 152 layers provided by MSRA.
When you specify the path of the pre-trained chainer model serialized as
a ``.npz`` file in the constructor, this chain model automatically
initializes all the parameters with it.
This model would be useful when you want to extract a semantic feature
vector per image, or fine-tune the model on a different dataset.
Note that unlike ``VGG16Layers``, it does not automatically download a
pre-trained caffemodel. This caffemodel can be downloaded at
`GitHub <https://github.com/KaimingHe/deep-residual-networks>`_.
If you want to manually convert the pre-trained caffemodel to a chainer
model that can be specified in the constructor,
please use ``convert_caffemodel_to_npz`` classmethod instead.
    ResNet152 has 60,192,872 trainable parameters; it is the deepest ResNet
    model, and it achieved the best result on the ImageNet classification
    task in `ILSVRC 2015 <http://image-net.org/challenges/LSVRC/2015/results#loc>`_.
See: K. He et. al., `Deep Residual Learning for Image Recognition
<https://arxiv.org/abs/1512.03385>`_
Args:
pretrained_model (str): the destination of the pre-trained
chainer model serialized as a ``.npz`` file.
If this argument is specified as ``auto``,
it automatically loads and converts the caffemodel from
``$CHAINER_DATASET_ROOT/pfnet/chainer/models/ResNet-152-model.caffemodel``,
where ``$CHAINER_DATASET_ROOT`` is set as
``$HOME/.chainer/dataset`` unless you specify another value
by modifying the environment variable. Note that in this case the
            converted chainer model is stored in the same directory and
            automatically used from then on.
If this argument is specified as ``None``, all the parameters
are not initialized by the pre-trained model, but the default
initializer used in the original paper, i.e.,
``chainer.initializers.HeNormal(scale=1.0)``.
downsample_fb (bool): If this argument is specified as ``False``,
it performs downsampling by placing stride 2
on the 1x1 convolutional layers (the original MSRA ResNet).
If this argument is specified as ``True``, it performs downsampling
by placing stride 2 on the 3x3 convolutional layers
(Facebook ResNet).
Attributes:
available_layers (list of str): The list of available layer names
used by ``forward`` and ``extract`` methods.
"""
def __init__(self, pretrained_model='auto', downsample_fb=False):
if pretrained_model == 'auto':
pretrained_model = 'ResNet-152-model.caffemodel'
super(ResNet152Layers, self).__init__(
pretrained_model, 152, downsample_fb)
def prepare(image, size=(224, 224)):
"""Converts the given image to the numpy array for ResNets.
    Note that you have to call this function before ``forward``
    because the pre-trained ResNet models require the given image to be
    resized, converted from RGB to BGR, mean-subtracted, and
    dimension-permuted beforehand.
Args:
image (PIL.Image or numpy.ndarray): Input image.
If an input is ``numpy.ndarray``, its shape must be
``(height, width)``, ``(height, width, channels)``,
or ``(channels, height, width)``, and
the order of the channels must be RGB.
size (pair of ints): Size of converted images.
If ``None``, the given image is not resized.
Returns:
numpy.ndarray: The converted output array.
"""
if not available:
raise ImportError('PIL cannot be loaded. Install Pillow!\n'
'The actual import error is as follows:\n' +
str(_import_error))
dtype = chainer.get_dtype()
if isinstance(image, numpy.ndarray):
if image.ndim == 3:
if image.shape[0] == 1:
image = image[0, :, :]
elif image.shape[0] == 3:
image = image.transpose((1, 2, 0))
image = Image.fromarray(image.astype(numpy.uint8))
image = image.convert('RGB')
if size:
image = image.resize(size)
image = numpy.asarray(image, dtype=dtype)
image = image[:, :, ::-1]
# NOTE: in the original paper they subtract a fixed mean image,
# however, in order to support arbitrary size we instead use the
# mean pixel (rather than mean image) as with VGG team. The mean
# value used in ResNet is slightly different from that of VGG16.
image -= numpy.array(
[103.063, 115.903, 123.152], dtype=dtype)
image = image.transpose((2, 0, 1))
return image
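# Minimal sketch of manual preprocessing + forward (assumes ``model`` is an
# initialized ResNetLayers instance and ``img`` a PIL.Image or RGB ndarray):
#
#     x = concat_examples([prepare(img)])   # (1, 3, 224, 224), BGR, mean-subtracted
#     feats = model.forward(Variable(model.xp.asarray(x)), layers=['res5'])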
class BuildingBlock(link.Chain):
"""A building block that consists of several Bottleneck layers.
Args:
n_layer (int): Number of layers used in the building block.
in_channels (int): Number of channels of input arrays.
mid_channels (int): Number of channels of intermediate arrays.
out_channels (int): Number of channels of output arrays.
stride (int or tuple of ints): Stride of filter application.
initialW (4-D array): Initial weight value used in
the convolutional layers.
downsample_fb (bool): If this argument is specified as ``False``,
it performs downsampling by placing stride 2
on the 1x1 convolutional layers (the original MSRA ResNet).
If this argument is specified as ``True``, it performs downsampling
by placing stride 2 on the 3x3 convolutional layers
(Facebook ResNet).
"""
def __init__(self, n_layer, in_channels, mid_channels,
out_channels, stride, initialW=None, downsample_fb=False):
super(BuildingBlock, self).__init__()
with self.init_scope():
self.a = BottleneckA(
in_channels, mid_channels, out_channels, stride,
initialW, downsample_fb)
self._forward = ["a"]
for i in range(n_layer - 1):
name = 'b{}'.format(i + 1)
bottleneck = BottleneckB(out_channels, mid_channels, initialW)
setattr(self, name, bottleneck)
self._forward.append(name)
def forward(self, x):
for name in self._forward:
l = getattr(self, name)
x = l(x)
return x
class BottleneckA(link.Chain):
"""A bottleneck layer that reduces the resolution of the feature map.
Args:
in_channels (int): Number of channels of input arrays.
mid_channels (int): Number of channels of intermediate arrays.
out_channels (int): Number of channels of output arrays.
stride (int or tuple of ints): Stride of filter application.
initialW (4-D array): Initial weight value used in
the convolutional layers.
downsample_fb (bool): If this argument is specified as ``False``,
it performs downsampling by placing stride 2
on the 1x1 convolutional layers (the original MSRA ResNet).
If this argument is specified as ``True``, it performs downsampling
by placing stride 2 on the 3x3 convolutional layers
(Facebook ResNet).
"""
def __init__(self, in_channels, mid_channels, out_channels,
stride=2, initialW=None, downsample_fb=False):
super(BottleneckA, self).__init__()
# In the original MSRA ResNet, stride=2 is on 1x1 convolution.
# In Facebook ResNet, stride=2 is on 3x3 convolution.
stride_1x1, stride_3x3 = (1, stride) if downsample_fb else (stride, 1)
with self.init_scope():
self.conv1 = Convolution2D(
in_channels, mid_channels, 1, stride_1x1, 0, initialW=initialW,
nobias=True)
self.bn1 = BatchNormalization(mid_channels)
self.conv2 = Convolution2D(
mid_channels, mid_channels, 3, stride_3x3, 1,
initialW=initialW, nobias=True)
self.bn2 = BatchNormalization(mid_channels)
self.conv3 = Convolution2D(
mid_channels, out_channels, 1, 1, 0, initialW=initialW,
nobias=True)
self.bn3 = BatchNormalization(out_channels)
self.conv4 = Convolution2D(
in_channels, out_channels, 1, stride, 0, initialW=initialW,
nobias=True)
self.bn4 = BatchNormalization(out_channels)
def forward(self, x):
h1 = relu(self.bn1(self.conv1(x)))
h1 = relu(self.bn2(self.conv2(h1)))
h1 = self.bn3(self.conv3(h1))
h2 = self.bn4(self.conv4(x))
return relu(h1 + h2)
class BottleneckB(link.Chain):
"""A bottleneck layer that maintains the resolution of the feature map.
Args:
in_channels (int): Number of channels of input and output arrays.
mid_channels (int): Number of channels of intermediate arrays.
initialW (4-D array): Initial weight value used in
the convolutional layers.
"""
def __init__(self, in_channels, mid_channels, initialW=None):
super(BottleneckB, self).__init__()
with self.init_scope():
self.conv1 = Convolution2D(
in_channels, mid_channels, 1, 1, 0, initialW=initialW,
nobias=True)
self.bn1 = BatchNormalization(mid_channels)
self.conv2 = Convolution2D(
mid_channels, mid_channels, 3, 1, 1, initialW=initialW,
nobias=True)
self.bn2 = BatchNormalization(mid_channels)
self.conv3 = Convolution2D(
mid_channels, in_channels, 1, 1, 0, initialW=initialW,
nobias=True)
self.bn3 = BatchNormalization(in_channels)
def forward(self, x):
h = relu(self.bn1(self.conv1(x)))
h = relu(self.bn2(self.conv2(h)))
h = self.bn3(self.conv3(h))
return relu(h + x)
def _global_average_pooling_2d(x):
n, channel, rows, cols = x.shape
h = average_pooling_2d(x, (rows, cols), stride=1)
h = reshape(h, (n, channel))
return h
def _transfer_components(src, dst_conv, dst_bn, bname, cname):
src_conv = getattr(src, 'res{}_branch{}'.format(bname, cname))
src_bn = getattr(src, 'bn{}_branch{}'.format(bname, cname))
src_scale = getattr(src, 'scale{}_branch{}'.format(bname, cname))
dst_conv.W.array[:] = src_conv.W.array
dst_bn.avg_mean[:] = src_bn.avg_mean
dst_bn.avg_var[:] = src_bn.avg_var
dst_bn.gamma.array[:] = src_scale.W.array
dst_bn.beta.array[:] = src_scale.bias.b.array
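# Naming sketch: with bname='2a' and cname='2b', the attributes looked up on
# the caffemodel are res2a_branch2b, bn2a_branch2b and scale2a_branch2b,
# matching the MSRA prototxt layer-naming convention.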
def _transfer_bottleneckA(src, dst, name):
_transfer_components(src, dst.conv1, dst.bn1, name, '2a')
_transfer_components(src, dst.conv2, dst.bn2, name, '2b')
_transfer_components(src, dst.conv3, dst.bn3, name, '2c')
_transfer_components(src, dst.conv4, dst.bn4, name, '1')
def _transfer_bottleneckB(src, dst, name):
_transfer_components(src, dst.conv1, dst.bn1, name, '2a')
_transfer_components(src, dst.conv2, dst.bn2, name, '2b')
_transfer_components(src, dst.conv3, dst.bn3, name, '2c')
def _transfer_block(src, dst, names):
_transfer_bottleneckA(src, dst.a, names[0])
for i, name in enumerate(names[1:]):
dst_bottleneckB = getattr(dst, 'b{}'.format(i + 1))
_transfer_bottleneckB(src, dst_bottleneckB, name)
def _transfer_resnet50(src, dst):
dst.conv1.W.array[:] = src.conv1.W.array
dst.conv1.b.array[:] = src.conv1.b.array
dst.bn1.avg_mean[:] = src.bn_conv1.avg_mean
dst.bn1.avg_var[:] = src.bn_conv1.avg_var
dst.bn1.gamma.array[:] = src.scale_conv1.W.array
dst.bn1.beta.array[:] = src.scale_conv1.bias.b.array
_transfer_block(src, dst.res2, ['2a', '2b', '2c'])
_transfer_block(src, dst.res3, ['3a', '3b', '3c', '3d'])
_transfer_block(src, dst.res4, ['4a', '4b', '4c', '4d', '4e', '4f'])
_transfer_block(src, dst.res5, ['5a', '5b', '5c'])
dst.fc6.W.array[:] = src.fc1000.W.array
dst.fc6.b.array[:] = src.fc1000.b.array
def _transfer_resnet101(src, dst):
dst.conv1.W.array[:] = src.conv1.W.array
dst.bn1.avg_mean[:] = src.bn_conv1.avg_mean
dst.bn1.avg_var[:] = src.bn_conv1.avg_var
dst.bn1.gamma.array[:] = src.scale_conv1.W.array
dst.bn1.beta.array[:] = src.scale_conv1.bias.b.array
_transfer_block(src, dst.res2, ['2a', '2b', '2c'])
_transfer_block(src, dst.res3, ['3a', '3b1', '3b2', '3b3'])
_transfer_block(src, dst.res4,
['4a'] + ['4b{}'.format(i) for i in range(1, 23)])
_transfer_block(src, dst.res5, ['5a', '5b', '5c'])
dst.fc6.W.array[:] = src.fc1000.W.array
dst.fc6.b.array[:] = src.fc1000.b.array
def _transfer_resnet152(src, dst):
dst.conv1.W.array[:] = src.conv1.W.array
dst.bn1.avg_mean[:] = src.bn_conv1.avg_mean
dst.bn1.avg_var[:] = src.bn_conv1.avg_var
dst.bn1.gamma.array[:] = src.scale_conv1.W.array
dst.bn1.beta.array[:] = src.scale_conv1.bias.b.array
_transfer_block(src, dst.res2, ['2a', '2b', '2c'])
_transfer_block(src, dst.res3,
['3a'] + ['3b{}'.format(i) for i in range(1, 8)])
_transfer_block(src, dst.res4,
['4a'] + ['4b{}'.format(i) for i in range(1, 36)])
_transfer_block(src, dst.res5, ['5a', '5b', '5c'])
dst.fc6.W.array[:] = src.fc1000.W.array
dst.fc6.b.array[:] = src.fc1000.b.array
def _make_npz(path_npz, path_caffemodel, model, n_layers):
    sys.stderr.write(
        'Now loading caffemodel (this may take a few minutes)\n')
sys.stderr.flush()
if not os.path.exists(path_caffemodel):
raise IOError(
'The pre-trained caffemodel does not exist. Please download it '
'from \'https://github.com/KaimingHe/deep-residual-networks\', '
'and place it on {}'.format(path_caffemodel))
ResNetLayers.convert_caffemodel_to_npz(path_caffemodel, path_npz, n_layers)
npz.load_npz(path_npz, model)
return model
def _retrieve(n_layers, name_npz, name_caffemodel, model):
root = download.get_dataset_directory('pfnet/chainer/models/')
path = os.path.join(root, name_npz)
path_caffemodel = os.path.join(root, name_caffemodel)
return download.cache_or_load_file(
path, lambda path: _make_npz(path, path_caffemodel, model, n_layers),
lambda path: npz.load_npz(path, model))
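# Manual conversion sketch (hypothetical paths), mirroring what _retrieve
# does lazily through the download cache:
#
#     ResNetLayers.convert_caffemodel_to_npz(
#         'ResNet-50-model.caffemodel', 'ResNet-50-model.npz', n_layers=50)
#     model = ResNet50Layers(pretrained_model='ResNet-50-model.npz')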
|
|
# This module is for creating new characters.
#imports
import os.path
class NewCharacter(object):
    def __init__(
        self, c_p, s_p, e_p, g_p, p_p,
        paralyzation_poison_death_magic,
        rod_staff_wand,
        petrification_polymorph,
        breath_weapon, spell,
        body_armor, head_gear, shield,
        character_name, char_class, age, sex,
        height, weight, hair, eyes, skin,
        hit_points, experience, max_press,
        open_doors, bend_bars__lift_gates,
        surprise, system_shock, max_henchman,
        loyalty_base, reaction_adjustment,
        death_max, deaths_to_date, resurrection_survival,
        strength, dexterity, constitution, intelligence,
        wisdom, charisma, armor_class_base, armor_class, race,
        secondary_skill, secondary_skill_2
    ):
self.c_p = c_p
self.s_p = s_p
self.e_p = e_p
self.g_p = g_p
self.p_p = p_p
self.paralyzation_poison_death_magic = paralyzation_poison_death_magic
self.rod_staff_wand = rod_staff_wand
self.petrification_polymorph = petrification_polymorph
self.breath_weapon = breath_weapon
self.spell = spell
self.body_armor = body_armor
self.head_gear = head_gear
self.shield = shield
self.character_name = character_name
self.char_class = char_class
self.age = age
self.sex = sex
self.height = height
self.weight = weight
self.hair = hair
self.eyes = eyes
self.skin = skin
self.hit_points = hit_points
self.experience = experience
self.max_press = max_press
self.open_doors = open_doors
self.bend_bars__lift_gates = bend_bars__lift_gates
self.surprise = surprise
self.system_shock = system_shock
self.max_henchman = max_henchman
self.loyalty_base = loyalty_base
self.reaction_adjustment = reaction_adjustment
self.death_max = death_max
self.deaths_to_date = deaths_to_date
self.resurrection_survival = resurrection_survival
self.strength = strength
self.dexterity = dexterity
self.constitution = constitution
self.intelligence = intelligence
self.wisdom = wisdom
self.charisma = charisma
self.armor_class_base = armor_class_base
self.armor_class = armor_class
self.race = race
self.secondary_skill = secondary_skill
self.secondary_skill_2 = secondary_skill_2
    def coins(self):
        coins = {
            "c.p.": self.c_p,
            "s.p.": self.s_p,
            "e.p.": self.e_p,
            "g.p.": self.g_p,
            "p.p.": self.p_p,
        }
        with open(os.path.abspath("Characters/" + str(self.character_name)), 'a') as f:
            f.write("\n---COINS:---\n")
            for key in coins:
                f.write(key + ": " + str(coins[key]) + "\n")
            f.write("\n")
        return
    def saving_throws(self):
        saving_throws = {
            "Paralyzation/Poison/Death Magic": self.paralyzation_poison_death_magic,
            "Rod/Staff/Wand": self.rod_staff_wand,
            "Petrification/Polymorph": self.petrification_polymorph,
            "Breath Weapon": self.breath_weapon,
            "Spell": self.spell,
        }
        with open(os.path.abspath("Characters/" + str(self.character_name)), 'a') as f:
            f.write("\n---SAVING THROWS:---\n")
            for key in saving_throws:
                f.write(key + ": " + str(saving_throws[key]) + "\n")
            f.write("\n")
        return
    def armor_worn(self):
        armor_worn = {
            "Body Armor": self.body_armor,
            "Head Gear": self.head_gear,
            "Shield": self.shield,
        }
        with open(os.path.abspath("Characters/" + str(self.character_name)), 'a') as f:
            f.write("\n---ARMOR:---\n")
            for key in armor_worn:
                f.write(key + ": " + str(armor_worn[key]) + "\n")
            f.write("\n")
        return
    def char_personal(self):
        char_personal = {
            "Character Name": self.character_name,
            "Race": self.race,
            "Class": self.char_class,
            "Age": self.age,
            "Sex": self.sex,
            "Height": self.height,
            "Weight": self.weight,
            "Hair": self.hair,
            "Eyes": self.eyes,
            "Skin": self.skin,
            "Hit Points": self.hit_points,
            "Experience": self.experience,
        }
        with open(os.path.abspath("Characters/" + str(self.character_name)), 'a') as f:
            f.write("\n---PERSONAL CHARACTERISTICS:---\n")
            for key in char_personal:
                f.write(key + ": " + str(char_personal[key]) + "\n")
            f.write("\n")
        return
    def char_misc(self):
        char_misc = {
            "Max Press": self.max_press,
            "Open Doors": self.open_doors,
            "Bend Bars/Lift Gates": self.bend_bars__lift_gates,
            "Surprise": self.surprise,
            "System Shock": self.system_shock,
            "Max # Henchman": self.max_henchman,
            "Loyalty Base": self.loyalty_base,
            "Reaction Adjustment": self.reaction_adjustment,
            "DEATH Max #": self.death_max,
            "Deaths to date": self.deaths_to_date,
            "Resurrection Survival": self.resurrection_survival,
            "Secondary Skill": self.secondary_skill,
            "Secondary Skill 2": self.secondary_skill_2,
        }
        with open(os.path.abspath("Characters/" + str(self.character_name)), 'a') as f:
            f.write("\n---MISC CHARACTERISTICS:---\n")
            for key in char_misc:
                f.write(key + ": " + str(char_misc[key]) + "\n")
            f.write("\n")
        return
    def char_stats(self):
        char_stats = {
            "Strength": self.strength,
            "Dexterity": self.dexterity,
            "Constitution": self.constitution,
            "Intelligence": self.intelligence,
            "Wisdom": self.wisdom,
            "Charisma": self.charisma,
            "Armor Class Base": self.armor_class_base,
            "Armor Class": self.armor_class,
        }
        with open(os.path.abspath("Characters/" + str(self.character_name)), 'a') as f:
            f.write("\n---STATS:---\n")
            for key in char_stats:
                f.write(key + ": " + str(char_stats[key]) + "\n")
            f.write("\n")
        return
##########################################################################
##########################################################################
def new_char(char_name, stats):
    stat_list = list(stats)
    # Bind the new instance to its own name so it does not shadow the
    # char_name parameter.
    character = NewCharacter(
stat_list[0], #c_p
stat_list[1], #s_p
stat_list[2], #e_p
stat_list[3], #g_p
stat_list[4], #p_p
stat_list[5], #paralyzation_poison_death_magic
stat_list[6], #rod_staff_wand
stat_list[7], #petrification_polymorph
stat_list[8], #breath_weapon
stat_list[9], #spell
stat_list[10], #body_armor
stat_list[11], #head_gear
stat_list[12], #shield
stat_list[13], #character_name
stat_list[14], #char_class
stat_list[15], #age
stat_list[16], #sex
stat_list[17], #height
stat_list[18], #weight
stat_list[19], #hair
stat_list[20], #eyes
stat_list[21], #skin
stat_list[22], #hit_points
stat_list[23], #experience
stat_list[24], #max_press
stat_list[25], #open_doors
stat_list[26], #bend_bars__lift_gates
stat_list[27], #surprise
stat_list[28], #system_shock
stat_list[29], #max_henchman
stat_list[30], #loyalty_base
stat_list[31], #reaction_adjustment
stat_list[32], #death_max
stat_list[33], #deaths_to_date
stat_list[34], #resurrection_survival
stat_list[35], #strength
stat_list[36], #dexterity
stat_list[37], #constitution
stat_list[38], #intelligence
stat_list[39], #wisdom
stat_list[40], #charisma
stat_list[41], #armor_class_base
stat_list[42], #armor_class
stat_list[43], #race
stat_list[44], #secondary_skill
stat_list[45], #secondary_skill_2
)
    character.char_personal()
    character.armor_worn()
    character.saving_throws()
    character.coins()
    character.char_misc()
    character.char_stats()
return
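# Usage sketch (hypothetical values; ``stats`` must supply the 46 fields in
# the exact order commented above, and a ``Characters/`` directory must
# already exist):
#
#     stats = [0] * 46
#     stats[13] = "Bob"        # character_name
#     stats[14] = "Fighter"    # char_class
#     stats[43] = "Human"      # race
#     new_char("Bob", stats)   # appends Bob's sheet to Characters/Bob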
|
|
#!/usr/bin/env python
"""
EvoLife Cellular Automaton implementation using CUDA.
Rules are:
- Each living cell has its own birth/sustain ruleset and an energy level;
- A cell loses all energy if its number of neighbours is not in its sustain rule;
- A cell is born with max energy if there are exactly N neighbours with N in their birth rule;
- The same applies to living cells (the re-occupation case), but only with different genomes;
- If several birth situations with different N are possible, the one with the largest N is chosen;
- A newly born cell's ruleset is calculated as a crossover between the 'parent' cells' rulesets;
- If a cell is involved in breeding as a 'parent', it loses `BIRTH_COST` units of energy per non-zero gene passed;
- This does not apply in the re-occupation case;
- Every turn, a cell loses `DEATH_SPEED` units of energy;
- A cell with zero energy dies;
- A cell cannot have more than `MAX_GENES` non-zero genes in its ruleset.
An additional rule: the board has a torus topology.
So, if all cells initially have the B3/S23 ruleset, DEATH_SPEED = BIRTH_COST = 0, and MAX_GENES >= 3, we get exact Conway rules.
But if there was more than one ruleset initially, evolution may begin.
There are 2^17 possible rulesets, only a small fraction of which have been
studied in any detail. So, who knows what we may discover with evolutionary rules :)
CONTROLS:
Arrows move field
+/- zoom in/out
]/[ speed up/down
F toggle fullscreen
S dump board state to a file
Q/ESC quit
Prerequisites: pycuda, numpy, scipy, pygame, scikit-image
Debian: apt-get install python-pycuda python-numpy python-pygame python-scipy python-skimage python-setuptools
Author: a5kin
Copyright: MIT License.
"""
import sys, time, math, colorsys, random, traceback
import pygame
from pygame.locals import *
import numpy as np
from scipy.misc import imsave
import scipy.ndimage.interpolation
from skimage import transform as tf
import importlib
import pycuda.driver as drv
import pycuda.tools
import pycuda.autoinit
from pycuda.compiler import SourceModule
import pycuda.gpuarray as gpuarray
from pycuda.elementwise import ElementwiseKernel
try:
expmod = importlib.import_module('experiments2.' + sys.argv[1])
DEATH_SPEED = expmod.DEATH_SPEED
BIRTH_COST = expmod.BIRTH_COST
MAX_GENES = expmod.MAX_GENES
FIELD_WIDTH = expmod.FIELD_WIDTH
FIELD_HEIGHT = expmod.FIELD_HEIGHT
SAVE_FRAMES = expmod.SAVE_FRAMES
DOWNSCALE_FACTOR = expmod.DOWNSCALE_FACTOR
FRAME_SKIP = expmod.FRAME_SKIP
RANDOM_SEED = expmod.RANDOM_SEED
FADE_IN = expmod.FADE_IN
FADE_OUT = expmod.FADE_OUT
fld_init = expmod.fld_init
except (ImportError, IndexError):
print "No experiment preset found, loading default (big_bang)."
DEATH_SPEED = 0
BIRTH_COST = 0
MAX_GENES = 9
FIELD_WIDTH = 1280
FIELD_HEIGHT = 720
SAVE_FRAMES = False
DOWNSCALE_FACTOR = 1
FRAME_SKIP = 1
RANDOM_SEED = None
FADE_IN = 6
FADE_OUT = 6
    def fld_init(a):
        # Seed a 100x100 corner with random genomes; leave the rest empty.
        return np.asarray(
            [[(random.choice([0, 1]) * random.randint(0, 256 * 512)
               if (i < 100 and j < 100) else 0)
              for j in range(a.height)]
             for i in range(a.width)]).astype(np.int32)
except:
print traceback.format_exc()
sys.exit(0)
step_gpu = ElementwiseKernel("unsigned int *fld, unsigned int *fld_new, unsigned int *seeds, unsigned int *bufs, unsigned int *img, int w, int h", """
int x = i / h;
int y = i % h;
// torus topology emulation
int xm1 = x - 1; if (xm1 < 0) xm1 = w + xm1;
int xp1 = x + 1; if (xp1 >= w) xp1 = xp1 - w;
int ym1 = y - 1; if (ym1 < 0) ym1 = h + ym1;
int yp1 = y + 1; if (yp1 >= h) yp1 = yp1 - h;
// cache neighbours values
uint f0 = fld[i];
uint f1 = fld[xm1 * h + ym1];
uint f2 = fld[x * h + ym1];
uint f3 = fld[xp1 * h + ym1];
uint f4 = fld[xm1 * h + y];
uint f5 = fld[xp1 * h + y];
uint f6 = fld[xm1 * h + yp1];
uint f7 = fld[x * h + yp1];
uint f8 = fld[xp1 * h + yp1];
uint energy = (f0 >> 17);
// total number of neighbours
int N = EXISTS(f1) + EXISTS(f2) + EXISTS(f3) + EXISTS(f4) +
EXISTS(f5) + EXISTS(f6) + EXISTS(f7) + EXISTS(f8);
    if (energy >= 0xff || N == 0 || (f0 > 0 && (((f0 >> 8) & (1 << N)) == 0))) {
// cell is dying
fld_new[i] = 0;
//img[i] = fadeout(img0, 5);
} else {
uint f00 = f0;
for (int ni = 8; ni > 0; ni--) {
// no re-occupation rule, breeding in empty cells only
//if (f0 > 0) break;
// cache neighbours breeding fitnesses
int ff1 = FIT(f1, ni);
int ff2 = FIT(f2, ni);
int ff3 = FIT(f3, ni);
int ff4 = FIT(f4, ni);
int ff5 = FIT(f5, ni);
int ff6 = FIT(f6, ni);
int ff7 = FIT(f7, ni);
int ff8 = FIT(f8, ni);
if (ff1 + ff2 + ff3 + ff4 + ff5 + ff6 + ff7 + ff8 == ni) {
// neighbours able to breed, cell is born
f0 = 0;
uint gene_num = 0;
//int genes_count = {2};
//int gene;
uint nit = (int) (ni / 2);
uint seed = seeds[i];
uint nonzero_genes_num = 0;
while (gene_num < 17) {
// pseudorandom cross breeding
uint rng = ((((seed + gene_num) * 58321) + 11113)) % 65535;
uint fg1 = (f1 >> gene_num) & ff1;
uint fg2 = (f2 >> gene_num) & ff2;
uint fg3 = (f3 >> gene_num) & ff3;
uint fg4 = (f4 >> gene_num) & ff4;
uint fg5 = (f5 >> gene_num) & ff5;
uint fg6 = (f6 >> gene_num) & ff6;
uint fg7 = (f7 >> gene_num) & ff7;
uint fg8 = (f8 >> gene_num) & ff8;
int n1 = fg1 + fg2 + fg3 + fg4 + fg5 + fg6 + fg7 + fg8;
//if ((int) (n1 * 65535 / ni) < 65535 && (int) (n1 * 65535 / ni) > 0)
// printf("%d %d | ", rng, (int) (n1 * 65535 / ni));
//if (n1 > nit) {
if ((int) (n1 * 65535 / ni) > rng) {
f0 += 1 << gene_num;
nonzero_genes_num += 1;
if ({1}) {
if (fg1) atomicAdd(&bufs[xm1 * h + ym1], ({1} << 17));
if (fg2) atomicAdd(&bufs[x * h + ym1], ({1} << 17));
if (fg3) atomicAdd(&bufs[xp1 * h + ym1], ({1} << 17));
if (fg4) atomicAdd(&bufs[xm1 * h + y], ({1} << 17));
if (fg5) atomicAdd(&bufs[xp1 * h + y], ({1} << 17));
if (fg6) atomicAdd(&bufs[xm1 * h + yp1], ({1} << 17));
if (fg7) atomicAdd(&bufs[x * h + yp1], ({1} << 17));
if (fg8) atomicAdd(&bufs[xp1 * h + yp1], ({1} << 17));
}
}
gene_num++;
}
if (nonzero_genes_num > {2}) f0 = 0;
seeds[i] = (((seed * 58321) + 11113)) % 65535;
//if (f0 != 3076 && f0 != 31820) printf("%d ", f0);
break;
}
}
if ((f00 & 0x1ffff) == (f0 & 0x1ffff)) {
f0 = f00;
if (f0 != 0) {
f0 += ({0} << 17);
}
}
fld_new[i] = f0;
}
""".replace("{0}", str(DEATH_SPEED)).replace("{1}", str(BIRTH_COST)).replace("{2}", str(MAX_GENES)), "ca_step", preamble="""
#include <stdio.h>
#define EXISTS(x) (x > 0 ? 1 : 0)
//#define FIT(x, n) ((n == 0 || (x & (1 << (n - 1))) == 0) ? 0 : 1)
#define FIT(x, n) ((x >> (n - 1)) & 1)
__device__ uint fadeout(int val, int step) {
uint red = (val & 0x00ff0000) >> 16;
if (red > step-1) red -= step; else red = 0;
uint green = (val & 0x0000ff00) >> 8;
if (green > step-1) green -= step; else green = 0;
uint blue = (val & 0x000000ff);
if (blue > step-1) blue -= step; else blue = 0;
return blue + (green << 8) + (red << 16);
}
""")
flush_bufs_gpu = ElementwiseKernel("unsigned int *fld_new, unsigned int *bufs, unsigned int *img, int w, int h", """
uint f0 = fld_new[i];
f0 += bufs[i];
uint energy = (f0 >> 17);
if (energy > 0xff) {
energy = 0xff;
f0 = 0;
}
fld_new[i] = f0;
bufs[i] = 0;
uint img0 = img[i];
uint tc = hsv2rgb((f0 & 0x1ffff) % 360, 0xff - energy, 255);
if (f0 == 0) tc = 0;
int tr = (tc >> 16) & 0xff;
int tg = (tc >> 8) & 0xff;
int tb = tc & 0xff;
int cr = (img0 >> 16) & 0xff;
int cg = (img0 >> 8) & 0xff;
int cb = img0 & 0xff;
cr = max(min(tr, cr + FADE_IN), cr - FADE_OUT);
cg = max(min(tg, cg + FADE_IN), cg - FADE_OUT);
cb = max(min(tb, cb + FADE_IN), cb - FADE_OUT);
img[i] = ((uint) cr << 16) + ((uint) cg << 8) + (uint) cb;
""", "ca_flush", preamble="""
#include <stdio.h>
#define FADE_IN {fade_in}
#define FADE_OUT {fade_out}
__device__ uint hsv2rgb(int hue, int sat, int val) {
float r, g, b;
float h, s, v;
h = hue;
s = fmin(255, (float) sat);
s /= 255;
v = fmin(255, (float) val);
float f = ((float) h) / 60.0f;
float hi = floorf(f);
f = f - hi;
int p = (int) (v * (1 - s));
int q = (int) (v * (1 - s * f));
int t = (int) (v * (1 - s * (1 - f)));
if(hi == 0.0f || hi == 6.0f) {
r = v; g = t; b = p;
} else if (hi == 1.0f) {
r = q; g = v; b = p;
} else if (hi == 2.0f) {
r = p; g = v; b = t;
} else if (hi == 3.0f) {
r = p; g = q; b = v;
} else if (hi == 4.0f) {
r = t; g = p; b = v;
} else {
r = v; g = p; b = q;
}
unsigned int color = b + g * 256 + r * 256 * 256;
return color;
}
""".replace("{fade_in}", str(FADE_IN)).replace("{fade_out}", str(FADE_OUT)))
class EvoLife:
def __init__(self, width=0, height=0, fullscreen=False, saveframes=False, downscale_factor=1, frame_skip=1):
print "Initializing PyGame...",
pygame.init()
self.title = 'EvoLife Cellular Automaton /w CUDA'
self.saveframes = saveframes
self.downscale_factor = downscale_factor
self.movie_frame = 0
pygame.display.set_caption(self.title, 'CUDA Life')
modes = pygame.display.list_modes()
modes.sort()
modes.reverse()
self.width = width if width else modes[0][0]
self.height = height if height else modes[0][1]
self.frame_skip = frame_skip
print "done."
print "Initializing GPU stuff...",
if RANDOM_SEED:
random.seed(RANDOM_SEED)
seeds = np.asarray([[random.randint(1, 50000) for j in range(self.height)] for i in range(self.width)]).astype(np.int32)
bufs = np.zeros((self.width, self.height), dtype=np.int32)
fld = fld_init(self)
self.f1_gpu = gpuarray.to_gpu(fld)
self.f2_gpu = gpuarray.to_gpu(fld.copy())
self.seeds_gpu = gpuarray.to_gpu(seeds)
self.bufs_gpu = gpuarray.to_gpu(bufs)
self.img_gpu = gpuarray.to_gpu(np.asarray([[0 for v in row] for row in fld]).astype(np.int32))
print "done."
print "Initializing display...",
self.srf = pygame.display.set_mode((self.width / self.downscale_factor, self.height / self.downscale_factor))
if fullscreen:
pygame.display.toggle_fullscreen()
print "done: %sx%s." % (self.width / self.downscale_factor, self.height / self.downscale_factor)
self.t = 0
self.zoom = 1
self.dx = 0
self.dy = 0
self.last_checked = time.time()
self.last_t = 0
def genome2str(self, g):
f = ""
for i in xrange(8):
if ((1 << i) & g) != 0:
f += str(i+1)
f += "/"
g = g >> 8
for i in xrange(9):
if ((1 << i) & g) != 0:
f += str(i)
return f
def str2genome(self, s):
g = 0
b, s = s.split("/")
for i in b:
g += (1 << (int(i)-1))
for i in s:
g += (1 << (int(i)+8))
return g
def species_chart(self):
world = self.f1_gpu.get()
species = np.unique(world & 0x1ffff, return_counts=True)
species = zip(species[1][1:], species[0][1:])
species.sort()
species.reverse()
print "SN=%s |" % len(species),
for s in species[:10]:
print "%s (%s) |" % (self.genome2str(s[1]), s[0]),
print
def step(self):
start_time = time.time()
step_gpu(self.f1_gpu, self.f2_gpu, self.seeds_gpu, self.bufs_gpu, self.img_gpu, np.uint32(self.width), np.uint32(self.height))
flush_bufs_gpu(self.f2_gpu, self.bufs_gpu, self.img_gpu, np.uint32(self.width), np.uint32(self.height))
tmp = self.f1_gpu
self.f1_gpu = self.f2_gpu
self.f2_gpu = tmp
self.t += 1
self.last_t += 1
if self.t % self.frame_skip == 0:
dest = self.img_gpu.get()
dest = np.reshape(dest, (self.width, self.height), order='F')
if self.dx:
dest = np.roll(dest, self.dx, axis=1)
if self.dy:
dest = np.roll(dest, self.dy, axis=0)
if self.zoom > 1:
dest = dest[:self.width // self.zoom + 1, :self.height // self.zoom + 1]
dest = dest.repeat(self.zoom, axis=0).repeat(self.zoom, axis=1)
dest = dest[:self.width, :self.height]
if self.downscale_factor != 1:
dest = dest.view(np.uint8).reshape(dest.shape+(4,))[..., :3]
dest = (tf.resize(dest, (self.width / self.downscale_factor, self.height / self.downscale_factor, 3), order=1) * 255).astype(np.int32)
tmp = dest[:,:,0].copy()
dest[:,:,0] = dest[:,:,2]
dest[:,:,2] = tmp
if self.saveframes:
pygame.image.save(self.srf, "movie/frame%s.png" % str(self.movie_frame).zfill(8))
self.movie_frame += 1
pygame.surfarray.blit_array(self.srf, dest)
pygame.display.update()
if self.t % 100 == 0:
self.species_chart()
end_time = time.time()
if end_time - self.last_checked > 1:
elapsed_time = end_time - self.last_checked
pygame.display.set_caption(self.title + " | Step %s: %.2f steps/s @%sx" % (self.t, float(self.last_t) / elapsed_time, self.frame_skip), 'CUDA EvoLife')
self.last_checked = time.time()
self.last_t = 0
def run(self):
while True:
self.step()
events = pygame.event.get()
need_exit = False
for e in events:
                if e.type == QUIT or (e.type == KEYDOWN and e.key in (K_ESCAPE, K_q)):
need_exit = True
break
if e.type==KEYDOWN:
if e.key==K_KP_PLUS or e.key==K_EQUALS:
self.zoom *= 2
if e.key==K_MINUS or e.key==K_KP_MINUS:
self.zoom = max(1, self.zoom / 2)
if e.key==K_RIGHTBRACKET:
self.frame_skip += 5
if e.key==K_LEFTBRACKET:
self.frame_skip = max(1, self.frame_skip - 5)
if e.key==K_UP:
self.dx += 10
if e.key==K_DOWN:
self.dx -= 10
if e.key==K_LEFT:
self.dy += 10
if e.key==K_RIGHT:
self.dy -= 10
if e.key==K_f:
pygame.display.toggle_fullscreen()
if e.key==K_s:
np.save("fields/field.npy", self.f1_gpu.get())
if need_exit:
break
if __name__ == '__main__':
ca = EvoLife(FIELD_WIDTH, FIELD_HEIGHT, saveframes=SAVE_FRAMES, downscale_factor=DOWNSCALE_FACTOR, frame_skip=FRAME_SKIP)
ca.run()
|
|
import numpy as np
#import networkx as nx
from random import choice, random
#import math
import pandas as pd
import vincent
get_ipython().run_cell_magic(u'html', u'', u'<div id="d3-example"></div>\n<style>\n.node {stroke: #fff; stroke-width: 1.5px;}\nmarker {stroke: #999;}\n.link {stroke: #999; stroke-opacity: .3;}\n</style>\n<script src="force.js"></script>')
def edgetokey(e):
(u,v) = e
return '(' + str(u) + ', ' + str(v) + ')'
def isexpensive(G,expensiveedges,e):
if G.is_directed():
return edgetokey(e) in expensiveedges
else:
(u,v) = e
return edgetokey((u,v)) in expensiveedges or edgetokey((v,u)) in expensiveedges
def dictcounter(d, k, v=1):
    try:
        d[k] += v
    except KeyError:
        d[k] = v
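# Equivalent sketch using the standard library, should the dict-of-counters
# pattern ever be replaced:
#
#     from collections import defaultdict
#     d = defaultdict(int)
#     d[k] += v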
def plotrwresult(G):
visits = [G.node[node]['visits'] for node in G.nodes_iter() ]
norm_visits = np.array(visits)/float(G.graph['total_visits'])
deaths = [G.node[node]['deaths'] for node in G.nodes_iter() ]
norm_deaths = np.array(deaths)/float(sum(deaths))
if G.is_directed():
degrees = [ G.in_degree(node) for node in G.nodes_iter() ]
norm_degrees = np.array(degrees)/float(G.size())
else:
degrees = [ G.degree(node) for node in G.nodes_iter() ]
        norm_degrees = np.array(degrees)/float(2*G.size())
multi_iter1 = {'index':range(G.order()), 'Visits':norm_visits, 'Deaths':norm_deaths, 'Degree':norm_degrees}
line = vincent.Scatter(multi_iter1, iter_idx='index')
line.axis_titles(x='Vertex', y='Juice')
line.legend(title='Results')
line.width = 400
line.height = 300
line.marks[0].marks[0].properties.enter.opacity = vincent.ValueRef(value=1)
line.marks[0].marks[0].properties.update = vincent.PropertySet()
line.marks[0].marks[0].properties.update.size = vincent.ValueRef(value=100)
line.marks[0].marks[0].properties.hover = vincent.PropertySet()
line.marks[0].marks[0].properties.hover.size = vincent.ValueRef(value=200)
line.scales['shape'] = vincent.Scale(name='shape', type='ordinal',
domain=vincent.DataRef(data='table', field='data.col'),
range=["square", "circle", "triangle-down", "triangle-up"])
line.marks[0].marks[0].properties.enter.shape = vincent.ValueRef(scale="shape", field="data.col")
line.legends[0].shape = "shape"
return line
def randomwalk(G, frogs, P_die, T=10, expensiveedges = []):
G.graph['teleportations'] = {}
G.graph['waiting'] = {}
for node in G.nodes_iter():
G.node[node]['visits'] = 0
G.node[node]['deaths'] = 0
G.node[node]['frogs'] = 0
G.node[node]['incomingfrogs'] = 0
# Initialize edge traversal counters
for e in G.edges_iter():
G.edge[e[0]][e[1]]['timeline'] = {}
if not G.is_directed():
G.edge[e[0]][e[1]]['frogstosmall'] = 0
G.edge[e[0]][e[1]]['frogstolarge'] = 0
else:
G.edge[e[0]][e[1]]['frogs'] = 0
frogs_left = frogs
time = 0
G.graph['total_visits'] = 0
G.graph['death_times_sum'] = 0
#frog_locations = np.random.randint(0, high = G.number_of_nodes(), size = frogs).tolist()
na = np.array(G.nodes())
    # np.random.randint excludes `high`, so use G.order() to include the
    # last node index.
    frog_locations = na[np.random.randint(0, high=G.order(), size=frogs)]
for i in range(frogs):
G.node[frog_locations[i]]['frogs'] += 1
del frog_locations
while frogs_left:
time +=1
# Deal with old node frogs
for node in G.nodes_iter():
if G.node[node]['frogs'] == 0:
continue
for f in range(G.node[node]['frogs']):
# Flip coin to die
if random() < P_die:
G.node[node]['deaths'] += 1
G.graph['death_times_sum'] += time
frogs_left -= 1
continue
G.graph['total_visits'] +=1
# Node has successors
if len(G[node])>0:
loc = choice(G[node].keys())
if G.is_directed():
G.edge[node][loc]['frogs'] += 1
else:
if loc >= node:
G.edge[node][loc]['frogstolarge'] += 1
else:
G.edge[node][loc]['frogstosmall'] += 1
# Node does not have successors - Teleport
else:
loc = np.random.randint(0, high = G.number_of_nodes())
dictcounter(G.graph['teleportations'],time)
G.node[loc]['incomingfrogs'] += 1
G.node[loc]['visits'] +=1
G.node[node]['frogs'] = 0
# Deal with edge frogs
for (u,v) in G.edges_iter():
# Don't process expensive edges unless time is multiple of T
if isexpensive(G, expensiveedges, (u,v)) and not (time % T == ((u+v) % T)):
#if isexpensive(G, expensiveedges, (u,v)) and not (time % T == 0):
if G.is_directed():
dictcounter(G.graph['waiting'], time, G.edge[u][v]['frogs'])
else:
dictcounter(G.graph['waiting'], time, G.edge[u][v]['frogstosmall'])
dictcounter(G.graph['waiting'], time, G.edge[u][v]['frogstolarge'])
continue
if G.is_directed():
G.node[v]['incomingfrogs'] += G.edge[u][v]['frogs']
dictcounter(G.edge[u][v]['timeline'], time, G.edge[u][v]['frogs'])
G.edge[u][v]['frogs'] = 0
else:
if v >= u:
large = v
small = u
else:
large = u
small = v
G.node[large]['incomingfrogs'] += G.edge[u][v]['frogstolarge']
dictcounter(G.edge[u][v]['timeline'], time, G.edge[u][v]['frogstolarge'])
G.edge[u][v]['frogstolarge'] = 0
G.node[small]['incomingfrogs'] += G.edge[u][v]['frogstosmall']
dictcounter(G.edge[u][v]['timeline'], time, G.edge[u][v]['frogstosmall'])
G.edge[u][v]['frogstosmall'] = 0
# Deal with node incoming frogs
for node in G.nodes_iter():
G.node[node]['frogs'] += G.node[node]['incomingfrogs']
G.node[node]['incomingfrogs'] = 0
G.graph['endtime'] = time
def plotrwtraversal(G, expensiveedges=[], time=None, countfrogs = False):
data = []
index = []
for e in G.edges_iter():
key = edgetokey((e[0], e[1]))
sr = pd.Series(G.edge[e[0]][e[1]]['timeline'])
if not countfrogs:
sr[sr>0] = 1
data.append(sr)
index.append(key)
if time:
data.append(pd.Series(time*[0]))
index.append('hack')
df = pd.DataFrame(data, index=index).T
if time:
del df['hack']
dfexp = df[expensiveedges].copy()
dfcheap = df.copy()
dfcheap.drop(expensiveedges, 1, inplace=True)
dfexpcopy = dfexp.copy()
dfexpcopy[dfexp>0] = 1
cost = (dfexpcopy.sum(axis=1)).sum()
G.graph['cost'] = cost
if len(expensiveedges)>0:
datacost = []
indexcost = []
datacost.append(dfexp.sum(axis=1))
indexcost.append('Expensive')
datacost.append(dfcheap.sum(axis=1))
indexcost.append('Cheap')
dfcost = pd.DataFrame(datacost, index=indexcost).T
finaldf = dfcost
else:
finaldf = df
showteleportations = (len(G.graph['teleportations'])>0) and countfrogs
if showteleportations:
# Count teleportations
finaldf['Teleportation'] = pd.Series(G.graph['teleportations'])
    try:
        if countfrogs and len(G.graph['waiting']) > 0:
            finaldf['Waiting'] = pd.Series(G.graph['waiting'])
    except KeyError:
        pass
line = vincent.StackedArea(finaldf)
if not countfrogs:
line.axis_titles(x='Rounds', y='# edges traversed')
else:
line.axis_titles(x='Rounds', y='# frogs')
line.legend(title='Edge')
line.width = 400
line.height = 300
if not countfrogs:
line.scales[1].domain = [0, G.size()]
line.marks[0].marks[0].properties.enter.opacity = vincent.ValueRef(value=0.8)
if len(expensiveedges)>0:
if showteleportations:
line.colors(range_=['#ff0000','#50aa50','#6060aa', '#eeeeee'])
else:
line.colors(range_=['#ff0000','#50aa50', '#eeeeee'])
line.marks[0].marks[0].properties.update = vincent.PropertySet()
line.marks[0].marks[0].properties.update.size = vincent.ValueRef(value=100)
line.marks[0].marks[0].properties.hover = vincent.PropertySet()
line.marks[0].marks[0].properties.hover.size = vincent.ValueRef(value=200)
line.marks[0].marks[0].properties.update.opacity = vincent.ValueRef(value=0.8)
line.marks[0].marks[0].properties.hover.opacity = vincent.ValueRef(value=1)
return line, finaldf
# ------------------------------------------------------
def randomwalkold(G, frogs, P_die):
frog_locations = np.random.randint(0, high = G.number_of_nodes(), size = frogs).tolist()
G.graph['teleportations'] = {}
for node in G.nodes_iter():
G.node[node]['visits'] = 0
G.node[node]['deaths'] = 0
# Initialize edge traversal counters
for e in G.edges_iter():
G.edge[e[0]][e[1]]['timeline'] = {}
frogs_left = frogs
time = 0
G.graph['total_visits'] = 0
G.graph['death_times_sum'] = 0
while frogs_left:
time +=1
for f in range(frogs):
curloc = frog_locations[f]
# Skip dead frogs
if curloc == None:
continue
# Flip coin to die
if random() < P_die:
G.node[curloc]['deaths'] += 1
G.graph['death_times_sum'] += time
frog_locations[f] = None
frogs_left -= 1
continue
G.graph['total_visits'] +=1
# Node has successors
if len(G[curloc])>0:
frog_locations[f] = choice(G[curloc].keys())
loc = frog_locations[f]
dictcounter(G.edge[curloc][loc]['timeline'], time)
# Node does not have successors - Teleport
else:
frog_locations[f] = np.random.randint(0, high = G.number_of_nodes())
dictcounter(G.graph['teleportations'],time)
G.node[frog_locations[f]]['visits'] +=1
G.graph['endtime'] = time
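# Usage sketch (graph and parameters are hypothetical; assumes networkx is
# importable, per the commented-out import at the top -- note the code
# targets the networkx 1.x API: nodes_iter, edges_iter, G.node, G.edge):
#
#     import networkx as nx
#     G = nx.gnp_random_graph(50, 0.1, directed=True)
#     randomwalk(G, frogs=1000, P_die=0.05, T=10)
#     scatter = plotrwresult(G)                      # visits/deaths/degree
#     line, df = plotrwtraversal(G, countfrogs=True) # traversal timeline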
|
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import sys, copy, os, pickle, warnings
from twisted.trial import unittest, util
from twisted.application import service, internet, app
from twisted.persisted import sob
from twisted.python import log, usage
from twisted.python.util import sibpath
from twisted.internet import interfaces, defer
from twisted.protocols import wire, basic
from twisted.internet import protocol, reactor
from twisted.internet.utils import getProcessOutputAndValue
from twisted.application import reactors
try:
from twisted.web import microdom
gotMicrodom = True
except ImportError:
warnings.warn("Not testing xml persistence as twisted.web.microdom "
"not available")
gotMicrodom = False
oldAppSuppressions = [util.suppress(message='twisted.internet.app is deprecated',
category=DeprecationWarning)]
class Dummy:
processName=None
class TestService(unittest.TestCase):
def testName(self):
s = service.Service()
s.setName("hello")
self.failUnlessEqual(s.name, "hello")
def testParent(self):
s = service.Service()
p = service.MultiService()
s.setServiceParent(p)
self.failUnlessEqual(list(p), [s])
self.failUnlessEqual(s.parent, p)
def testApplicationAsParent(self):
s = service.Service()
p = service.Application("")
s.setServiceParent(p)
self.failUnlessEqual(list(service.IServiceCollection(p)), [s])
self.failUnlessEqual(s.parent, service.IServiceCollection(p))
def testNamedChild(self):
s = service.Service()
p = service.MultiService()
s.setName("hello")
s.setServiceParent(p)
self.failUnlessEqual(list(p), [s])
self.failUnlessEqual(s.parent, p)
self.failUnlessEqual(p.getServiceNamed("hello"), s)
def testDoublyNamedChild(self):
s = service.Service()
p = service.MultiService()
s.setName("hello")
s.setServiceParent(p)
self.failUnlessRaises(RuntimeError, s.setName, "lala")
def testDuplicateNamedChild(self):
s = service.Service()
p = service.MultiService()
s.setName("hello")
s.setServiceParent(p)
s = service.Service()
s.setName("hello")
self.failUnlessRaises(RuntimeError, s.setServiceParent, p)
def testDisowning(self):
s = service.Service()
p = service.MultiService()
s.setServiceParent(p)
self.failUnlessEqual(list(p), [s])
self.failUnlessEqual(s.parent, p)
s.disownServiceParent()
self.failUnlessEqual(list(p), [])
self.failUnlessEqual(s.parent, None)
def testRunning(self):
s = service.Service()
self.assert_(not s.running)
s.startService()
self.assert_(s.running)
s.stopService()
self.assert_(not s.running)
def testRunningChildren(self):
s = service.Service()
p = service.MultiService()
s.setServiceParent(p)
self.assert_(not s.running)
self.assert_(not p.running)
p.startService()
self.assert_(s.running)
self.assert_(p.running)
p.stopService()
self.assert_(not s.running)
self.assert_(not p.running)
    def testRunningChildren2(self):
s = service.Service()
def checkRunning():
self.assert_(s.running)
t = service.Service()
t.stopService = checkRunning
t.startService = checkRunning
p = service.MultiService()
s.setServiceParent(p)
t.setServiceParent(p)
p.startService()
p.stopService()
def testAddingIntoRunning(self):
p = service.MultiService()
p.startService()
s = service.Service()
self.assert_(not s.running)
s.setServiceParent(p)
self.assert_(s.running)
s.disownServiceParent()
self.assert_(not s.running)
def testPrivileged(self):
s = service.Service()
def pss():
s.privilegedStarted = 1
s.privilegedStartService = pss
s1 = service.Service()
p = service.MultiService()
s.setServiceParent(p)
s1.setServiceParent(p)
p.privilegedStartService()
self.assert_(s.privilegedStarted)
def testCopying(self):
s = service.Service()
s.startService()
s1 = copy.copy(s)
self.assert_(not s1.running)
self.assert_(s.running)
if hasattr(os, "getuid"):
curuid = os.getuid()
curgid = os.getgid()
else:
curuid = curgid = 0
class TestProcess(unittest.TestCase):
def testID(self):
p = service.Process(5, 6)
self.assertEqual(p.uid, 5)
self.assertEqual(p.gid, 6)
def testDefaults(self):
p = service.Process(5)
self.assertEqual(p.uid, 5)
self.assertEqual(p.gid, None)
p = service.Process(gid=5)
self.assertEqual(p.uid, None)
self.assertEqual(p.gid, 5)
p = service.Process()
self.assertEqual(p.uid, None)
self.assertEqual(p.gid, None)
def testProcessName(self):
p = service.Process()
self.assertEqual(p.processName, None)
p.processName = 'hello'
self.assertEqual(p.processName, 'hello')
class TestInterfaces(unittest.TestCase):
def testService(self):
self.assert_(service.IService.providedBy(service.Service()))
def testMultiService(self):
self.assert_(service.IService.providedBy(service.MultiService()))
self.assert_(service.IServiceCollection.providedBy(service.MultiService()))
def testProcess(self):
self.assert_(service.IProcess.providedBy(service.Process()))
class TestApplication(unittest.TestCase):
def testConstructor(self):
service.Application("hello")
service.Application("hello", 5)
service.Application("hello", 5, 6)
def testProcessComponent(self):
a = service.Application("hello")
self.assertEqual(service.IProcess(a).uid, None)
self.assertEqual(service.IProcess(a).gid, None)
a = service.Application("hello", 5)
self.assertEqual(service.IProcess(a).uid, 5)
self.assertEqual(service.IProcess(a).gid, None)
a = service.Application("hello", 5, 6)
self.assertEqual(service.IProcess(a).uid, 5)
self.assertEqual(service.IProcess(a).gid, 6)
def testServiceComponent(self):
a = service.Application("hello")
self.assert_(service.IService(a) is service.IServiceCollection(a))
self.assertEqual(service.IService(a).name, "hello")
self.assertEqual(service.IService(a).parent, None)
def testPersistableComponent(self):
a = service.Application("hello")
p = sob.IPersistable(a)
self.assertEqual(p.style, 'pickle')
self.assertEqual(p.name, 'hello')
self.assert_(p.original is a)
class TestLoading(unittest.TestCase):
def test_simpleStoreAndLoad(self):
a = service.Application("hello")
p = sob.IPersistable(a)
for style in 'xml source pickle'.split():
if style == 'xml' and not gotMicrodom:
continue
p.setStyle(style)
p.save()
a1 = service.loadApplication("hello.ta"+style[0], style)
self.assertEqual(service.IService(a1).name, "hello")
open("hello.tac", 'w').writelines([
"from twisted.application import service\n",
"application = service.Application('hello')\n",
])
a1 = service.loadApplication("hello.tac", 'python')
self.assertEqual(service.IService(a1).name, "hello")
class TestAppSupport(unittest.TestCase):
def testPassphrase(self):
self.assertEqual(app.getPassphrase(0), None)
def testLoadApplication(self):
a = service.Application("hello")
baseconfig = {'file': None, 'xml': None, 'source': None, 'python':None}
for style in 'source xml pickle'.split():
if style == 'xml' and not gotMicrodom:
continue
config = baseconfig.copy()
config[{'pickle': 'file'}.get(style, style)] = 'helloapplication'
sob.IPersistable(a).setStyle(style)
sob.IPersistable(a).save(filename='helloapplication')
a1 = app.getApplication(config, None)
self.assertEqual(service.IService(a1).name, "hello")
config = baseconfig.copy()
config['python'] = 'helloapplication'
open("helloapplication", 'w').writelines([
"from twisted.application import service\n",
"application = service.Application('hello')\n",
])
a1 = app.getApplication(config, None)
self.assertEqual(service.IService(a1).name, "hello")
def test_convertStyle(self):
appl = service.Application("lala")
for instyle in 'xml source pickle'.split():
if instyle == 'xml' and not gotMicrodom:
continue
for outstyle in 'xml source pickle'.split():
if outstyle == 'xml' and not gotMicrodom:
continue
sob.IPersistable(appl).setStyle(instyle)
sob.IPersistable(appl).save(filename="converttest")
app.convertStyle("converttest", instyle, None,
"converttest.out", outstyle, 0)
appl2 = service.loadApplication("converttest.out", outstyle)
self.assertEqual(service.IService(appl2).name, "lala")
def test_getLogFile(self):
os.mkdir("logfiledir")
l = app.getLogFile(os.path.join("logfiledir", "lala"))
self.assertEqual(l.path,
os.path.abspath(os.path.join("logfiledir", "lala")))
self.assertEqual(l.name, "lala")
self.assertEqual(l.directory, os.path.abspath("logfiledir"))
def test_startApplication(self):
appl = service.Application("lala")
app.startApplication(appl, 0)
self.assert_(service.IService(appl).running)
class Foo(basic.LineReceiver):
def connectionMade(self):
self.transport.write('lalala\r\n')
def lineReceived(self, line):
self.factory.line = line
self.transport.loseConnection()
def connectionLost(self, reason):
self.factory.d.callback(self.factory.line)
class DummyApp:
processName = None
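    # Note: 'services' is never initialised here; tests that use DummyApp
    # are expected to assign the mapping themselves.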
def addService(self, service):
self.services[service.name] = service
def removeService(self, service):
del self.services[service.name]
class TimerTarget:
def __init__(self):
self.l = []
def append(self, what):
self.l.append(what)
class TestEcho(wire.Echo):
def connectionLost(self, reason):
self.d.callback(True)
class TestInternet2(unittest.TestCase):
def testTCP(self):
s = service.MultiService()
s.startService()
factory = protocol.ServerFactory()
factory.protocol = TestEcho
TestEcho.d = defer.Deferred()
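        # Listen on port 0 so the OS assigns a free port; the actual port is
        # read back from the listening port object below.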
t = internet.TCPServer(0, factory)
t.setServiceParent(s)
num = t._port.getHost().port
factory = protocol.ClientFactory()
factory.d = defer.Deferred()
factory.protocol = Foo
factory.line = None
internet.TCPClient('127.0.0.1', num, factory).setServiceParent(s)
factory.d.addCallback(self.assertEqual, 'lalala')
factory.d.addCallback(lambda x : s.stopService())
factory.d.addCallback(lambda x : TestEcho.d)
return factory.d
def testUDP(self):
if not interfaces.IReactorUDP(reactor, None):
raise unittest.SkipTest, "This reactor does not support UDP sockets"
p = protocol.DatagramProtocol()
        t = internet.UDPServer(0, p)
t.startService()
num = t._port.getHost().port
def onStop(ignored):
            t = internet.UDPServer(num, p)
t.startService()
return t.stopService()
return defer.maybeDeferred(t.stopService).addCallback(onStop)
def testPrivileged(self):
factory = protocol.ServerFactory()
factory.protocol = TestEcho
TestEcho.d = defer.Deferred()
t = internet.TCPServer(0, factory)
t.privileged = 1
t.privilegedStartService()
num = t._port.getHost().port
factory = protocol.ClientFactory()
factory.d = defer.Deferred()
factory.protocol = Foo
factory.line = None
c = internet.TCPClient('127.0.0.1', num, factory)
c.startService()
factory.d.addCallback(self.assertEqual, 'lalala')
factory.d.addCallback(lambda x : c.stopService())
factory.d.addCallback(lambda x : t.stopService())
factory.d.addCallback(lambda x : TestEcho.d)
return factory.d
def testConnectionGettingRefused(self):
factory = protocol.ServerFactory()
factory.protocol = wire.Echo
t = internet.TCPServer(0, factory)
t.startService()
num = t._port.getHost().port
t.stopService()
d = defer.Deferred()
factory = protocol.ClientFactory()
factory.clientConnectionFailed = lambda *args: d.callback(None)
c = internet.TCPClient('127.0.0.1', num, factory)
c.startService()
return d
    def testUNIX(self):
        # Run an echo server on a UNIX socket, connect a client that sends a
        # single line, and check that the line is echoed back.  _cbTestUnix
        # then restarts the whole MultiService and repeats the exchange to
        # verify the services survive a stop/start cycle.
        if not interfaces.IReactorUNIX(reactor, None):
            raise unittest.SkipTest, "This reactor does not support UNIX domain sockets"
        s = service.MultiService()
        s.startService()
        # Server side: TestEcho fires TestEcho.d when its connection closes.
        factory = protocol.ServerFactory()
        factory.protocol = TestEcho
        TestEcho.d = defer.Deferred()
        t = internet.UNIXServer('echo.skt', factory)
        t.setServiceParent(s)
        # Client side: Foo writes 'lalala\r\n' and fires factory.d with the
        # echoed line when the connection closes.
        factory = protocol.ClientFactory()
        factory.protocol = Foo
        factory.d = defer.Deferred()
        factory.line = None
        internet.UNIXClient('echo.skt', factory).setServiceParent(s)
        factory.d.addCallback(self.assertEqual, 'lalala')
        factory.d.addCallback(lambda x : s.stopService())
        factory.d.addCallback(lambda x : TestEcho.d)
        factory.d.addCallback(self._cbTestUnix, factory, s)
        return factory.d
def _cbTestUnix(self, ignored, factory, s):
TestEcho.d = defer.Deferred()
factory.line = None
factory.d = defer.Deferred()
s.startService()
factory.d.addCallback(self.assertEqual, 'lalala')
factory.d.addCallback(lambda x : s.stopService())
factory.d.addCallback(lambda x : TestEcho.d)
return factory.d
def testVolatile(self):
if not interfaces.IReactorUNIX(reactor, None):
raise unittest.SkipTest, "This reactor does not support UNIX domain sockets"
factory = protocol.ServerFactory()
factory.protocol = wire.Echo
t = internet.UNIXServer('echo.skt', factory)
t.startService()
self.failIfIdentical(t._port, None)
t1 = copy.copy(t)
self.assertIdentical(t1._port, None)
t.stopService()
self.assertIdentical(t._port, None)
self.failIf(t.running)
factory = protocol.ClientFactory()
factory.protocol = wire.Echo
t = internet.UNIXClient('echo.skt', factory)
t.startService()
self.failIfIdentical(t._connection, None)
t1 = copy.copy(t)
self.assertIdentical(t1._connection, None)
t.stopService()
self.assertIdentical(t._connection, None)
self.failIf(t.running)
def testStoppingServer(self):
if not interfaces.IReactorUNIX(reactor, None):
raise unittest.SkipTest, "This reactor does not support UNIX domain sockets"
factory = protocol.ServerFactory()
factory.protocol = wire.Echo
t = internet.UNIXServer('echo.skt', factory)
t.startService()
t.stopService()
self.failIf(t.running)
factory = protocol.ClientFactory()
d = defer.Deferred()
factory.clientConnectionFailed = lambda *args: d.callback(None)
reactor.connectUNIX('echo.skt', factory)
return d
def testPickledTimer(self):
target = TimerTarget()
t0 = internet.TimerService(1, target.append, "hello")
t0.startService()
s = pickle.dumps(t0)
t0.stopService()
t = pickle.loads(s)
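        # The running state is volatile, so a TimerService rebuilt from a
        # pickle must come back stopped.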
self.failIf(t.running)
def testBrokenTimer(self):
d = defer.Deferred()
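        # The timer callback always raises ZeroDivisionError; the wrapped
        # _failed below turns that failure into a callback on d.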
t = internet.TimerService(1, lambda: 1 / 0)
oldFailed = t._failed
def _failed(why):
oldFailed(why)
d.callback(None)
t._failed = _failed
t.startService()
        d.addCallback(lambda x : t.stopService())
d.addCallback(lambda x : self.assertEqual(
[ZeroDivisionError],
[o.value.__class__ for o in log.flushErrors(ZeroDivisionError)]))
return d
def testEverythingThere(self):
trans = 'TCP UNIX SSL UDP UNIXDatagram Multicast'.split()
for tran in trans[:]:
if not getattr(interfaces, "IReactor"+tran)(reactor, None):
trans.remove(tran)
if interfaces.IReactorArbitrary(reactor, None) is not None:
trans.insert(0, "Generic")
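        # Every supported transport must expose internet.<Tran>Server and
        # internet.<Tran>Client, backed by a reactor.listen*/connect* method
        # (UDP being the exception on the connect side).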
for tran in trans:
for side in 'Server Client'.split():
if tran == "Multicast" and side == "Client":
continue
self.assert_(hasattr(internet, tran+side))
method = getattr(internet, tran+side).method
prefix = {'Server': 'listen', 'Client': 'connect'}[side]
self.assert_(hasattr(reactor, prefix+method) or
(prefix == "connect" and method == "UDP"))
o = getattr(internet, tran+side)()
self.assertEqual(service.IService(o), o)
class TestTimerBasic(unittest.TestCase):
def testTimerRuns(self):
d = defer.Deferred()
self.t = internet.TimerService(1, d.callback, 'hello')
self.t.startService()
d.addCallback(self.assertEqual, 'hello')
d.addCallback(lambda x : self.t.stopService())
d.addCallback(lambda x : self.failIf(self.t.running))
return d
def tearDown(self):
return self.t.stopService()
def testTimerRestart(self):
# restart the same TimerService
d1 = defer.Deferred()
d2 = defer.Deferred()
work = [(d2, "bar"), (d1, "foo")]
def trigger():
d, arg = work.pop()
d.callback(arg)
self.t = internet.TimerService(1, trigger)
self.t.startService()
def onFirstResult(result):
self.assertEqual(result, 'foo')
return self.t.stopService()
def onFirstStop(ignored):
self.failIf(self.t.running)
self.t.startService()
return d2
def onSecondResult(result):
self.assertEqual(result, 'bar')
self.t.stopService()
d1.addCallback(onFirstResult)
d1.addCallback(onFirstStop)
d1.addCallback(onSecondResult)
return d1
def testTimerLoops(self):
l = []
def trigger(data, number, d):
l.append(data)
if len(l) == number:
d.callback(l)
d = defer.Deferred()
self.t = internet.TimerService(0.01, trigger, "hello", 10, d)
self.t.startService()
d.addCallback(self.assertEqual, ['hello'] * 10)
d.addCallback(lambda x : self.t.stopService())
return d
class PluggableReactorTestCase(unittest.TestCase):
"""
Tests for the reactor discovery/inspection APIs.
"""
def setUp(self):
"""
Override the L{reactors.getPlugins} function, normally bound to
L{twisted.plugin.getPlugins}, in order to control which
L{IReactorInstaller} plugins are seen as available.
"""
self.pluginCalls = []
self.pluginResults = []
self.originalFunction = reactors.getPlugins
reactors.getPlugins = self._getPlugins
def tearDown(self):
"""
Restore the original L{reactors.getPlugins}.
"""
reactors.getPlugins = self.originalFunction
def _getPlugins(self, interface, package=None):
"""
Stand-in for the real getPlugins method which records its arguments
and returns a fixed result.
"""
self.pluginCalls.append((interface, package))
return list(self.pluginResults)
def test_getPluginReactorTypes(self):
"""
Test that reactor plugins are returned from L{getReactorTypes}
"""
name = 'fakereactortest'
package = __name__ + '.fakereactor'
description = 'description'
self.pluginResults = [reactors.Reactor(name, package, description)]
reactorTypes = reactors.getReactorTypes()
self.assertEqual(
self.pluginCalls,
[(reactors.IReactorInstaller, None)])
for r in reactorTypes:
if r.shortName == name:
self.assertEqual(r.description, description)
break
else:
self.fail("Reactor plugin not present in getReactorTypes() result")
def test_reactorInstallation(self):
"""
Test that L{reactors.Reactor.install} loads the correct module and
calls its install attribute.
"""
installed = []
global install
def install():
installed.append(True)
installer = reactors.Reactor('fakereactortest', __name__, 'described')
installer.install()
self.assertEqual(installed, [True])
def test_installReactor(self):
"""
Test that the L{reactors.installReactor} function correctly installs
the specified reactor.
"""
installed = []
global install
def install():
installed.append(True)
name = 'fakereactortest'
package = __name__
description = 'description'
self.pluginResults = [reactors.Reactor(name, package, description)]
reactors.installReactor(name)
self.assertEqual(installed, [True])
def test_installNonExistentReactor(self):
"""
Test that L{reactors.installReactor} raises L{reactors.NoSuchReactor}
when asked to install a reactor which it cannot find.
"""
self.pluginResults = []
self.assertRaises(
reactors.NoSuchReactor,
reactors.installReactor, 'somereactor')
def test_reactorSelectionMixin(self):
"""
Test that the reactor selected is installed as soon as possible, ie
when the option is parsed.
"""
executed = []
INSTALL_EVENT = 'reactor installed'
SUBCOMMAND_EVENT = 'subcommands loaded'
class ReactorSelectionOptions(usage.Options, app.ReactorSelectionMixin):
def subCommands(self):
executed.append(SUBCOMMAND_EVENT)
return [('subcommand', None, lambda: self, 'test subcommand')]
subCommands = property(subCommands)
global install
def install():
executed.append(INSTALL_EVENT)
self.pluginResults = [reactors.Reactor('fakereactortest', __name__, 'described')]
options = ReactorSelectionOptions()
options.parseOptions(['--reactor', 'fakereactortest', 'subcommand'])
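        # The reactor must have been installed exactly once, and before the
        # subcommands were even looked up.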
self.assertEqual(executed[0], INSTALL_EVENT)
self.assertEqual(executed.count(INSTALL_EVENT), 1)
def test_qtStub(self):
"""
Test that installing qtreactor when it's absent fails properly.
"""
scriptPath = sibpath(__file__, "app_qtstub.py")
def _checkOutput((output, err, code)):
self.failIf(output, output)
result = getProcessOutputAndValue(
sys.executable,
args=(sys.executable, scriptPath),
env=None)
result.addCallback(_checkOutput)
return result
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import mox
from oslo.config import cfg
import webob
from nova.api.openstack.compute.contrib import security_groups
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import power_state
import nova.db
from nova import exception
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests import utils
CONF = cfg.CONF
FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
class AttrDict(dict):
def __getattr__(self, k):
return self[k]
def security_group_template(**kwargs):
sg = kwargs.copy()
sg.setdefault('tenant_id', '123')
sg.setdefault('name', 'test')
sg.setdefault('description', 'test-description')
return sg
def security_group_db(security_group, id=None):
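    # Mimic a DB row: the API's 'tenant_id' is stored as 'project_id'.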
attrs = security_group.copy()
if 'tenant_id' in attrs:
attrs['project_id'] = attrs.pop('tenant_id')
if id is not None:
attrs['id'] = id
attrs.setdefault('rules', [])
attrs.setdefault('instances', [])
return AttrDict(attrs)
def security_group_rule_template(**kwargs):
rule = kwargs.copy()
rule.setdefault('ip_protocol', 'tcp')
rule.setdefault('from_port', 22)
rule.setdefault('to_port', 22)
rule.setdefault('parent_group_id', 2)
return rule
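# For reference, calling security_group_rule_template() with no overrides
# yields the default rule implied by the setdefault calls above:
#   {'ip_protocol': 'tcp', 'from_port': 22, 'to_port': 22,
#    'parent_group_id': 2}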
def security_group_rule_db(rule, id=None):
attrs = rule.copy()
if 'ip_protocol' in attrs:
attrs['protocol'] = attrs.pop('ip_protocol')
return AttrDict(attrs)
def return_server(context, server_id, columns_to_join=None):
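    # A minimal running instance; 0x01 is power_state.RUNNING.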
return fake_instance.fake_db_instance(
**{'id': int(server_id),
'power_state': 0x01,
'host': "localhost",
'uuid': FAKE_UUID1,
'name': 'asdf'})
def return_server_by_uuid(context, server_uuid, columns_to_join=None):
return fake_instance.fake_db_instance(
**{'id': 1,
'power_state': 0x01,
'host': "localhost",
'uuid': server_uuid,
'name': 'asdf'})
def return_non_running_server(context, server_id, columns_to_join=None):
return fake_instance.fake_db_instance(
**{'id': server_id, 'power_state': power_state.SHUTDOWN,
'uuid': FAKE_UUID1, 'host': "localhost", 'name': 'asdf'})
def return_security_group_by_name(context, project_id, group_name):
return {'id': 1, 'name': group_name,
"instances": [{'id': 1, 'uuid': FAKE_UUID1}]}
def return_security_group_without_instances(context, project_id, group_name):
return {'id': 1, 'name': group_name}
def return_server_nonexistent(context, server_id, columns_to_join=None):
raise exception.InstanceNotFound(instance_id=server_id)
class TestSecurityGroups(test.TestCase):
def setUp(self):
super(TestSecurityGroups, self).setUp()
self.controller = security_groups.SecurityGroupController()
self.server_controller = (
security_groups.ServerSecurityGroupController())
self.manager = security_groups.SecurityGroupActionController()
        # fake_id must be chosen here, after a derived class's setUp has had
        # the chance to install its own 'security_group_api': the id format
        # (uuid or integer) depends on which API is in use.
if self.controller.security_group_api.id_is_uuid:
self.fake_id = '11111111-1111-1111-1111-111111111111'
else:
self.fake_id = '11111111'
def _assert_no_security_groups_reserved(self, context):
"""Check that no reservations are leaked during tests."""
result = quota.QUOTAS.get_project_quotas(context, context.project_id)
self.assertEqual(result['security_groups']['reserved'], 0)
def test_create_security_group(self):
sg = security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
res_dict = self.controller.create(req, {'security_group': sg})
self.assertEqual(res_dict['security_group']['name'], 'test')
self.assertEqual(res_dict['security_group']['description'],
'test-description')
def test_create_security_group_with_no_name(self):
sg = security_group_template()
del sg['name']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, sg)
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_description(self):
sg = security_group_template()
del sg['description']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_blank_name(self):
sg = security_group_template(name='')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_whitespace_name(self):
sg = security_group_template(name=' ')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_blank_description(self):
sg = security_group_template(description='')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_whitespace_description(self):
sg = security_group_template(description=' ')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_duplicate_name(self):
sg = security_group_template()
# FIXME: Stub out _get instead of creating twice
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.controller.create(req, {'security_group': sg})
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_body(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, None)
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_security_group(self):
body = {'no-securityGroup': None}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, body)
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_above_255_characters_name(self):
sg = security_group_template(name='1234567890' * 26)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_above_255_characters_description(self):
sg = security_group_template(description='1234567890' * 26)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_non_string_name(self):
sg = security_group_template(name=12)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_non_string_description(self):
sg = security_group_template(description=12)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_quota_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
for num in range(1, CONF.quota_security_groups + 1):
name = 'test%s' % num
sg = security_group_template(name=name)
res_dict = self.controller.create(req, {'security_group': sg})
self.assertEqual(res_dict['security_group']['name'], name)
sg = security_group_template()
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create,
req, {'security_group': sg})
def test_get_security_group_list(self):
groups = []
for i, name in enumerate(['default', 'test']):
sg = security_group_template(id=i + 1,
name=name,
description=name + '-desc',
rules=[])
groups.append(sg)
expected = {'security_groups': groups}
def return_security_groups(context, project_id):
return [security_group_db(sg) for sg in groups]
self.stubs.Set(nova.db, 'security_group_get_by_project',
return_security_groups)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
res_dict = self.controller.index(req)
self.assertEquals(res_dict, expected)
def test_get_security_group_list_all_tenants(self):
all_groups = []
tenant_groups = []
for i, name in enumerate(['default', 'test']):
sg = security_group_template(id=i + 1,
name=name,
description=name + '-desc',
rules=[])
all_groups.append(sg)
if name == 'default':
tenant_groups.append(sg)
all = {'security_groups': all_groups}
tenant_specific = {'security_groups': tenant_groups}
def return_all_security_groups(context):
return [security_group_db(sg) for sg in all_groups]
self.stubs.Set(nova.db, 'security_group_get_all',
return_all_security_groups)
def return_tenant_security_groups(context, project_id):
return [security_group_db(sg) for sg in tenant_groups]
self.stubs.Set(nova.db, 'security_group_get_by_project',
return_tenant_security_groups)
path = '/v2/fake/os-security-groups'
req = fakes.HTTPRequest.blank(path, use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEquals(res_dict, tenant_specific)
req = fakes.HTTPRequest.blank('%s?all_tenants=1' % path,
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEquals(res_dict, all)
def test_get_security_group_by_instance(self):
groups = []
for i, name in enumerate(['default', 'test']):
sg = security_group_template(id=i + 1,
name=name,
description=name + '-desc',
rules=[])
groups.append(sg)
expected = {'security_groups': groups}
def return_instance(context, server_id, columns_to_join=None):
self.assertEquals(server_id, FAKE_UUID1)
return return_server_by_uuid(context, server_id)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_instance)
def return_security_groups(context, instance_uuid):
self.assertEquals(instance_uuid, FAKE_UUID1)
return [security_group_db(sg) for sg in groups]
self.stubs.Set(nova.db, 'security_group_get_by_instance',
return_security_groups)
req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
('fake', FAKE_UUID1))
res_dict = self.server_controller.index(req, FAKE_UUID1)
self.assertEquals(res_dict, expected)
def test_get_security_group_by_instance_non_existing(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/os-security-groups')
self.assertRaises(webob.exc.HTTPNotFound,
self.server_controller.index, req, '1')
def test_get_security_group_by_instance_invalid_id(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/servers/invalid/os-security-groups')
self.assertRaises(webob.exc.HTTPNotFound,
self.server_controller.index, req, 'invalid')
def test_get_security_group_by_id(self):
sg = security_group_template(id=2, rules=[])
def return_security_group(context, group_id):
self.assertEquals(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
res_dict = self.controller.show(req, '2')
expected = {'security_group': sg}
self.assertEquals(res_dict, expected)
def test_get_security_group_by_invalid_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, 'invalid')
def test_get_security_group_by_non_existing_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
self.fake_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.fake_id)
def test_update_security_group(self):
sg = security_group_template(id=2, rules=[])
sg_update = security_group_template(id=2, rules=[],
name='update_name', description='update_desc')
def return_security_group(context, group_id):
self.assertEquals(sg['id'], group_id)
return security_group_db(sg)
def return_update_security_group(context, group_id, values):
self.assertEquals(sg_update['id'], group_id)
self.assertEquals(sg_update['name'], values['name'])
self.assertEquals(sg_update['description'], values['description'])
return security_group_db(sg_update)
self.stubs.Set(nova.db, 'security_group_update',
return_update_security_group)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
res_dict = self.controller.update(req, '2',
{'security_group': sg_update})
expected = {'security_group': sg_update}
self.assertEquals(res_dict, expected)
def test_update_security_group_name_to_default(self):
sg = security_group_template(id=2, rules=[], name='default')
def return_security_group(context, group_id):
self.assertEquals(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, '2', {'security_group': sg})
def test_update_default_security_group_fail(self):
sg = security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, '1', {'security_group': sg})
def test_delete_security_group_by_id(self):
sg = security_group_template(id=1, rules=[])
self.called = False
def security_group_destroy(context, id):
self.called = True
def return_security_group(context, group_id):
self.assertEquals(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_destroy',
security_group_destroy)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
self.controller.delete(req, '1')
self.assertTrue(self.called)
def test_delete_security_group_by_invalid_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, 'invalid')
def test_delete_security_group_by_non_existing_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
% self.fake_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.fake_id)
def test_delete_security_group_in_use(self):
sg = security_group_template(id=1, rules=[])
def security_group_in_use(context, id):
return True
def return_security_group(context, group_id):
self.assertEquals(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_in_use',
security_group_in_use)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, '1')
def test_associate_by_non_existing_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.assertEquals(return_server(None, '1'),
nova.db.instance_get(None, '1'))
body = dict(addSecurityGroup=dict(name='non-existing'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_by_invalid_server_id(self):
body = dict(addSecurityGroup=dict(name='test'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._addSecurityGroup, req, 'invalid', body)
def test_associate_without_body(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=None)
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_no_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=dict())
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_security_group_name_with_whitespaces(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=dict(name=" "))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_non_existing_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_non_running_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_non_running_server)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_without_instances)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._addSecurityGroup(req, '1', body)
def test_associate_already_associated_security_group_to_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
nova.db.instance_add_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_without_instances)
self.mox.ReplayAll()
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._addSecurityGroup(req, '1', body)
def test_disassociate_by_non_existing_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.assertEquals(return_server(None, '1'),
nova.db.instance_get(None, '1'))
body = dict(removeSecurityGroup=dict(name='non-existing'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_by_invalid_server_id(self):
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name='test'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, req, 'invalid',
body)
def test_disassociate_without_body(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(removeSecurityGroup=None)
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_no_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(removeSecurityGroup=dict())
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_security_group_name_with_whitespaces(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(removeSecurityGroup=dict(name=" "))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_non_existing_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_non_running_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_non_running_server)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._removeSecurityGroup(req, '1', body)
def test_disassociate_already_associated_security_group_to_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_without_instances)
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group')
nova.db.instance_remove_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
self.mox.ReplayAll()
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._removeSecurityGroup(req, '1', body)
class TestSecurityGroupRules(test.TestCase):
def setUp(self):
super(TestSecurityGroupRules, self).setUp()
self.controller = security_groups.SecurityGroupController()
if self.controller.security_group_api.id_is_uuid:
id1 = '11111111-1111-1111-1111-111111111111'
id2 = '22222222-2222-2222-2222-222222222222'
self.invalid_id = '33333333-3333-3333-3333-333333333333'
else:
id1 = 1
id2 = 2
self.invalid_id = '33333333'
self.sg1 = security_group_template(id=id1)
self.sg2 = security_group_template(
id=id2, name='authorize_revoke',
description='authorize-revoke testing')
db1 = security_group_db(self.sg1)
db2 = security_group_db(self.sg2)
def return_security_group(context, group_id, columns_to_join=None):
if group_id == db1['id']:
return db1
if group_id == db2['id']:
return db2
raise exception.NotFound()
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
self.parent_security_group = db2
self.controller = security_groups.SecurityGroupRulesController()
def test_create_by_cidr(self):
rule = security_group_rule_template(cidr='10.2.3.124/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEquals(security_group_rule['id'], 0)
self.assertEquals(security_group_rule['parent_group_id'],
self.sg2['id'])
self.assertEquals(security_group_rule['ip_range']['cidr'],
"10.2.3.124/24")
def test_create_by_group_id(self):
rule = security_group_rule_template(group_id=self.sg1['id'],
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEquals(security_group_rule['id'], 0)
self.assertEquals(security_group_rule['parent_group_id'],
self.sg2['id'])
def test_create_by_same_group_id(self):
rule1 = security_group_rule_template(group_id=self.sg1['id'],
from_port=80, to_port=80,
parent_group_id=self.sg2['id'])
self.parent_security_group['rules'] = [security_group_rule_db(rule1)]
rule2 = security_group_rule_template(group_id=self.sg1['id'],
from_port=81, to_port=81,
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule2})
security_group_rule = res_dict['security_group_rule']
self.assertNotEquals(security_group_rule['id'], 0)
self.assertEquals(security_group_rule['parent_group_id'],
self.sg2['id'])
self.assertEquals(security_group_rule['from_port'], 81)
self.assertEquals(security_group_rule['to_port'], 81)
def test_create_none_value_from_to_port(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id']}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertEquals(security_group_rule['from_port'], None)
self.assertEquals(security_group_rule['to_port'], None)
self.assertEquals(security_group_rule['group']['name'], 'test')
self.assertEquals(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_none_value_from_to_port_icmp(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id'],
'ip_protocol': 'ICMP'}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertEquals(security_group_rule['ip_protocol'], 'ICMP')
self.assertEquals(security_group_rule['from_port'], -1)
self.assertEquals(security_group_rule['to_port'], -1)
self.assertEquals(security_group_rule['group']['name'], 'test')
self.assertEquals(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_none_value_from_to_port_tcp(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id'],
'ip_protocol': 'TCP'}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertEquals(security_group_rule['ip_protocol'], 'TCP')
self.assertEquals(security_group_rule['from_port'], 1)
self.assertEquals(security_group_rule['to_port'], 65535)
self.assertEquals(security_group_rule['group']['name'], 'test')
self.assertEquals(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_by_invalid_cidr_json(self):
rule = security_group_rule_template(
ip_protocol="tcp",
from_port=22,
to_port=22,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/2433")
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_by_invalid_tcp_port_json(self):
rule = security_group_rule_template(
ip_protocol="tcp",
from_port=75534,
to_port=22,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/24")
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_by_invalid_icmp_port_json(self):
rule = security_group_rule_template(
ip_protocol="icmp",
from_port=1,
to_port=256,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/24")
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_cidr(self):
rule = security_group_rule_template(cidr='10.0.0.0/24',
parent_group_id=self.sg2['id'])
self.parent_security_group['rules'] = [security_group_rule_db(rule)]
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_group_id(self):
rule = security_group_rule_template(group_id=1)
self.parent_security_group['rules'] = [security_group_rule_db(rule)]
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_body(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, None)
def test_create_with_no_security_group_rule_in_body(self):
rules = {'test': 'test'}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, rules)
def test_create_with_invalid_parent_group_id(self):
rule = security_group_rule_template(parent_group_id='invalid')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_non_existing_parent_group_id(self):
rule = security_group_rule_template(group_id='invalid',
parent_group_id=self.invalid_id)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_invalid_protocol(self):
rule = security_group_rule_template(ip_protocol='invalid-protocol',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_protocol(self):
rule = security_group_rule_template(cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
del rule['ip_protocol']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_invalid_from_port(self):
rule = security_group_rule_template(from_port='666666',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_invalid_to_port(self):
rule = security_group_rule_template(to_port='666666',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_non_numerical_from_port(self):
rule = security_group_rule_template(from_port='invalid',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_non_numerical_to_port(self):
rule = security_group_rule_template(to_port='invalid',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_from_port(self):
rule = security_group_rule_template(cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
del rule['from_port']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_to_port(self):
rule = security_group_rule_template(cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
del rule['to_port']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_invalid_cidr(self):
rule = security_group_rule_template(cidr='10.2.2222.0/24',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_cidr_group(self):
rule = security_group_rule_template(parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEquals(security_group_rule['id'], 0)
self.assertEquals(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEquals(security_group_rule['ip_range']['cidr'],
"0.0.0.0/0")
def test_create_with_invalid_group_id(self):
rule = security_group_rule_template(group_id='invalid',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_empty_group_id(self):
rule = security_group_rule_template(group_id='',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_nonexist_group_id(self):
rule = security_group_rule_template(group_id=self.invalid_id,
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_same_group_parent_id_and_group_id(self):
rule = security_group_rule_template(group_id=self.sg1['id'],
parent_group_id=self.sg1['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEquals(security_group_rule['id'], 0)
self.assertEquals(security_group_rule['parent_group_id'],
self.sg1['id'])
self.assertEquals(security_group_rule['group']['name'],
self.sg1['name'])
def _test_create_with_no_ports_and_no_group(self, proto):
rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id']}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def _test_create_with_no_ports(self, proto):
rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id'],
'group_id': self.sg1['id']}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
expected_rule = {
'from_port': 1, 'group': {'tenant_id': '123', 'name': 'test'},
'ip_protocol': proto, 'to_port': 65535, 'parent_group_id':
self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
}
if proto == 'icmp':
expected_rule['to_port'] = -1
expected_rule['from_port'] = -1
self.assertTrue(security_group_rule == expected_rule)
def test_create_with_no_ports_icmp(self):
self._test_create_with_no_ports_and_no_group('icmp')
self._test_create_with_no_ports('icmp')
def test_create_with_no_ports_tcp(self):
self._test_create_with_no_ports_and_no_group('tcp')
self._test_create_with_no_ports('tcp')
def test_create_with_no_ports_udp(self):
self._test_create_with_no_ports_and_no_group('udp')
self._test_create_with_no_ports('udp')
def _test_create_with_ports(self, proto, from_port, to_port):
rule = {
'ip_protocol': proto, 'from_port': from_port, 'to_port': to_port,
'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
expected_rule = {
'from_port': from_port,
'group': {'tenant_id': '123', 'name': 'test'},
'ip_protocol': proto, 'to_port': to_port, 'parent_group_id':
self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
}
self.assertTrue(security_group_rule['ip_protocol'] == proto)
self.assertTrue(security_group_rule['from_port'] == from_port)
self.assertTrue(security_group_rule['to_port'] == to_port)
self.assertTrue(security_group_rule == expected_rule)
def test_create_with_ports_icmp(self):
self._test_create_with_ports('icmp', 0, 1)
self._test_create_with_ports('icmp', 0, 0)
self._test_create_with_ports('icmp', 1, 0)
def test_create_with_ports_tcp(self):
self._test_create_with_ports('tcp', 1, 1)
self._test_create_with_ports('tcp', 1, 65535)
self._test_create_with_ports('tcp', 65535, 65535)
def test_create_with_ports_udp(self):
self._test_create_with_ports('udp', 1, 1)
self._test_create_with_ports('udp', 1, 65535)
self._test_create_with_ports('udp', 65535, 65535)
def test_delete(self):
rule = security_group_rule_template(id=self.sg2['id'],
parent_group_id=self.sg2['id'])
def security_group_rule_get(context, id):
return security_group_rule_db(rule)
def security_group_rule_destroy(context, id):
pass
self.stubs.Set(nova.db, 'security_group_rule_get',
security_group_rule_get)
self.stubs.Set(nova.db, 'security_group_rule_destroy',
security_group_rule_destroy)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
% self.sg2['id'])
self.controller.delete(req, self.sg2['id'])
def test_delete_invalid_rule_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules' +
'/invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, 'invalid')
def test_delete_non_existing_rule_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
% self.invalid_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.invalid_id)
def test_create_rule_quota_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
for num in range(100, 100 + CONF.quota_security_group_rules):
rule = {
'ip_protocol': 'tcp', 'from_port': num,
'to_port': num, 'parent_group_id': self.sg2['id'],
'group_id': self.sg1['id']
}
self.controller.create(req, {'security_group_rule': rule})
rule = {
'ip_protocol': 'tcp', 'from_port': '121', 'to_port': '121',
'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
}
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create,
req, {'security_group_rule': rule})
def test_create_rule_cidr_allow_all(self):
rule = security_group_rule_template(cidr='0.0.0.0/0',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEquals(security_group_rule['id'], 0)
self.assertEquals(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEquals(security_group_rule['ip_range']['cidr'],
"0.0.0.0/0")
def test_create_rule_cidr_ipv6_allow_all(self):
rule = security_group_rule_template(cidr='::/0',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEquals(security_group_rule['id'], 0)
self.assertEquals(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEquals(security_group_rule['ip_range']['cidr'],
"::/0")
def test_create_rule_cidr_allow_some(self):
rule = security_group_rule_template(cidr='15.0.0.0/8',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEquals(security_group_rule['id'], 0)
self.assertEquals(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEquals(security_group_rule['ip_range']['cidr'],
"15.0.0.0/8")
def test_create_rule_cidr_bad_netmask(self):
rule = security_group_rule_template(cidr='15.0.0.0/0')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
class TestSecurityGroupRulesXMLDeserializer(test.TestCase):
def setUp(self):
super(TestSecurityGroupRulesXMLDeserializer, self).setUp()
self.deserializer = security_groups.SecurityGroupRulesXMLDeserializer()
def test_create_request(self):
serial_request = """
<security_group_rule>
<parent_group_id>12</parent_group_id>
<from_port>22</from_port>
<to_port>22</to_port>
<group_id></group_id>
<ip_protocol>tcp</ip_protocol>
<cidr>10.0.0.0/24</cidr>
</security_group_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_rule": {
"parent_group_id": "12",
"from_port": "22",
"to_port": "22",
"ip_protocol": "tcp",
"group_id": "",
"cidr": "10.0.0.0/24",
},
}
self.assertEquals(request['body'], expected)
def test_create_no_protocol_request(self):
serial_request = """
<security_group_rule>
<parent_group_id>12</parent_group_id>
<from_port>22</from_port>
<to_port>22</to_port>
<group_id></group_id>
<cidr>10.0.0.0/24</cidr>
</security_group_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_rule": {
"parent_group_id": "12",
"from_port": "22",
"to_port": "22",
"group_id": "",
"cidr": "10.0.0.0/24",
},
}
self.assertEquals(request['body'], expected)
def test_corrupt_xml(self):
"""Should throw a 400 error on corrupt xml."""
self.assertRaises(
exception.MalformedRequestBody,
self.deserializer.deserialize,
utils.killer_xml_body())
class TestSecurityGroupXMLDeserializer(test.TestCase):
def setUp(self):
super(TestSecurityGroupXMLDeserializer, self).setUp()
self.deserializer = security_groups.SecurityGroupXMLDeserializer()
def test_create_request(self):
serial_request = """
<security_group name="test">
<description>test</description>
</security_group>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group": {
"name": "test",
"description": "test",
},
}
self.assertEquals(request['body'], expected)
def test_create_no_description_request(self):
serial_request = """
<security_group name="test">
</security_group>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group": {
"name": "test",
},
}
self.assertEquals(request['body'], expected)
def test_create_no_name_request(self):
serial_request = """
<security_group>
<description>test</description>
</security_group>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group": {
"description": "test",
},
}
self.assertEquals(request['body'], expected)
def test_corrupt_xml(self):
"""Should throw a 400 error on corrupt xml."""
self.assertRaises(
exception.MalformedRequestBody,
self.deserializer.deserialize,
utils.killer_xml_body())
class TestSecurityGroupXMLSerializer(test.TestCase):
def setUp(self):
super(TestSecurityGroupXMLSerializer, self).setUp()
self.namespace = wsgi.XMLNS_V11
self.rule_serializer = security_groups.SecurityGroupRuleTemplate()
self.index_serializer = security_groups.SecurityGroupsTemplate()
self.default_serializer = security_groups.SecurityGroupTemplate()
def _tag(self, elem):
tagname = elem.tag
self.assertEqual(tagname[0], '{')
tmp = tagname.partition('}')
namespace = tmp[0][1:]
self.assertEqual(namespace, self.namespace)
return tmp[2]
def _verify_security_group_rule(self, raw_rule, tree):
self.assertEqual(raw_rule['id'], tree.get('id'))
self.assertEqual(raw_rule['parent_group_id'],
tree.get('parent_group_id'))
seen = set()
expected = set(['ip_protocol', 'from_port', 'to_port',
'group', 'group/name', 'group/tenant_id',
'ip_range', 'ip_range/cidr'])
for child in tree:
child_tag = self._tag(child)
self.assertIn(child_tag, raw_rule)
seen.add(child_tag)
if child_tag in ('group', 'ip_range'):
for gr_child in child:
gr_child_tag = self._tag(gr_child)
self.assertIn(gr_child_tag, raw_rule[child_tag])
seen.add('%s/%s' % (child_tag, gr_child_tag))
self.assertEqual(gr_child.text,
raw_rule[child_tag][gr_child_tag])
else:
self.assertEqual(child.text, raw_rule[child_tag])
self.assertEqual(seen, expected)
def _verify_security_group(self, raw_group, tree):
rules = raw_group['rules']
self.assertEqual('security_group', self._tag(tree))
self.assertEqual(raw_group['id'], tree.get('id'))
self.assertEqual(raw_group['tenant_id'], tree.get('tenant_id'))
self.assertEqual(raw_group['name'], tree.get('name'))
self.assertEqual(2, len(tree))
for child in tree:
child_tag = self._tag(child)
if child_tag == 'rules':
self.assertEqual(2, len(child))
for idx, gr_child in enumerate(child):
self.assertEqual(self._tag(gr_child), 'rule')
self._verify_security_group_rule(rules[idx], gr_child)
else:
self.assertEqual('description', child_tag)
self.assertEqual(raw_group['description'], child.text)
def test_rule_serializer(self):
raw_rule = dict(
id='123',
parent_group_id='456',
ip_protocol='tcp',
from_port='789',
to_port='987',
group=dict(name='group', tenant_id='tenant'),
ip_range=dict(cidr='10.0.0.0/8'))
rule = dict(security_group_rule=raw_rule)
text = self.rule_serializer.serialize(rule)
tree = etree.fromstring(text)
self.assertEqual('security_group_rule', self._tag(tree))
self._verify_security_group_rule(raw_rule, tree)
def test_group_serializer(self):
rules = [dict(
id='123',
parent_group_id='456',
ip_protocol='tcp',
from_port='789',
to_port='987',
group=dict(name='group1', tenant_id='tenant1'),
ip_range=dict(cidr='10.55.44.0/24')),
dict(
id='654',
parent_group_id='321',
ip_protocol='udp',
from_port='234',
to_port='567',
group=dict(name='group2', tenant_id='tenant2'),
ip_range=dict(cidr='10.44.55.0/24'))]
raw_group = dict(
id='890',
description='description',
name='name',
tenant_id='tenant',
rules=rules)
sg_group = dict(security_group=raw_group)
text = self.default_serializer.serialize(sg_group)
tree = etree.fromstring(text)
self._verify_security_group(raw_group, tree)
def test_groups_serializer(self):
rules = [dict(
id='123',
parent_group_id='1234',
ip_protocol='tcp',
from_port='12345',
to_port='123456',
group=dict(name='group1', tenant_id='tenant1'),
ip_range=dict(cidr='10.123.0.0/24')),
dict(
id='234',
parent_group_id='2345',
ip_protocol='udp',
from_port='23456',
to_port='234567',
group=dict(name='group2', tenant_id='tenant2'),
ip_range=dict(cidr='10.234.0.0/24')),
dict(
id='345',
parent_group_id='3456',
ip_protocol='tcp',
from_port='34567',
to_port='345678',
group=dict(name='group3', tenant_id='tenant3'),
ip_range=dict(cidr='10.345.0.0/24')),
dict(
id='456',
parent_group_id='4567',
ip_protocol='udp',
from_port='45678',
to_port='456789',
group=dict(name='group4', tenant_id='tenant4'),
ip_range=dict(cidr='10.456.0.0/24'))]
groups = [dict(
id='567',
description='description1',
name='name1',
tenant_id='tenant1',
rules=rules[0:2]),
dict(
id='678',
description='description2',
name='name2',
tenant_id='tenant2',
rules=rules[2:4])]
sg_groups = dict(security_groups=groups)
text = self.index_serializer.serialize(sg_groups)
tree = etree.fromstring(text)
self.assertEqual('security_groups', self._tag(tree))
self.assertEqual(len(groups), len(tree))
for idx, child in enumerate(tree):
self._verify_security_group(groups[idx], child)
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get_all(*args, **kwargs):
base = {'id': 1, 'description': 'foo', 'user_id': 'bar',
'project_id': 'baz', 'deleted': False, 'deleted_at': None,
'updated_at': None, 'created_at': None}
db_list = [
fakes.stub_instance(
1, uuid=UUID1,
security_groups=[dict(base, **{'name': 'fake-0-0'}),
dict(base, **{'name': 'fake-0-1'})]),
fakes.stub_instance(
2, uuid=UUID2,
security_groups=[dict(base, **{'name': 'fake-1-0'}),
dict(base, **{'name': 'fake-1-1'})])
]
return instance_obj._make_instance_list(args[1],
instance_obj.InstanceList(),
db_list,
['metadata', 'system_metadata',
'security_groups', 'info_cache'])
def fake_compute_get(*args, **kwargs):
inst = fakes.stub_instance(1, uuid=UUID3,
security_groups=[{'name': 'fake-2-0'},
{'name': 'fake-2-1'}])
return fake_instance.fake_instance_obj(args[1],
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **inst)
def fake_compute_create(*args, **kwargs):
return ([fake_compute_get(*args, **kwargs)], '')
def fake_get_instances_security_groups_bindings(inst, context):
return {UUID1: [{'name': 'fake-0-0'}, {'name': 'fake-0-1'}],
UUID2: [{'name': 'fake-1-0'}, {'name': 'fake-1-1'}]}
class SecurityGroupsOutputTest(test.TestCase):
content_type = 'application/json'
def setUp(self):
super(SecurityGroupsOutputTest, self).setUp()
self.controller = security_groups.SecurityGroupController()
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
self.stubs.Set(compute.api.API, 'create', fake_compute_create)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Security_groups'])
def _make_request(self, url, body=None):
req = webob.Request.blank(url)
if body:
req.method = 'POST'
req.body = self._encode_body(body)
req.content_type = self.content_type
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
return res
def _encode_body(self, body):
return jsonutils.dumps(body)
def _get_server(self, body):
return jsonutils.loads(body).get('server')
def _get_servers(self, body):
return jsonutils.loads(body).get('servers')
def _get_groups(self, server):
return server.get('security_groups')
def test_create(self):
url = '/v2/fake/servers'
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
res = self._make_request(url, {'server': server})
self.assertEqual(res.status_int, 202)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
def test_show(self):
url = '/v2/fake/servers/%s' % UUID3
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
def test_detail(self):
url = '/v2/fake/servers/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
for i, server in enumerate(self._get_servers(res.body)):
for j, group in enumerate(self._get_groups(server)):
name = 'fake-%s-%s' % (i, j)
self.assertEqual(group.get('name'), name)
def test_no_instance_passthrough_404(self):
def fake_compute_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
res = self._make_request(url)
self.assertEqual(res.status_int, 404)
class SecurityGroupsOutputXmlTest(SecurityGroupsOutputTest):
content_type = 'application/xml'
class MinimalCreateServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
root.set('name')
root.set('id')
root.set('imageRef')
root.set('flavorRef')
return xmlutil.MasterTemplate(root, 1,
nsmap={None: xmlutil.XMLNS_V11})
def _encode_body(self, body):
serializer = self.MinimalCreateServerTemplate()
return serializer.serialize(body)
def _get_server(self, body):
return etree.XML(body)
def _get_servers(self, body):
return etree.XML(body).getchildren()
def _get_groups(self, server):
# NOTE(vish): we are adding security groups without an extension
# namespace so we don't break people using the existing
# functionality, but that means we need to use find with
# the existing server namespace.
namespace = server.nsmap[None]
return server.find('{%s}security_groups' % namespace).getchildren()
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils used in the DFP Playground."""
import logging
import unittest
import mock
from models import AppCredential
from models import AppUser
import suds.sudsobject
from utils import oauth2required
from utils import retry
from utils import unpack_row
from utils import unpack_suds_object
from google.appengine.api import users
from google.appengine.ext import testbed
class UtilsTest(unittest.TestCase):
"""Tests for utils.py."""
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
app_credential = AppCredential(client_id='1', client_secret='secret')
app_credential.put()
self.new_app_user = users.User('johndoe@gmail.com')
self.existing_app_user = users.User('janedoe@gmail.com')
AppUser(user=self.existing_app_user, email='janedoe@gmail.com',
refresh_token='blah').put()
# mocking webapp2 request object
class RequestMock(object):
def redirect(self, uri):
return uri
self.request_mock = RequestMock()
# mocking handler
@oauth2required
def homepage_mock(request):
return '/'
self.homepage_mock = homepage_mock
# helper class for mocking retry decorator
class TestObject(object):
def __init__(test_self):
test_self.fails_remaining = 2
@retry(NotImplementedError)
def instant_success_func(test_self):
return 'success'
@retry(NotImplementedError)
def eventual_success_func(test_self):
if test_self.fails_remaining > 0:
test_self.fails_remaining -= 1
raise NotImplementedError()
return 'success'
@retry(NotImplementedError)
def failure_func(test_self):
raise NotImplementedError()
self.test_obj = TestObject()
logging.warning = mock.MagicMock(side_effect=logging.warning)
# simulating a line item object
self.suds_obj = suds.sudsobject.Object()
self.suds_obj.goal = suds.sudsobject.Object()
self.suds_obj.goal.units = 10000
self.suds_obj.goal.goalType = 'LIFETIME'
self.suds_obj.goal.unitType = 'IMPRESSIONS'
self.suds_obj.orderId = 987654321
self.suds_obj.endDateTime = suds.sudsobject.Object()
self.suds_obj.endDateTime.date = suds.sudsobject.Object()
self.suds_obj.endDateTime.date.year = 2015
self.suds_obj.endDateTime.date.day = 31
self.suds_obj.endDateTime.date.month = 12
self.suds_obj.endDateTime.timeZoneID = 'America/New_York'
self.suds_obj.endDateTime.second = 0
self.suds_obj.endDateTime.hour = 23
self.suds_obj.endDateTime.minute = 59
self.suds_obj.reserveAtCreation = False
def create_suds_test_obj(id_):
obj = suds.sudsobject.Object()
obj.id = id_
obj.type = 'PIXEL'
return obj
self.suds_obj.creativePlaceholders = [
create_suds_test_obj(0),
create_suds_test_obj(1),
create_suds_test_obj(2),
]
self.unpacked_suds_obj = {
'goal': {
'units': 10000,
'goalType': 'LIFETIME',
'unitType': 'IMPRESSIONS',
},
'orderId': 987654321,
'endDateTime': {
'date': {
'year': 2015,
'day': 31,
'month': 12,
},
'timeZoneID': 'America/New_York',
'second': 0,
'hour': 23,
'minute': 59,
},
'reserveAtCreation': False,
'creativePlaceholders': [
{
'id': 0,
'type': 'PIXEL',
},
{
'id': 1,
'type': 'PIXEL',
},
{
'id': 2,
'type': 'PIXEL',
},
],
}
# simulating a row object from PQLService
self.cols = ['id', 'browsername']
self.row_obj = suds.sudsobject.Object()
self.row_obj.values = [
suds.sudsobject.Object(),
suds.sudsobject.Object(),
]
self.row_obj.values[0].value = '123456'
self.row_obj.values[1].value = 'Test Browser'
self.unpacked_row_obj = {
'id': '123456',
'browsername': 'Test Browser',
}
def tearDown(self):
self.testbed.deactivate()
def testOauth2RedirectForNewUser(self):
users.get_current_user = mock.MagicMock(return_value=self.new_app_user)
self.assertEqual('/login', self.homepage_mock(self.request_mock))
def testOauth2RedirectForExistingUser(self):
users.get_current_user = mock.MagicMock(return_value=self.existing_app_user)
self.assertEqual('/', self.homepage_mock(self.request_mock))
def testEventualSuccess(self):
self.assertEqual('success', self.test_obj.eventual_success_func())
self.assertEqual(2, logging.warning.call_count)
def testFailure(self):
self.assertRaises(RuntimeError, self.test_obj.failure_func)
self.assertEqual(3, logging.warning.call_count)
def testInstantSuccess(self):
self.assertEqual('success', self.test_obj.instant_success_func())
self.assertEqual(0, logging.warning.call_count)
def testUnpackEmptyObject(self):
empty_obj = suds.sudsobject.Object()
self.assertEqual({}, unpack_suds_object(empty_obj))
def testUnpackObject(self):
self.assertEqual(self.unpacked_suds_obj, unpack_suds_object(self.suds_obj))
def testUnpackEmptyRow(self):
empty_row = suds.sudsobject.Object()
self.assertEqual({}, unpack_row(empty_row, self.cols))
def testUnpackRow(self):
self.assertEqual(self.unpacked_row_obj, unpack_row(self.row_obj, self.cols))
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/python
# (c) Copyright 2016 Hewlett Packard Enterprise Development LP
#
# GNU Zebra is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# GNU Zebra is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Zebra; see the file COPYING. If not, write to the Free
# Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
from opsvsi.docker import *
from opsvsi.opsvsitest import *
from opsvsiutils.systemutil import *
import time
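# Line offsets, relative to the 'sFlow Configuration ' header line in the
# 'show sflow' output, at which each configuration field is expected to appear.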
SHOW_SFLOW_SAMPLING_RATE_INDEX = 6
SHOW_SFLOW_POLLING_INTERVAL_INDEX = 7
SHOW_SFLOW_HEADER_SIZE_INDEX = 8
SHOW_SFLOW_DATAGRAM_SIZE_INDEX = 9
class sflowConfigTest(OpsVsiTest):
def setupNet(self):
host_opts = self.getHostOpts()
switch_opts = self.getSwitchOpts()
static_topo = SingleSwitchTopo(k=0, hopts=host_opts, sopts=switch_opts)
self.net = Mininet(static_topo, switch=VsiOpenSwitch,
host=Host, link=OpsVsiLink,
controller=None, build=True)
def test_sflow_global_status(self):
'''
This function checks whether a new row gets created when you enable
sflow and a reference to it gets stored in the System table.
Also, it verifies whether the row name is set as 'global'
'''
info("\n\n######## Test to Verify Correct Setting of sFlow 'global' "
"Status ########\n")
s1 = self.net.switches[0]
s1.cmdCLI("configure terminal")
info("### Verifying creation of row in sFlow table ###\n")
s1.cmdCLI("sflow enable")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert 'enabled' in out[word_index + 2], 'creation of row in sFlow\
table unsuccessful'
info("### Verifying row name as 'global' for the sflow row "
"created ###\n")
ret = s1.cmd("ovsdb-client monitor sFlow --detach")
out = ret.split('\n')
assert 'global' in out[2], 'verification of sflow row name as "global"\
unsuccessful'
        info("### Verifying removal of sflow reference from System table "
             "for 'sflow disable' ###\n")
s1.cmdCLI("no sflow enable")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
        assert 'disabled' in out[word_index + 2], 'deletion of reference from \
System table unsuccessful'
def test_sflow_sampling_rate(self):
'''
This function verifies correct setting/unsetting of sflow default and
non-default sampling rate
'''
info("\n\n######## Test to Verify Correct Setting of sFlow Sampling "
"Rate ########\n")
s1 = self.net.switches[0]
info("### Verifying default sflow sampling rate ###\n")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '4096' in out[word_index + SHOW_SFLOW_SAMPLING_RATE_INDEX],\
'Default sflow sampling rate not set'
info("### Setting and Verifying specific sflow sampling rate ###\n")
s1.cmdCLI("sflow sampling 50000")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '50000' in out[word_index + SHOW_SFLOW_SAMPLING_RATE_INDEX],\
'Non-default sflow sampling rate not set'
info("### Unsetting specific rate set and verifying sflow sampling "
"getting set back to default ###\n")
s1.cmdCLI("no sflow sampling")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '4096' in out[word_index + SHOW_SFLOW_SAMPLING_RATE_INDEX],\
'sFlow sampling rate not reset to default'
def test_sflow_header_size(self):
'''
This function verifies correct setting/unsetting of sflow default and
non-default header size
'''
info("\n\n######## Test to Verify Correct Setting of sFlow Header "
"Size ########\n")
s1 = self.net.switches[0]
info("### Verifying default sflow header size ###\n")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '128' in out[word_index + SHOW_SFLOW_HEADER_SIZE_INDEX],\
'Default sflow header size not set'
info("### Setting and Verifying specific sflow header size ###\n")
s1.cmdCLI("sflow header-size 70")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '70' in out[word_index + SHOW_SFLOW_HEADER_SIZE_INDEX],\
'Non-default sflow header size not set'
        info("### Unsetting specific header size set and verifying "
             "sflow header size getting set back to default ###\n")
s1.cmdCLI("no sflow header-size")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '128' in out[word_index + SHOW_SFLOW_HEADER_SIZE_INDEX],\
'sFlow header size not reset to default'
def test_sflow_max_datagram_size(self):
'''
This function verifies correct setting/unsetting of sflow default and
non-default max-datagram size
'''
info("\n\n######## Test to Verify Correct Setting of sFlow Max-"
"Datagram Size ########\n")
s1 = self.net.switches[0]
info("### Verifying default sflow max-datagram size ###\n")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '1400' in out[word_index + SHOW_SFLOW_DATAGRAM_SIZE_INDEX],\
'Default sflow max-datagram size not set'
info("### Setting and Verifying specific sflow max-datagram size "
"###\n")
s1.cmdCLI("sflow max-datagram-size 1000")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '1000' in out[word_index + SHOW_SFLOW_DATAGRAM_SIZE_INDEX],\
'Non-default sflow max-datagram size not set'
        info("### Unsetting specific max-datagram size set and verifying sflow"
             " max-datagram size getting set back to default ###\n")
s1.cmdCLI("no sflow max-datagram-size")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '1400' in out[word_index + SHOW_SFLOW_DATAGRAM_SIZE_INDEX],\
'sFlow max-datagram size not reset to default'
def test_sflow_polling(self):
'''
This function verifies correct setting/unsetting of sflow default and
non-default polling interval
'''
info("\n\n######## Test to Verify Correct Setting of sFlow Polling "
"interval ########\n")
s1 = self.net.switches[0]
info("### Verifying default sflow polling interval ###\n")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '30' in out[word_index + SHOW_SFLOW_POLLING_INTERVAL_INDEX],\
'Default sflow polling interval not set'
info("### Setting and Verifying specific sflow polling interval "
"###\n")
s1.cmdCLI("sflow polling 10")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '10' in out[word_index + SHOW_SFLOW_POLLING_INTERVAL_INDEX],\
'Non-default sflow polling interval not set'
info("### Unsetting specific polling interval set and verifying sflow" \
" polling interval getting set back to default ###\n")
s1.cmdCLI("no sflow polling")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '30' in out[word_index + SHOW_SFLOW_POLLING_INTERVAL_INDEX],\
'sFlow polling interval not reset to default'
def test_sflow_collector(self):
'''
        This function checks whether collector ip, port and vrf get correctly
        set/unset and whether default port and vrf values get set when not
        passed by the user
'''
info("\n\n######## Test to Verify Correct Setting of sFlow "
"Collector ########\n")
s1 = self.net.switches[0]
info("### Passing only IPv4 sflow collector ip and verifying 'ip, "
"default port and vrf' set ###\n")
s1.cmdCLI("sflow collector 255.255.255.255")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '255.255.255.255/6343/vrf_default' in out[word_index + 3], \
'collector ip with default port and vrf not set'
info("### Passing IPv4 sflow collector ip and port and verifying 'ip, "
"port and default vrf' set ###\n")
s1.cmdCLI("sflow collector 255.255.255.254 port 1234")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '255.255.255.254/1234/vrf_default' in out[word_index + 3], \
'collector ip and port with default vrf not set'
info("### Passing first sflow collector ip again and "
"verifying if duplicate collector error thrown ###\n")
ret = s1.cmdCLI("sflow collector 255.255.255.254 port 1234")
assert 'sFlow collector already present.' in ret, \
'Duplicate sFlow collector validation failed'
info("### Passing IPv4 sflow collector ip, port and default vrf and "
"verifying 'ip, port and vrf set ###\n")
s1.cmdCLI("sflow collector 255.255.255.253 port 5678 vrf vrf_default")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '255.255.255.253/5678/vrf_default' in out[word_index + 3], \
'collector ip, port and vrf not set'
info("### Passing fourth sflow collector ip and "
"verifying if error is thrown for more than 3 collectors ###\n")
ret = s1.cmdCLI("sflow collector 255.255.255.252")
assert 'Maximum of 3 sFlow collectors allowed.' in ret, \
'Maximum sFlow collectors validation failed'
info("### Removing second and third collectors and verifying the 'no'"
" form of the command ###\n")
s1.cmdCLI("no sflow collector 255.255.255.254 port 1234")
s1.cmdCLI("no sflow collector 255.255.255.253 port 5678 vrf "
"vrf_default")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert not ('255.255.255.254/1234/vrf_default' \
in out and '255.255.255.253/5678/vrf_default' in out), \
'sFlow collectors could not be deleted'
info("### Passing non-default vrf and verifying the default vrf "
"check ###\n")
ret = s1.cmdCLI("sflow collector 255.255.255.252 vrf vrf1")
assert 'Only vrf_default is permitted.' in ret, \
'collector ip, port and vrf not set'
info("### Passing IPv6 sflow collector ip, port and default vrf and "
"verifying 'ip, port and vrf set ###\n")
s1.cmdCLI("sflow collector ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff "
"port 65535 vrf vrf_default")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/65535/vrf_default' \
in out[word_index + 4], 'IPv6 collector ip, port and vrf \
not set'
def test_sflow_agent_intf(self):
'''
This function verifies correct setting/unsetting of L3 agent interface
and its family and also validates the interface before setting it.
'''
info("\n\n######## Test to Verify Correct Setting of sFlow Agent "
"Interface ########\n")
s1 = self.net.switches[0]
        info("### Verifying check for invalid interface ###\n")
ret = s1.cmdCLI("sflow agent-interface 100")
assert 'Invalid interface' in ret, 'Interface check not successful'
info("### Verifying correct setting of L3 agent interface ###\n")
s1.cmdCLI("interface 19")
s1.cmdCLI("ip address 10.10.10.10/32")
s1.cmdCLI("exit")
s1.cmdCLI("sflow agent-interface 19")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '19' in out[word_index + 5], 'L3 agent-interface not correctly \
set'
info("### Verifying correct setting of L3 agent interface and family"
" ###\n")
s1.cmdCLI("interface 29")
s1.cmdCLI("ip address 20.20.20.20/32")
s1.cmdCLI("exit")
s1.cmdCLI("sflow agent-interface 29 ipv4")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
assert '29' in out[word_index + 5] and 'ipv4' in out[word_index + 6], \
'L3 agent-interface and family not correctly set'
info("### Verifying correct unsetting of L3 agent interface and family"
" ###\n")
s1.cmdCLI("no sflow agent-interface")
ret = s1.cmdCLI("do show sflow")
out = ret.split('\n')
word_index = out.index('sFlow Configuration ')
        assert not ('29' in out[word_index + 5] and 'ipv4' in \
                    out[word_index + 6]), 'L3 agent-interface and family not \
correctly unset'
def test_sflow_show_interface(self):
'''
This function verifies the output of 'show sflow interface INTERFACE'
command across the configuration set in the sFlow table
'''
info("\n\n######## Test to Verify 'show sflow INTERFACE' for sFlow "
"Configuration ########\n")
s1 = self.net.switches[0]
s1.cmdCLI("sflow enable")
s1.cmdCLI("sflow sampling 20")
s1.cmdCLI("sflow collector 255.255.255.254 port 1234")
ret = s1.cmdCLI("do show sflow interface 1")
out = ret.split('\n')
word_index = out.index('sFlow Configuration - Interface 1')
assert 'sFlow enabled' in out[word_index + 2] \
and 'Sampling Rate 20' in out[word_index + 3] \
and 'Number of Samples 0' in out[word_index + 4], \
"### 'show sflow INTERFACE' verification failed###\n"
s1.cmdCLI("interface 1")
s1.cmdCLI("no sflow enable")
s1.cmdCLI("exit")
ret = s1.cmdCLI("do show sflow interface 1")
out = ret.split('\n')
word_index = out.index('sFlow Configuration - Interface 1')
assert 'sFlow disabled' in \
out[word_index + 2] , \
"### 'show sflow INTERFACE' verification failed###\n"
info("### 'show sflow INTERFACE' verification successful###\n")
'''Removing configuration'''
s1.cmdCLI("no sflow enable")
s1.cmdCLI("no sflow sampling")
s1.cmdCLI("no sflow collector 255.255.255.254 port 1234")
ret = s1.cmdCLI("do show running-config")
out = ret.splitlines()
out = [x.strip(' ') for x in out]
word_index = out.index('interface 1')
assert "no sflow enable" in out[word_index + 1], "Show running "\
"configuration check failed for interface level sFlow config"
ret = s1.cmdCLI("do show running-config interface 1")
assert "no sflow enable" in ret, "Show running config interface "\
"command check failed for interface level sFlow config"
ret = s1.cmdCLI("do show interface 1")
assert "sFlow is disabled" in ret, "Show interface check failed for "\
"interface level sFlow config"
info("### Interface level sFlow config verification successful###\n")
def test_sflow_show_running(self):
'''
This function verifies the output of 'show running-config' command
across the configuration set in the sFlow table
'''
info("\n\n######## Test to Verify 'show running-config' for sFlow "
"Configuration ########\n")
s1 = self.net.switches[0]
s1.cmdCLI("sflow enable")
s1.cmdCLI("sflow sampling 54321")
s1.cmdCLI("sflow agent-interface 19 ipv6")
ret = s1.cmdCLI("do show running-config")
out = ret.split('\n')
word_index = out.index('sflow enable')
assert 'sflow collector 255.255.255.255' in \
out[word_index+1] and 'sflow collector ' \
'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff port 65535 ' \
'vrf vrf_default' in out[word_index+2] and \
'sflow agent-interface 19 ipv6' in \
out[word_index+3] and 'sflow sampling 54321' in \
out[word_index+4], 'show running-config failure'
info("### 'show running-config' verification successful###\n\n\n")
class Test_vtysh_ct_sflow_cli:
def setup_class(cls):
Test_vtysh_ct_sflow_cli.test = sflowConfigTest()
def teardown_class(cls):
Test_vtysh_ct_sflow_cli.test.net.stop()
def test_sflow_global_status(self):
self.test.test_sflow_global_status()
def test_sflow_sampling_rate(self):
self.test.test_sflow_sampling_rate()
def test_sflow_max_datagram_size(self):
self.test.test_sflow_max_datagram_size()
def test_sflow_header_size(self):
self.test.test_sflow_header_size()
def test_sflow_polling(self):
self.test.test_sflow_polling()
def test_sflow_collector(self):
self.test.test_sflow_collector()
def test_sflow_agent_intf(self):
self.test.test_sflow_agent_intf()
def test_sflow_show_interface(self):
self.test.test_sflow_show_interface()
def test_sflow_show_running(self):
self.test.test_sflow_show_running()
def __del__(self):
del self.test
|
|
from random import uniform
import numpy as np
import theano
from theano import config
import theano.tensor as tensor
from theano.tensor.signal import downsample
from collections import OrderedDict, defaultdict
import os  # needed by preProBuildWordVocab for the word-clustering tool invocation
def randi(N):
""" get random integer in range [0, N) """
return int(uniform(0, N))
def merge_init_structs(s0, s1):
""" merge struct s1 into s0 """
for k in s1['model']:
assert (not k in s0['model']), 'Error: looks like parameter %s is trying to be initialized twice!' % (k, )
s0['model'][k] = s1['model'][k] # copy over the pointer
s0['update'].extend(s1['update'])
s0['regularize'].extend(s1['regularize'])
def initw(n,d): # initialize matrix of this size
magic_number = 0.1
return (np.random.rand(n,d) * 2 - 1) * magic_number # U[-0.1, 0.1]
def initwTh(n,d,magic_number=0.1): # initialize matrix of this size
return ((np.random.rand(n,d) * 2 - 1) * magic_number).astype(config.floatX) # U[-0.1, 0.1]
def _p(pp, name):
return '%s_%s' % (pp, name)
def numpy_floatX(data):
return np.asarray(data, dtype=config.floatX)
def accumNpDicts(d0, d1):
""" forall k in d0, d0 += d1 . d's are dictionaries of key -> numpy array """
for k in d1:
if k in d0:
d0[k] += d1[k]
else:
d0[k] = d1[k]
def zipp(params, tparams):
"""
When we reload the model. Needed for the GPU stuff.
"""
if type(tparams) == list:
for i in xrange(len(params)):
tparams[i].set_value(params[i])
else:
for kk, vv in params.iteritems():
tparams[kk].set_value(vv)
def unzip(zipped):
"""
When we pickle the model. Needed for the GPU stuff.
"""
if type(zipped) == list:
new_params = []
for vv in zipped:
new_params.append(vv.get_value())
else:
new_params = OrderedDict()
for kk, vv in zipped.iteritems():
new_params[kk] = vv.get_value()
return new_params
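# Layout used by forwardSubRoutine below (inferred from the code): each row of
# Hin is [1 (bias), x_t (d dims), h_{t-1} (d dims)]; IFOG[t] = Hin[t].dot(WLSTM)
# is split into four blocks of width d in the order i, f, o, g. The first three
# blocks pass through a sigmoid and g through a tanh, giving
# C[t] = i * g + f * C[t-1] and Hout[t] = o * tanh(C[t]).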
def forwardSubRoutine(Hin,Hout, X, WLSTM,IFOG,IFOGf,C,n,d):
for t in xrange(n):
prev = np.zeros(d) if t == 0 else Hout[t-1]
#tanhC_version = 1
Hin[t,0] = 1
Hin[t,1:1+d] = X[t]
Hin[t,1+d:] = prev
# compute all gate activations. dots:
IFOG[t] = Hin[t].dot(WLSTM)
IFOGf[t,:3*d] = 1.0/(1.0+np.exp(-IFOG[t,:3*d])) # sigmoids; these are the gates
IFOGf[t,3*d:] = np.tanh(IFOG[t, 3*d:]) # tanh
C[t] = IFOGf[t,:d] * IFOGf[t, 3*d:]
if t > 0: C[t] += IFOGf[t,d:2*d] * C[t-1]
Hout[t] = IFOGf[t,2*d:3*d] * np.tanh(C[t])
# Hout[t] = IFOGf[t,2*d:3*d] * C[t]
return Hin, Hout, IFOG,IFOGf,C
def softmax(x,axis = -1):
xs = x.shape
ndim = len(xs)
if axis == -1:
axis = ndim -1
z = np.max(x,axis=axis)
y = x - z[...,np.newaxis] # for numerical stability shift into good numerical range
e1 = np.exp(y)
p1 = e1 / np.sum(e1,axis=axis)[...,np.newaxis]
return p1
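# Illustrative check of the stable softmax above (values approximate):
#   softmax(np.array([1.0, 2.0, 3.0])) -> [0.0900, 0.2447, 0.6652]
# Subtracting the per-axis max before exponentiating keeps np.exp from
# overflowing on large inputs without changing the result.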
def cosineSim(x,y):
n1 = np.sqrt(np.sum(x**2))
n2 = np.sqrt(np.sum(y**2))
sim = x.T.dot(y)/(n1*n2) if n1 !=0.0 and n2!= 0.0 else 0.0
return sim
def sliceT(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n * dim:(n + 1) * dim]
return _x[:, n * dim:(n + 1) * dim]
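# sliceT picks out the n-th contiguous block of width `dim` along the last axis;
# it is used below to separate the stacked LSTM gate pre-activations (i, f, o, g).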
#Theano functions
def ReLU(x):
y = tensor.maximum(0.0, x)
return(y)
def Sigmoid(x):
y = tensor.nnet.sigmoid(x)
return(y)
def Tanh(x):
y = tensor.tanh(x)
return(y)
def Iden(x):
y = x
return(y)
def myMaxPool(x, ps=[],method='downsamp'):
if method == 'downsamp':
y = downsample.max_pool_2d(input= x, ds=ps, ignore_border=True)
elif method == 'max':
y = tensor.max(x, axis=3).max(axis=2)
return(y)
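# preProBuildWordVocab builds the vocabulary from a sentence iterator, keeping
# words whose count is >= word_count_threshold. Without class factorization it
# returns (wordtoix, ixtoword, bias_init_vector); with
# options['class_out_factoring'] enabled it also clusters words into classes
# (via the external `wcluster` tool) and returns the class mappings plus
# intra-/inter-class bias initializers.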
def preProBuildWordVocab(sentence_iterator, word_count_threshold, options = None):
import time
# count up all word counts so that we can threshold
# this shouldnt be too expensive of an operation
print 'preprocessing word counts and creating vocab based on word count threshold %d' % (word_count_threshold, )
t0 = time.time()
word_counts = {}
nsents = 0
for sent in sentence_iterator:
nsents += 1
for w in sent['tokens']:
word_counts[w] = word_counts.get(w, 0) + 1
vocab = [w for w in word_counts if word_counts[w] >= word_count_threshold]
print 'filtered words from %d to %d in %.2fs' % (len(word_counts), len(vocab), time.time() - t0)
if (options != None) and (options['class_out_factoring'] == 1):
print 'Clustering words into %d calsses for output factorization ' % (options['nClasses'])
t0 = time.time()
if options['class_inp_file'] == None:
      fInName = 'input' + options['dataset'] + 'TrainTok'
fIn = open(os.path.join(options['clust_tool_dir'], fInName + '.txt'),'w')
for st in sentence_iterator:
fIn.write(' '.join(st['tokens']))
fIn.write('\n')
fIn.close()
owd = os.getcwd()
os.chdir(options['clust_tool_dir'])
clust_cmd = './wcluster --text '+ fInName + '.txt --c ' + str(options['nClasses'])
print ' Invoking the clustering tool now...'
os.system(clust_cmd)
os.chdir(owd)
options['class_inp_file'] = os.path.join(options['clust_tool_dir'], fInName + '-c' + str(options['nClasses']) + '-p1.out/paths')
print 'Clustering is done in %.2fs ... Now onto processing the outputs' % (time.time() - t0)
clustOut = open(options['class_inp_file'],'r').readlines()
classes = defaultdict(list)
treetocix = {}
for cls in clustOut:
lineS = cls.split()
if lineS[0] not in treetocix:
treetocix[lineS[0]] = len(treetocix)
cix = treetocix[lineS[0]]
if int(lineS[2]) >= word_count_threshold:
classes[cix].append({'w':lineS[1],'c':int(lineS[2])})
# Re-arrange the vocabulary by grouping into classes
vocab = []
clstoix = {}
wordtocls= {}
class_counts = defaultdict(int)
for cls in classes:
# +1 is to compensate for insertion of '.' later
clstoix[cls] = {'strt':len(vocab)+1,'len':len(classes[cls])}
for wSt in classes[cls]:
class_counts[cls] += wSt['c']
wordtocls[wSt['w']] = cls
vocab.append(wSt['w'])
# Adding special STOP class containing only '.' to the class list
# #START# is not needed because it is force fed to the model and
# model doesn't ever have to produce the class output #START#
treetocix['STOP'] = len(treetocix)
cls = treetocix['STOP']
classes[cls] = [{'w':'.', 'c':nsents}]
wordtocls['.'] = cls
clstoix[cls] = {'strt':0,'len':1}
class_counts[cls] = nsents
cixtotree = {}
for treeHash in treetocix:
cixtotree[treetocix[treeHash]] = treeHash
print 'Class based factorization of output done %.2fs' % (time.time() - t0)
# with K distinct words:
# - there are K+1 possible inputs (START token and all the words)
# - there are K+1 possible outputs (END token and all the words)
# we use ixtoword to take predicted indices and map them to words for output visualization
# we use wordtoix to take raw words and get their index in word vector matrix
ixtoword = {}
ixtoword[0] = '.' # period at the end of the sentence. make first dimension be end token
wordtoix = {}
wordtoix['#START#'] = 0 # make first vector be the start token
ix = 1
for w in vocab:
wordtoix[w] = ix
ixtoword[ix] = w
ix += 1
word_counts['.'] = nsents
if (options == None) or (options['class_out_factoring'] == 0):
##############################################################################################
# compute bias vector, which is related to the log probability of the distribution
# of the labels (words) and how often they occur. We will use this vector to initialize
# the decoder weights, so that the loss function doesnt show a huge increase in performance
# very quickly (which is just the network learning this anyway, for the most part). This makes
# the visualizations of the cost function nicer because it doesn't look like a hockey stick.
# for example on Flickr8K, doing this brings down initial perplexity from ~2500 to ~170.
##############################################################################################
bias_init_vector = np.array([1.0*word_counts[ixtoword[i]] for i in ixtoword])
bias_init_vector /= np.sum(bias_init_vector) # normalize to frequencies
bias_init_vector = np.log(bias_init_vector)
bias_init_vector -= np.max(bias_init_vector) # shift to nice numeric range
return wordtoix, ixtoword, bias_init_vector
else:
##############################################################################################
    # The matrices below are used within the lstm generator module in order to correctly
# factorize the output and compute softmax locally within a class.
# This maps word id to its class id and class location info within the composite decode matrix
# idx 0 is for class id. idx 1 is for class start location. idx 2 is for class end location
# idx 3 is for word id within its class, needed to map class softmax to overall vocab
##############################################################################################
ixtoclsinfo = np.zeros((len(ixtoword),4),dtype=np.int32)
for cls in classes:
for i,wSt in enumerate(classes[cls]):
cS = clstoix[cls]['strt']
cE = clstoix[cls]['strt'] + clstoix[cls]['len']
ixtoclsinfo[wordtoix.get(wSt['w'],0), :] = [cls, cS, cE, i]
##############################################################################################
    # For class based output clustering, we need to initialize two sets of biases. One is the
    # inter-class bias reflecting the frequencies of each aggregate class. Next are the n
    # intra-class biases, which only cover the words within each class
##############################################################################################
bias_init_inter_class = np.array([1.0*class_counts[cls] for cls in clstoix])
bias_init_inter_class /= np.sum(bias_init_inter_class) # normalize to frequencies
bias_init_inter_class = np.log(bias_init_inter_class)
bias_init_inter_class -= np.max(bias_init_inter_class) # shift to nice numeric range
max_cls_len = np.max([clstoix[cls]['len'] for cls in clstoix])
bias_init_intra_class = np.zeros((1,len(classes),max_cls_len))
for cls in classes:
idx = np.arange(0, clstoix[cls]['len'])
bias_init_intra_class[0, cls, idx] = np.array([1.0*word_counts[wrd['w']] for wrd in classes[cls]])
bias_init_intra_class[0, cls, idx] /= np.sum(bias_init_intra_class[0,cls,idx]) # normalize to frequencies
bias_init_intra_class[0, cls, :] = np.log(bias_init_intra_class[0,cls,:])
bias_init_intra_class[0, cls, :] -= np.max(bias_init_intra_class[0,cls,:]) # shift to nice numeric range
return [wordtoix, classes] , [ixtoword, cixtotree, ixtoclsinfo], [bias_init_intra_class, bias_init_inter_class]
# ========================================================================================
# LSTM LAYER DEFINITION
# This is a simple forward propagating lstm layer with no bells and whistles.
# It can be used to encode an input sequence or for training mode in image captioning.
# Supports an arbitrarily deep lstm layer, but only forward propagation.
# ========================================================================================
def basic_lstm_layer(tparams, state_below, aux_input, use_noise, options, prefix='lstm', sched_prob_mask = []):
nsteps = state_below.shape[0]
h_depth = options.get('hidden_depth',1)
h_sz = options['hidden_size']
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
def _step(x_in, xp_m, h_, c_, xwout_, xAux):
preact = tensor.dot(sliceT(h_, 0, h_sz), tparams[_p(prefix, 'W_hid')])
if options.get('sched_sampling_mode',None) == None:
preact += x_in
else:
xy_emb = tensor.dot(xwout_, tparams[_p(prefix, 'W_inp')] + tparams[_p(prefix, 'b')])
temp_container = tensor.concatenate([xy_emb.dimshuffle('x',0,1), x_in.dimshuffle('x', 0, 1)],axis=0)
preact += temp_container[ xp_m, tensor.arange(n_samples),:]
if options.get('en_aux_inp',0):
preact += tensor.dot(xAux,tparams[_p(prefix,'W_aux')])
# preact += tparams[_p(prefix, 'b')]
h = [[]]*h_depth
c = [[]]*h_depth
for di in xrange(h_depth):
i = tensor.nnet.sigmoid(sliceT(preact, 0, h_sz))
f = tensor.nnet.sigmoid(sliceT(preact, 1, h_sz))
o = tensor.nnet.sigmoid(sliceT(preact, 2, h_sz))
c[di] = tensor.tanh(sliceT(preact, 3, h_sz))
c[di] = f * sliceT(c_, di, h_sz) + i * c[di]
h[di] = o * tensor.tanh(c[di])
if di < (h_depth - 1):
preact = tensor.dot(sliceT(h_, di+1, h_sz), tparams[_p(prefix, ('W_hid_' + str(di+1)))]) + \
tensor.dot(h[di], tparams[_p(prefix, ('W_inp_' + str(di+1)))])
c_out = tensor.concatenate(c,axis=1)
h_out = tensor.concatenate(h,axis=1)
y = tensor.dot(h[-1],tparams['Wd']) + tparams['bd']
xWIdx = tensor.argmax(y, axis=-1,keepdims=True)
xw_out = tparams['Wemb'][xWIdx.flatten()].reshape([n_samples,options['word_encoding_size']])
return h_out, c_out, xw_out
state_below = (tensor.dot(state_below, tparams[_p(prefix, 'W_inp')]) + tparams[_p(prefix, 'b')])
if options.get('en_aux_inp',0) == 0:
aux_input = []
if options.get('sched_sampling_mode',None) == None:
sched_prob_mask = tensor.alloc(1, nsteps, n_samples)
rval, updates = theano.scan(_step,
sequences=[state_below, sched_prob_mask],
outputs_info=[tensor.alloc(numpy_floatX(0.),
n_samples,
h_depth*h_sz),
tensor.alloc(numpy_floatX(0.),
n_samples,
h_depth*h_sz),
tensor.alloc(numpy_floatX(0.),
n_samples,
options['word_encoding_size'])
],
non_sequences = [aux_input] ,
name=_p(prefix, '_layers'),
n_steps=nsteps)
return rval, updates
# ======================== Dropout Layer =================================================
# Implements a simple dropout layer. When dropout is on it drops units according to the
# specified prob and scales the rest. NOP otherwise.
# ========================================================================================
def dropout_layer(inp, use_noise, trng, prob, shp):
scale = 1.0/(1.0-prob)
proj = tensor.switch(use_noise,
(inp *
trng.binomial(shp,
p=prob, n=1,
dtype=inp.dtype)*scale),
inp)
return proj
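# As written, when use_noise is 1 each unit of `inp` is kept with probability
# `prob` (Bernoulli mask drawn from `trng`) and the kept units are rescaled by
# 1/(1-prob); when use_noise is 0 the input passes through unchanged.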
# ======================== Multimodal cosine sim =========================================
# Embeds Image feature vector and computes the cosine sim and softmax with a given sent emb
# ========================================================================================
def multimodal_cosine_sim_softmax(xI, sent_emb, tparams, sm_f):
embImg = tensor.dot(xI, tparams['WIemb']) + tparams['b_Img']
sim_score = tensor.dot(embImg,sent_emb.T)/tensor.dot(embImg.norm(2,axis=1)[:,None],sent_emb.norm(2,axis=1)[None,:])
# Now to implement the cost function!
  # We can use two kinds of cost, ranking hinge loss or negative log likelihood
  # Below we implement negative log_likelihood
smooth_factor = tensor.as_tensor_variable(numpy_floatX(sm_f), name='sm_f')
probMatch = tensor.nnet.softmax(sim_score*smooth_factor)
return probMatch,sim_score
# ======================== Multimodal euclidean distance =================================
# Embeds Image feature vector and computes the euclidean distance and softmax with a given sent emb
# ========================================================================================
def multimodal_euc_dist_softmax(xI, sent_emb, tparams, sm_f):
embImg = tensor.dot(xI, tparams['WIemb']) + tparams['b_Img']
euc_dist = ((embImg[:,None,:] - sent_emb[None,:,:])**2).sum(axis=-1) / (xI.shape[0] * xI.shape[1])
# Now to implement the cost function!
  # We can use two kinds of cost, ranking hinge loss or negative log likelihood
  # Below we implement negative log_likelihood
smooth_factor = tensor.as_tensor_variable(numpy_floatX(sm_f), name='sm_f')
probMatch = tensor.nnet.softmax(-euc_dist*smooth_factor)
return probMatch, euc_dist
|
|
import logging
import os
from pprint import pformat
from typing import Union, List
import pandas as pd
import yaml
from ludwig.backend import Backend, initialize_backend
from ludwig.constants import HYPEROPT, TRAINING, VALIDATION, TEST, COMBINED, \
LOSS, TYPE, RAY
from ludwig.features.feature_registries import output_type_registry
from ludwig.hyperopt.execution import get_build_hyperopt_executor
from ludwig.hyperopt.sampling import get_build_hyperopt_sampler
from ludwig.hyperopt.utils import update_hyperopt_params_with_defaults, \
print_hyperopt_results, save_hyperopt_stats
from ludwig.utils.defaults import default_random_seed, merge_with_defaults
from ludwig.utils.misc_utils import get_from_registry
logger = logging.getLogger(__name__)
def hyperopt(
config: Union[str, dict],
dataset: Union[str, dict, pd.DataFrame] = None,
training_set: Union[str, dict, pd.DataFrame] = None,
validation_set: Union[str, dict, pd.DataFrame] = None,
test_set: Union[str, dict, pd.DataFrame] = None,
training_set_metadata: Union[str, dict] = None,
data_format: str = None,
experiment_name: str = 'hyperopt',
model_name: str = 'run',
skip_save_training_description: bool = False,
skip_save_training_statistics: bool = False,
skip_save_model: bool = False,
skip_save_progress: bool = False,
skip_save_log: bool = False,
skip_save_processed_input: bool = True,
skip_save_unprocessed_output: bool = False,
skip_save_predictions: bool = False,
skip_save_eval_stats: bool = False,
skip_save_hyperopt_statistics: bool = False,
output_directory: str = 'results',
gpus: Union[str, int, List[int]] = None,
gpu_memory_limit: int = None,
allow_parallel_threads: bool = True,
backend: Union[Backend, str] = None,
random_seed: int = default_random_seed,
debug: bool = False,
**kwargs,
) -> List[dict]:
    """This method performs a hyperparameter optimization.
# Inputs
:param config: (Union[str, dict]) config which defines
the different parameters of the model, features, preprocessing and
training. If `str`, filepath to yaml configuration file.
:param dataset: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing the entire dataset to be used in the experiment.
If it has a split column, it will be used for splitting (0 for train,
1 for validation, 2 for test), otherwise the dataset will be
randomly split.
:param training_set: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing training data.
:param validation_set: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing validation data.
:param test_set: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing test data.
:param training_set_metadata: (Union[str, dict], default: `None`)
metadata JSON file or loaded metadata. Intermediate preprocessed
structure containing the mappings of the input
dataset created the first time an input file is used in the same
directory with the same name and a '.meta.json' extension.
:param data_format: (str, default: `None`) format to interpret data
sources. Will be inferred automatically if not specified. Valid
formats are `'auto'`, `'csv'`, `'df'`, `'dict'`, `'excel'`, `'feather'`,
`'fwf'`, `'hdf5'` (cache file produced during previous training),
`'html'` (file containing a single HTML `<table>`), `'json'`, `'jsonl'`,
`'parquet'`, `'pickle'` (pickled Pandas DataFrame), `'sas'`, `'spss'`,
`'stata'`, `'tsv'`.
    :param experiment_name: (str, default: `'hyperopt'`) name for
the experiment.
:param model_name: (str, default: `'run'`) name of the model that is
being used.
:param skip_save_training_description: (bool, default: `False`) disables
saving the description JSON file.
:param skip_save_training_statistics: (bool, default: `False`) disables
saving training statistics JSON file.
:param skip_save_model: (bool, default: `False`) disables
saving model weights and hyperparameters each time the model
improves. By default Ludwig saves model weights after each epoch
the validation metric improves, but if the model is really big
that can be time consuming. If you do not want to keep
the weights and just find out what performance a model can get
with a set of hyperparameters, use this parameter to skip it,
but the model will not be loadable later on and the returned model
will have the weights obtained at the end of training, instead of
the weights of the epoch with the best validation performance.
:param skip_save_progress: (bool, default: `False`) disables saving
progress each epoch. By default Ludwig saves weights and stats
after each epoch for enabling resuming of training, but if
        the model is really big that can be time consuming and will use
        twice as much space, use this parameter to skip it, but training
cannot be resumed later on.
:param skip_save_log: (bool, default: `False`) disables saving
TensorBoard logs. By default Ludwig saves logs for the TensorBoard,
but if it is not needed turning it off can slightly increase the
overall speed.
    :param skip_save_processed_input: (bool, default: `True`) if input
        dataset is provided it is preprocessed and cached by saving HDF5
        and JSON files to avoid running the preprocessing again. If this
        parameter is `True`, the HDF5 and JSON files are not saved.
:param skip_save_unprocessed_output: (bool, default: `False`) by default
predictions and their probabilities are saved in both raw
unprocessed numpy files containing tensors and as postprocessed
CSV files (one for each output feature). If this parameter is True,
only the CSV ones are saved and the numpy ones are skipped.
:param skip_save_predictions: (bool, default: `False`) skips saving test
predictions CSV files.
:param skip_save_eval_stats: (bool, default: `False`) skips saving test
statistics JSON file.
:param skip_save_hyperopt_statistics: (bool, default: `False`) skips saving
hyperopt stats file.
:param output_directory: (str, default: `'results'`) the directory that
will contain the training statistics, TensorBoard logs, the saved
model and the training progress files.
:param gpus: (list, default: `None`) list of GPUs that are available
for training.
:param gpu_memory_limit: (int, default: `None`) maximum memory in MB to
allocate per GPU device.
:param allow_parallel_threads: (bool, default: `True`) allow TensorFlow
to use multithreading parallelism to improve performance at
the cost of determinism.
:param backend: (Union[Backend, str]) `Backend` or string name
of backend to use to execute preprocessing / training steps.
:param random_seed: (int: default: 42) random seed used for weights
initialization, splits and any other random function.
    :param debug: (bool, default: `False`) if `True` turns on `tfdbg` with
`inf_or_nan` checks.
# Return
:return: (List[dict]) The results for the hyperparameter optimization
"""
backend = initialize_backend(backend)
# check if config is a path or a dict
if isinstance(config, str): # assume path
with open(config, 'r') as def_file:
config_dict = yaml.safe_load(def_file)
else:
config_dict = config
# merge config with defaults
config = merge_with_defaults(config_dict)
if HYPEROPT not in config:
raise ValueError(
"Hyperopt Section not present in config"
)
hyperopt_config = config["hyperopt"]
update_hyperopt_params_with_defaults(hyperopt_config)
# print hyperopt config
logger.info(pformat(hyperopt_config, indent=4))
logger.info('\n')
sampler = hyperopt_config["sampler"]
executor = hyperopt_config["executor"]
parameters = hyperopt_config["parameters"]
split = hyperopt_config["split"]
output_feature = hyperopt_config["output_feature"]
metric = hyperopt_config["metric"]
goal = hyperopt_config["goal"]
######################
# check validity of output_feature / metric/ split combination
######################
if split == TRAINING:
if training_set is None and (
config['preprocessing']['split_probabilities'][0]
<= 0):
raise ValueError(
'The data for the specified split for hyperopt "{}" '
'was not provided, '
'or the split amount specified in the preprocessing section '
'of the config is not greater than 0'.format(split)
)
elif split == VALIDATION:
if validation_set is None and (
config['preprocessing']['split_probabilities'][1]
<= 0):
raise ValueError(
'The data for the specified split for hyperopt "{}" '
'was not provided, '
'or the split amount specified in the preprocessing section '
'of the config is not greater than 0'.format(split)
)
elif split == TEST:
if test_set is None and (
config['preprocessing']['split_probabilities'][2]
<= 0):
raise ValueError(
'The data for the specified split for hyperopt "{}" '
'was not provided, '
'or the split amount specified in the preprocessing section '
'of the config is not greater than 0'.format(split)
)
else:
raise ValueError(
'unrecognized hyperopt split "{}". '
'Please provide one of: {}'.format(
split, {TRAINING, VALIDATION, TEST}
)
)
if output_feature == COMBINED:
if metric != LOSS:
raise ValueError(
'The only valid metric for "combined" output feature is "loss"'
)
else:
output_feature_names = set(
of['name'] for of in config['output_features']
)
if output_feature not in output_feature_names:
raise ValueError(
'The output feature specified for hyperopt "{}" '
'cannot be found in the config. '
'Available ones are: {} and "combined"'.format(
output_feature, output_feature_names
)
)
output_feature_type = None
for of in config['output_features']:
if of['name'] == output_feature:
output_feature_type = of[TYPE]
feature_class = get_from_registry(
output_feature_type,
output_type_registry
)
if metric not in feature_class.metric_functions:
            # todo v0.4: allow users to specify also metrics from the overall
            # and per class metrics from the training stats and, in general,
            # any postprocessed metric
raise ValueError(
'The specified metric for hyperopt "{}" is not a valid metric '
'for the specified output feature "{}" of type "{}". '
'Available metrics are: {}'.format(
metric,
output_feature,
output_feature_type,
feature_class.metric_functions.keys()
)
)
hyperopt_sampler = get_build_hyperopt_sampler(
sampler[TYPE]
)(goal, parameters, **sampler)
hyperopt_executor = get_build_hyperopt_executor(
executor[TYPE]
)(hyperopt_sampler, output_feature, metric, split, **executor)
hyperopt_results = hyperopt_executor.execute(
config,
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
training_set_metadata=training_set_metadata,
data_format=data_format,
experiment_name=experiment_name,
model_name=model_name,
# model_load_path=None,
# model_resume_path=None,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
skip_save_eval_stats=skip_save_eval_stats,
output_directory=output_directory,
gpus=gpus,
gpu_memory_limit=gpu_memory_limit,
allow_parallel_threads=allow_parallel_threads,
backend=backend,
random_seed=random_seed,
debug=debug,
**kwargs
)
if backend.is_coordinator():
print_hyperopt_results(hyperopt_results)
if not skip_save_hyperopt_statistics:
if not os.path.exists(output_directory):
os.makedirs(output_directory)
hyperopt_stats = {
'hyperopt_config': hyperopt_config,
'hyperopt_results': hyperopt_results
}
save_hyperopt_stats(hyperopt_stats, output_directory)
logger.info('Hyperopt stats saved to: {}'.format(output_directory))
logger.info('Finished hyperopt')
return hyperopt_results
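# A minimal, illustrative invocation (file names are hypothetical and the config
# is assumed to contain a "hyperopt" section):
#
#     results = hyperopt(
#         config='config.yaml',
#         dataset='train.csv',
#         output_directory='results',
#     )
#     # `results` is the list of per-trial dicts returned by the executor.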
|
|
import collections
import logging
import numpy as np
from typing import Any, List, Dict, Tuple, TYPE_CHECKING, Union
from ray.rllib.env.base_env import _DUMMY_AGENT_ID
from ray.rllib.evaluation.collectors.sample_collector import _SampleCollector
from ray.rllib.evaluation.episode import MultiAgentEpisode
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
from ray.rllib.policy.view_requirement import ViewRequirement
from ray.rllib.utils.annotations import override
from ray.rllib.utils.debug import summarize
from ray.rllib.utils.typing import AgentID, EpisodeID, EnvID, PolicyID, \
TensorType
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.util.debug import log_once
_, tf, _ = try_import_tf()
torch, _ = try_import_torch()
if TYPE_CHECKING:
from ray.rllib.agents.callbacks import DefaultCallbacks
logger = logging.getLogger(__name__)
def to_float_np_array(v: List[Any]) -> np.ndarray:
if torch and torch.is_tensor(v[0]):
raise ValueError
arr = np.array(v)
if arr.dtype == np.float64:
return arr.astype(np.float32) # save some memory
return arr
class _AgentCollector:
"""Collects samples for one agent in one trajectory (episode).
The agent may be part of a multi-agent environment. Samples are stored in
lists including some possible automatic "shift" buffer at the beginning to
be able to save memory when storing things like NEXT_OBS, PREV_REWARDS,
etc.., which are specified using the trajectory view API.
"""
_next_unroll_id = 0 # disambiguates unrolls within a single episode
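    # The first `shift_before` slots of every buffer hold pre-episode context
    # (e.g. the initial observation added via `add_init_obs`); `build()` slices
    # past them (or shifts into them) when assembling the SampleBatch.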
def __init__(self, shift_before: int = 0):
self.shift_before = max(shift_before, 1)
self.buffers: Dict[str, List] = {}
# The simple timestep count for this agent. Gets increased by one
# each time a (non-initial!) observation is added.
self.count = 0
def add_init_obs(self, episode_id: EpisodeID, agent_index: int,
env_id: EnvID, t: int, init_obs: TensorType,
view_requirements: Dict[str, ViewRequirement]) -> None:
"""Adds an initial observation (after reset) to the Agent's trajectory.
Args:
episode_id (EpisodeID): Unique ID for the episode we are adding the
initial observation for.
agent_index (int): Unique int index (starting from 0) for the agent
within its episode.
env_id (EnvID): The environment index (in a vectorized setup).
t (int): The time step (episode length - 1). The initial obs has
ts=-1(!), then an action/reward/next-obs at t=0, etc..
init_obs (TensorType): The initial observation tensor (after
`env.reset()`).
            view_requirements (Dict[str, ViewRequirement]): The view
                requirements used to build this agent's buffers.
"""
if SampleBatch.OBS not in self.buffers:
self._build_buffers(
single_row={
SampleBatch.OBS: init_obs,
SampleBatch.EPS_ID: episode_id,
SampleBatch.AGENT_INDEX: agent_index,
"env_id": env_id,
"t": t,
})
self.buffers[SampleBatch.OBS].append(init_obs)
self.buffers[SampleBatch.EPS_ID].append(episode_id)
self.buffers[SampleBatch.AGENT_INDEX].append(agent_index)
self.buffers["env_id"].append(env_id)
self.buffers["t"].append(t)
def add_action_reward_next_obs(self, values: Dict[str, TensorType]) -> \
None:
"""Adds the given dictionary (row) of values to the Agent's trajectory.
Args:
values (Dict[str, TensorType]): Data dict (interpreted as a single
row) to be added to buffer. Must contain keys:
SampleBatch.ACTIONS, REWARDS, DONES, and NEXT_OBS.
"""
assert SampleBatch.OBS not in values
values[SampleBatch.OBS] = values[SampleBatch.NEXT_OBS]
del values[SampleBatch.NEXT_OBS]
for k, v in values.items():
if k not in self.buffers:
self._build_buffers(single_row=values)
self.buffers[k].append(v)
self.count += 1
def build(self, view_requirements: Dict[str, ViewRequirement]) -> \
SampleBatch:
"""Builds a SampleBatch from the thus-far collected agent data.
If the episode/trajectory has no DONE=True at the end, will copy
the necessary n timesteps at the end of the trajectory back to the
beginning of the buffers and wait for new samples coming in.
SampleBatches created by this method will be ready for postprocessing
by a Policy.
Args:
            view_requirements (Dict[str, ViewRequirement]): The view
requirements dict needed to build the SampleBatch from the raw
buffers (which may have data shifts as well as mappings from
view-col to data-col in them).
Returns:
SampleBatch: The built SampleBatch for this agent, ready to go into
postprocessing.
"""
# TODO: measure performance gains when using a UsageTrackingDict
# instead of a SampleBatch for postprocessing (this would eliminate
# copies (for creating this SampleBatch) of many unused columns for
# no reason (not used by postprocessor)).
batch_data = {}
np_data = {}
for view_col, view_req in view_requirements.items():
# Create the batch of data from the different buffers.
data_col = view_req.data_col or view_col
# Some columns don't exist yet (get created during postprocessing).
# -> skip.
if data_col not in self.buffers:
continue
# OBS are already shifted by -1 (the initial obs starts one ts
# before all other data columns).
shift = view_req.data_rel_pos - \
(1 if data_col == SampleBatch.OBS else 0)
if data_col not in np_data:
np_data[data_col] = to_float_np_array(self.buffers[data_col])
# Shift is exactly 0: Send trajectory as is.
if shift == 0:
data = np_data[data_col][self.shift_before:]
# Shift is positive: We still need to 0-pad at the end here.
elif shift > 0:
data = to_float_np_array(
self.buffers[data_col][self.shift_before + shift:] + [
np.zeros(
shape=view_req.space.shape,
dtype=view_req.space.dtype) for _ in range(shift)
])
# Shift is negative: Shift into the already existing and 0-padded
# "before" area of our buffers.
else:
data = np_data[data_col][self.shift_before + shift:shift]
if len(data) > 0:
batch_data[view_col] = data
batch = SampleBatch(batch_data)
if SampleBatch.UNROLL_ID not in batch.data:
# TODO: (sven) Once we have the additional
# model.preprocess_train_batch in place (attention net PR), we
# should not even need UNROLL_ID anymore:
# Add "if SampleBatch.UNROLL_ID in view_requirements:" here.
batch.data[SampleBatch.UNROLL_ID] = np.repeat(
_AgentCollector._next_unroll_id, batch.count)
_AgentCollector._next_unroll_id += 1
# This trajectory is continuing -> Copy data at the end (in the size of
# self.shift_before) to the beginning of buffers and erase everything
# else.
if not self.buffers[SampleBatch.DONES][-1]:
# Copy data to beginning of buffer and cut lists.
if self.shift_before > 0:
for k, data in self.buffers.items():
self.buffers[k] = data[-self.shift_before:]
self.count = 0
return batch
def _build_buffers(self, single_row: Dict[str, TensorType]) -> None:
"""Builds the buffers for sample collection, given an example data row.
Args:
single_row (Dict[str, TensorType]): A single row (keys=column
names) of data to base the buffers on.
"""
for col, data in single_row.items():
if col in self.buffers:
continue
shift = self.shift_before - (1 if col in [
SampleBatch.OBS, SampleBatch.EPS_ID, SampleBatch.AGENT_INDEX,
"env_id", "t"
] else 0)
# Python primitive or dict (e.g. INFOs).
if isinstance(data, (int, float, bool, str, dict)):
self.buffers[col] = [0 for _ in range(shift)]
# np.ndarray, torch.Tensor, or tf.Tensor.
else:
shape = data.shape
dtype = data.dtype
if torch and isinstance(data, torch.Tensor):
self.buffers[col] = \
[torch.zeros(shape, dtype=dtype, device=data.device)
for _ in range(shift)]
elif tf and isinstance(data, tf.Tensor):
self.buffers[col] = \
[tf.zeros(shape=shape, dtype=dtype)
for _ in range(shift)]
else:
self.buffers[col] = \
[np.zeros(shape=shape, dtype=dtype)
for _ in range(shift)]
class _PolicyCollector:
"""Collects already postprocessed (single agent) samples for one policy.
Samples come in through already postprocessed SampleBatches, which
contain single episode/trajectory data for a single agent and are then
appended to this policy's buffers.
"""
def __init__(self):
"""Initializes a _PolicyCollector instance."""
self.buffers: Dict[str, List] = collections.defaultdict(list)
# The total timestep count for all agents that use this policy.
# NOTE: This is not an env-step count (across n agents). AgentA and
# agentB, both using this policy, acting in the same episode and both
# doing n steps would increase the count by 2*n.
self.count = 0
def add_postprocessed_batch_for_training(
self, batch: SampleBatch,
view_requirements: Dict[str, ViewRequirement]) -> None:
"""Adds a postprocessed SampleBatch (single agent) to our buffers.
Args:
batch (SampleBatch): A single agent (one trajectory) SampleBatch
to be added to the Policy's buffers.
            view_requirements (Dict[str, ViewRequirement]): The view
requirements for the policy. This is so we know, whether a
view-column needs to be copied at all (not needed for
training).
"""
for view_col, data in batch.items():
# TODO(ekl) how do we handle this for policies that don't extend
# Torch / TF Policy template (no inference of view reqs)?
# Skip columns that are not used for training.
# if view_col not in view_requirements or \
# not view_requirements[view_col].used_for_training:
# continue
self.buffers[view_col].extend(data)
# Add the agent's trajectory length to our count.
self.count += batch.count
def build(self):
"""Builds a SampleBatch for this policy from the collected data.
Also resets all buffers for further sample collection for this policy.
Returns:
SampleBatch: The SampleBatch with all thus-far collected data for
this policy.
"""
# Create batch from our buffers.
batch = SampleBatch(self.buffers)
assert SampleBatch.UNROLL_ID in batch.data
# Clear buffers for future samples.
self.buffers.clear()
# Reset count to 0.
self.count = 0
return batch
class _PolicyCollectorGroup:
def __init__(self, policy_map):
self.policy_collectors = {
pid: _PolicyCollector()
for pid in policy_map.keys()
}
self.count = 0
class _SimpleListCollector(_SampleCollector):
"""Util to build SampleBatches for each policy in a multi-agent env.
Input data is per-agent, while output data is per-policy. There is an M:N
mapping between agents and policies. We retain one local batch builder
per agent. When an agent is done, then its local batch is appended into the
corresponding policy batch for the agent's policy.
"""
def __init__(self,
policy_map: Dict[PolicyID, Policy],
clip_rewards: Union[bool, float],
callbacks: "DefaultCallbacks",
multiple_episodes_in_batch: bool = True,
rollout_fragment_length: int = 200):
"""Initializes a _SimpleListCollector instance.
Args:
policy_map (Dict[str, Policy]): Maps policy ids to policy
instances.
clip_rewards (Union[bool, float]): Whether to clip rewards before
postprocessing (at +/-1.0) or the actual value to +/- clip.
callbacks (DefaultCallbacks): RLlib callbacks.
"""
self.policy_map = policy_map
self.clip_rewards = clip_rewards
self.callbacks = callbacks
self.multiple_episodes_in_batch = multiple_episodes_in_batch
self.rollout_fragment_length = rollout_fragment_length
self.large_batch_threshold: int = max(
1000, rollout_fragment_length *
10) if rollout_fragment_length != float("inf") else 5000
# Whenever we observe a new episode+agent, add a new
# _SingleTrajectoryCollector.
self.agent_collectors: Dict[Tuple[EpisodeID, AgentID],
_AgentCollector] = {}
# Internal agent-key-to-policy-id map.
self.agent_key_to_policy_id = {}
# Pool of used/unused PolicyCollectorGroups (attached to episodes for
# across-episode multi-agent sample collection).
self.policy_collector_groups = []
# Agents to collect data from for the next forward pass (per policy).
self.forward_pass_agent_keys = {pid: [] for pid in policy_map.keys()}
self.forward_pass_size = {pid: 0 for pid in policy_map.keys()}
# Maps episode ID to the (non-built) env steps taken in this episode.
self.episode_steps: Dict[EpisodeID, int] = \
collections.defaultdict(int)
# Maps episode ID to MultiAgentEpisode.
self.episodes: Dict[EpisodeID, MultiAgentEpisode] = {}
@override(_SampleCollector)
def episode_step(self, episode_id: EpisodeID) -> None:
episode = self.episodes[episode_id]
self.episode_steps[episode_id] += 1
episode.length += 1
assert episode.batch_builder is not None
env_steps = episode.batch_builder.count
num_observations = sum(
c.count for c in episode.batch_builder.policy_collectors.values())
if num_observations > self.large_batch_threshold and \
log_once("large_batch_warning"):
logger.warning(
"More than {} observations in {} env steps for "
"episode {} ".format(num_observations, env_steps, episode_id) +
"are buffered in the sampler. If this is more than you "
"expected, check that that you set a horizon on your "
"environment correctly and that it terminates at some point. "
"Note: In multi-agent environments, `rollout_fragment_length` "
"sets the batch size based on (across-agents) environment "
"steps, not the steps of individual agents, which can result "
"in unexpectedly large batches." +
("Also, you may be waiting for your Env to "
"terminate (batch_mode=`complete_episodes`). Make sure it "
"does at some point."
if not self.multiple_episodes_in_batch else ""))
@override(_SampleCollector)
def add_init_obs(self, episode: MultiAgentEpisode, agent_id: AgentID,
env_id: EnvID, policy_id: PolicyID, t: int,
init_obs: TensorType) -> None:
# Make sure our mappings are up to date.
agent_key = (episode.episode_id, agent_id)
if agent_key not in self.agent_key_to_policy_id:
self.agent_key_to_policy_id[agent_key] = policy_id
else:
assert self.agent_key_to_policy_id[agent_key] == policy_id
policy = self.policy_map[policy_id]
view_reqs = policy.model.inference_view_requirements if \
getattr(policy, "model", None) else policy.view_requirements
# Add initial obs to Trajectory.
assert agent_key not in self.agent_collectors
# TODO: determine exact shift-before based on the view-req shifts.
self.agent_collectors[agent_key] = _AgentCollector()
self.agent_collectors[agent_key].add_init_obs(
episode_id=episode.episode_id,
agent_index=episode._agent_index(agent_id),
env_id=env_id,
t=t,
init_obs=init_obs,
view_requirements=view_reqs)
self.episodes[episode.episode_id] = episode
if episode.batch_builder is None:
episode.batch_builder = self.policy_collector_groups.pop() if \
self.policy_collector_groups else _PolicyCollectorGroup(
self.policy_map)
self._add_to_next_inference_call(agent_key)
@override(_SampleCollector)
def add_action_reward_next_obs(self, episode_id: EpisodeID,
agent_id: AgentID, env_id: EnvID,
policy_id: PolicyID, agent_done: bool,
values: Dict[str, TensorType]) -> None:
# Make sure, episode/agent already has some (at least init) data.
agent_key = (episode_id, agent_id)
assert self.agent_key_to_policy_id[agent_key] == policy_id
assert agent_key in self.agent_collectors
# Include the current agent id for multi-agent algorithms.
if agent_id != _DUMMY_AGENT_ID:
values["agent_id"] = agent_id
# Add action/reward/next-obs (and other data) to Trajectory.
self.agent_collectors[agent_key].add_action_reward_next_obs(values)
if not agent_done:
self._add_to_next_inference_call(agent_key)
@override(_SampleCollector)
def total_env_steps(self) -> int:
return sum(a.count for a in self.agent_collectors.values())
@override(_SampleCollector)
def get_inference_input_dict(self, policy_id: PolicyID) -> \
Dict[str, TensorType]:
policy = self.policy_map[policy_id]
keys = self.forward_pass_agent_keys[policy_id]
buffers = {k: self.agent_collectors[k].buffers for k in keys}
view_reqs = policy.model.inference_view_requirements if \
getattr(policy, "model", None) else policy.view_requirements
input_dict = {}
for view_col, view_req in view_reqs.items():
# Create the batch of data from the different buffers.
data_col = view_req.data_col or view_col
time_indices = \
view_req.data_rel_pos - (
1 if data_col in [SampleBatch.OBS, "t", "env_id",
SampleBatch.EPS_ID,
SampleBatch.AGENT_INDEX] else 0)
data_list = []
for k in keys:
if data_col not in buffers[k]:
self.agent_collectors[k]._build_buffers({
data_col: view_req.space.sample()
})
data_list.append(buffers[k][data_col][time_indices])
input_dict[view_col] = np.array(data_list)
self._reset_inference_calls(policy_id)
return input_dict
@override(_SampleCollector)
def postprocess_episode(self,
episode: MultiAgentEpisode,
is_done: bool = False,
check_dones: bool = False,
build: bool = False) -> None:
episode_id = episode.episode_id
policy_collector_group = episode.batch_builder
# TODO: (sven) Once we implement multi-agent communication channels,
# we have to resolve the restriction of only sending other agent
# batches from the same policy to the postprocess methods.
# Build SampleBatches for the given episode.
pre_batches = {}
for (eps_id, agent_id), collector in self.agent_collectors.items():
# Build only if there is data and agent is part of given episode.
if collector.count == 0 or eps_id != episode_id:
continue
pid = self.agent_key_to_policy_id[(eps_id, agent_id)]
policy = self.policy_map[pid]
pre_batch = collector.build(policy.view_requirements)
pre_batches[agent_id] = (policy, pre_batch)
# Apply reward clipping before calling postprocessing functions.
if self.clip_rewards is True:
for _, (_, pre_batch) in pre_batches.items():
pre_batch["rewards"] = np.sign(pre_batch["rewards"])
elif self.clip_rewards:
for _, (_, pre_batch) in pre_batches.items():
pre_batch["rewards"] = np.clip(
pre_batch["rewards"],
a_min=-self.clip_rewards,
a_max=self.clip_rewards)
post_batches = {}
for agent_id, (_, pre_batch) in pre_batches.items():
# Entire episode is said to be done.
# Error if no DONE at end of this agent's trajectory.
if is_done and check_dones and \
not pre_batch[SampleBatch.DONES][-1]:
raise ValueError(
"Episode {} terminated for all agents, but we still don't "
"don't have a last observation for agent {} (policy "
"{}). ".format(
episode_id, agent_id, self.agent_key_to_policy_id[(
episode_id, agent_id)]) +
"Please ensure that you include the last observations "
"of all live agents when setting done[__all__] to "
"True. Alternatively, set no_done_at_end=True to "
"allow this.")
# If (only this?) agent is done, erase its buffer entirely.
if pre_batch[SampleBatch.DONES][-1]:
del self.agent_collectors[(episode_id, agent_id)]
other_batches = pre_batches.copy()
del other_batches[agent_id]
pid = self.agent_key_to_policy_id[(episode_id, agent_id)]
policy = self.policy_map[pid]
if any(pre_batch["dones"][:-1]) or len(set(
pre_batch["eps_id"])) > 1:
raise ValueError(
"Batches sent to postprocessing must only contain steps "
"from a single trajectory.", pre_batch)
# Call the Policy's Exploration's postprocess method.
post_batches[agent_id] = pre_batch
if getattr(policy, "exploration", None) is not None:
policy.exploration.postprocess_trajectory(
policy, post_batches[agent_id],
getattr(policy, "_sess", None))
post_batches[agent_id] = policy.postprocess_trajectory(
post_batches[agent_id], other_batches, episode)
if log_once("after_post"):
logger.info(
"Trajectory fragment after postprocess_trajectory():\n\n{}\n".
format(summarize(post_batches)))
# Append into policy batches and reset.
from ray.rllib.evaluation.rollout_worker import get_global_worker
for agent_id, post_batch in sorted(post_batches.items()):
pid = self.agent_key_to_policy_id[(episode_id, agent_id)]
policy = self.policy_map[pid]
self.callbacks.on_postprocess_trajectory(
worker=get_global_worker(),
episode=episode,
agent_id=agent_id,
policy_id=pid,
policies=self.policy_map,
postprocessed_batch=post_batch,
original_batches=pre_batches)
# Add the postprocessed SampleBatch to the policy collectors for
# training.
policy_collector_group.policy_collectors[
pid].add_postprocessed_batch_for_training(
post_batch, policy.view_requirements)
env_steps = self.episode_steps[episode_id]
policy_collector_group.count += env_steps
if is_done:
del self.episode_steps[episode_id]
del self.episodes[episode_id]
# Make PolicyCollectorGroup available for more agent batches in
# other episodes. Do not reset count to 0.
self.policy_collector_groups.append(policy_collector_group)
else:
self.episode_steps[episode_id] = 0
# Build a MultiAgentBatch from the episode and return.
if build:
return self._build_multi_agent_batch(episode)
def _build_multi_agent_batch(self, episode: MultiAgentEpisode) -> \
Union[MultiAgentBatch, SampleBatch]:
ma_batch = {}
for pid, collector in episode.batch_builder.policy_collectors.items():
if collector.count > 0:
ma_batch[pid] = collector.build()
# Create the batch.
ma_batch = MultiAgentBatch.wrap_as_needed(
ma_batch, env_steps=episode.batch_builder.count)
# PolicyCollectorGroup is empty.
episode.batch_builder.count = 0
return ma_batch
@override(_SampleCollector)
def try_build_truncated_episode_multi_agent_batch(self) -> \
List[Union[MultiAgentBatch, SampleBatch]]:
batches = []
# Loop through ongoing episodes and see whether their length plus
# what's already in the policy collectors reaches the fragment-len.
for episode_id, episode in self.episodes.items():
env_steps = episode.batch_builder.count + \
self.episode_steps[episode_id]
# Reached the fragment-len -> We should build an MA-Batch.
if env_steps >= self.rollout_fragment_length:
assert env_steps == self.rollout_fragment_length
# If we reached the fragment-len only because of `episode_id`
# (still ongoing) -> postprocess `episode_id` first.
if episode.batch_builder.count < self.rollout_fragment_length:
self.postprocess_episode(episode, is_done=False)
# Build the MA-batch and return.
batch = self._build_multi_agent_batch(episode=episode)
batches.append(batch)
return batches
def _add_to_next_inference_call(
self, agent_key: Tuple[EpisodeID, AgentID]) -> None:
"""Adds an Agent key (episode+agent IDs) to the next inference call.
This makes sure that the agent's current data (in the trajectory) is
used for generating the next input_dict for a
`Policy.compute_actions()` call.
Args:
            agent_key (Tuple[EpisodeID, AgentID]): A unique agent key (across
vectorized environments).
"""
pid = self.agent_key_to_policy_id[agent_key]
idx = self.forward_pass_size[pid]
if idx == 0:
self.forward_pass_agent_keys[pid].clear()
self.forward_pass_agent_keys[pid].append(agent_key)
self.forward_pass_size[pid] += 1
def _reset_inference_calls(self, policy_id: PolicyID) -> None:
"""Resets internal inference input-dict registries.
Calling `self.get_inference_input_dict()` after this method is called
would return an empty input-dict.
Args:
policy_id (PolicyID): The policy ID for which to reset the
inference pointers.
"""
self.forward_pass_size[policy_id] = 0
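if __name__ == "__main__":
    # Minimal sketch (not part of RLlib) of how `_AgentCollector.build()` slices
    # its per-column buffers. Assumes a single float column, `shift_before=1`
    # (one padding slot at the front, as used for OBS), and four collected steps.
    shift_before = 1
    buf = np.array([0.0, 1.0, 2.0, 3.0, 4.0])  # index 0 is the padding/init slot
    # shift == 0: plain trajectory view (e.g. REWARDS).
    print(buf[shift_before:])  # [1. 2. 3. 4.]
    # shift < 0: reach one step back into the zero-padded "before" area
    # (e.g. PREV_REWARDS).
    shift = -1
    print(buf[shift_before + shift:shift])  # [0. 1. 2. 3.]
    # shift > 0: reach one step ahead and zero-pad the end.
    shift = 1
    print(np.concatenate([buf[shift_before + shift:], np.zeros(shift)]))  # [2. 3. 4. 0.]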
|
|
#! /usr/bin/env python
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import re
import pandas as pd
#Table of elements with mcnp libraries
# Symbol Z Mass mcnp
_ELEMENTINFO = """H 1 1.008 1001.60c
He 2 4.003 2000.01
He-3 2 3.016 2003.60c
Li 3 6.940 3000.01
Be 4 9.013 4009.60c
B 5 10.820 5000.01
B-10 5 10.013 5010.74c
B-11 5 11.009 5011.74c
C 6 12.011 6000.60c
N 7 14.008 7014.60c
O 8 16.000 8016.60c
F 9 19.000 9019.60c
Na 11 22.991 11023.60c
Mg 12 24.320 12000.60c
Al 13 26.980 13027.60c
Si 14 28.090 14000.60c
P 15 30.975 15031.60c
S 16 32.066 16032.60c
Cl 17 35.457 17000.60c
K 19 39.100 19000.60c
Ca 20 40.080 20000.60c
Ti 22 47.900 22000.60c
V 23 50.950 23000.60c
Cr 24 52.010 24000.50c
Mn 25 54.940 25055.60c
Fe 26 55.850 26000.55c
Co 27 58.940 27059.60c
Ni 28 58.710 28000.50c
Cu 29 63.540 29000.50c
Zn 30 65.380 30000.40c
Br 35 79.916 35000.01
Sr 38 87.630 38000.01
Zr 40 91.220 40000.60c
Nb 41 92.910 41093.60c
Mo 42 95.950 42000.60c
Ag 47 107.880 47000.01
Cd 48 112.410 48000.50c
Sn 50 118.700 50000.42c
Cs 55 131.764 55133.60c
Ba 56 137.360 56138.60c
La 57 138.920 57000.01
Ce 58 140.130 58000.01
Sm 62 150.350 62000.01
Eu 63 152.000 63000.42c
Gd 64 157.250 64000.35c
Ta 73 180.950 73181.60c
W 74 183.860 74000.55c
Pb 82 207.210 82000.50c
Th 90 232.038 90232.74c
U 92 238.029 92000.01
U-234 92 238.029 92234.74c
U-235 92 238.029 92235.74c
U-238 92 238.029 92238.74c"""
ELEMENTS = {}
for line in _ELEMENTINFO.split('\n'):
items = line.strip().split()
ELEMENTS[items[0]] = dict(Z=int(items[1]),
A=float(items[2]),
mcnp=items[3])
# ELEMENTS is exposed directly, but here
# are some helper routines
def atomic_mass(element):
"Return the atomic mass of element 'element'"
return ELEMENTS[element]["A"]
def atomic_number(element):
"Return the atomic number of element 'element'"
return ELEMENTS[element]["Z"]
def mcnp_library(element):
"Return the mcnp library string for element 'element'"
return ELEMENTS[element]["mcnp"]
# Some natural (mole-fraction) abundances because MCNP5 and MCNP6 have dropped some natural
# libraries
ABUNDANCES = {'B-10': 0.199,
'B-11': 0.801,
'U-238': 0.99275,
'U-235': 0.0072,
'U-234': 0.00054,
}
# Regex to match an element (with an optional isotope tag)
# followed by an optional float
_RE_ELEMENTS = re.compile(r"""
([A-Z][a-z]?(?:-\d+)?) #element w/ opt isotope
\s* #opt whitespace
([-+]?\d*\.?\d+(?:[eE][-+]?\d+)?)? #opt weight
""", re.VERBOSE)
class ElementalComposition(dict):
"""Class for elemental compositions.
Uses elements in the composition as keys,
and each element's mole fraction as the value.
Can be initialized with a string,
such as 'C2H2OH', or with another
    ElementalComposition instance (which
makes a copy), or with nothing,
in which case the composition is an empty dict.
Note: Parentheses are not allowed in a
formula string, but spaces may be used
to separate an element with an isotope tag
from its mole fraction.
>>> comp = ElementalComposition('B-10H3')
>>> print(comp)
{'H': 3.0, 'B-10': 1}
>>> comp = ElementalComposition('CaCO3MgCO3')
>>> print(comp)
{'Mg': 1, 'Ca': 1, 'C': 2, 'O': 6.0}
"""
def __init__(self, input=None):
not_a_dict = not_a_string = False
try:
elements = list(input.keys())
            for key in elements:
self[key] = input[key]
except AttributeError:
not_a_dict = True
if type(input) is type('foo'):
self._parse_formula(input)
else:
not_a_string = True
if input and not_a_dict and not_a_string:
raise ValueError(
"Invalid input to ElementalComposition: "
"{0}".format(input))
@property
def molar_mass(self):
return sum(
ELEMENTS[x]['A']*self[x] for x in self)
def norm_fracs_to_one(self):
fracsum = sum(self[element] for element in self)
for element in self:
self[element] /= fracsum
def _parse_formula(self, formula):
for m in _RE_ELEMENTS.finditer(formula):
element = m.group(1)
wt = m.group(2)
if wt:
wt = float(wt)
else:
wt = 1
try:
self[element] += wt
except KeyError:
self[element] = wt
def separate_boron(self):
if 'B' in self:
for isotope in ('B-10', 'B-11'):
molefrac = ABUNDANCES[isotope]*self['B']
if isotope in self:
self[isotope] += molefrac
else:
self[isotope] = molefrac
del self['B']
def separate_uranium(self):
if 'U' in self:
for isotope in ('U-238', 'U-235', 'U-234'):
molefrac = ABUNDANCES[isotope]*self['U']
if isotope in self:
self[isotope] += molefrac
else:
self[isotope] = molefrac
del self['U']
def remove_zero_fracs(self):
empties = [isotope for isotope in self if self[isotope] == 0.0]
for isotope in empties:
del self[isotope]
def add_compositions_by_mole_fracs(comps, mole_fracs, norm=True):
"""Combine a list of compositions according to their mole fractions.
>>> comps = [ElementalComposition('CaCO3MgCO3')]
>>> comps.append(ElementalComposition('B-10'))
>>> fracs = (0.99902, 0.00092)
>>> total = add_compositions_by_mole_fracs(comps, fracs)
>>> print(total)
{'C': 0.19998158364627788, 'Mg': 0.099990791823138941, 'B-10': 9.2081768610526152e-05, 'Ca': 0.099990791823138941, 'O': 0.5999447509388337}
"""
sumcomp = ElementalComposition()
for comp, frac in zip(comps, mole_fracs):
for element in comp:
try:
sumcomp[element] += comp[element]*frac
except KeyError:
sumcomp[element] = comp[element]*frac
if norm:
sumcomp.norm_fracs_to_one()
return sumcomp
def add_compositions_by_mass_fracs(comps, mass_fracs, norm=True):
"""Combine a list of compositions according to their mass fractions.
>>> comps = [ElementalComposition('CaCO3MgCO3')]
>>> comps.append(ElementalComposition('B-10'))
>>> fracs = (0.99995, 0.00005)
>>> total = add_compositions_by_mass_fracs(comps, fracs)
>>> print(total)
{'C': 0.19998158251894854, 'Mg': 0.099990791259474271, 'B-10': 9.2087405257383425e-05, 'Ca': 0.099990791259474271, 'O': 0.59994474755684557}
"""
mole_fracs = _mass_fracs_to_mol_fracs(comps, mass_fracs)
return add_compositions_by_mole_fracs(comps, mole_fracs, norm)
def _mass_fracs_to_mol_fracs(comps, mass_fracs):
mole_fracs = [f/c.molar_mass for (c, f) in zip(comps,mass_fracs)]
tot = float(sum(mole_fracs))
return [f/tot for f in mole_fracs]
_CARD_HEADER = """c
c ===================================================================
c ==== Material # {0:d}
c ===================================================================
c Name = {1}
c Density = {2:.4f} g/cc
c"""
def get_material_card(name, density, composition, material_number=1):
"""Return a string containing an mcnp material card.
'name': name of the material
'density': density of the material in g/mL
'composition': An ElementComposition object describing the material.
'material_number': MCNP material number for the card.
>>> comps = [ElementalComposition('CaCO3MgCO3')]
>>> comps.append(ElementalComposition('B-10'))
>>> fracs = (0.99995, 0.00005)
>>> total = add_compositions_by_mass_fracs(comps, fracs)
>>> card = get_material_card('formation',2.851, total)
>>> print(card)
c
c ===================================================================
c ==== Material # 1
c ===================================================================
c Name = formation
c Density = 2.8510 g/cc
c
m1 6000.60c 0.199982 12000.60c 0.099991 5010.60c 0.000092
20000.60c 0.099991 8016.60c 0.599945
"""
header = _CARD_HEADER.format(material_number, name, density)
lines = header.split('\n')
m = "m{0:d}".format(material_number)
leader = "{0:>5}".format(m)
line = [leader]
composition.separate_boron() # no 5000 library in MCNP5 or MCNP6
composition.separate_uranium() # no 92000 library in MCNP5 or MCNP6
composition.remove_zero_fracs() # no need to list isotopes that aren't there
elements = list(composition.keys())
tmpdf = pd.DataFrame({'Z':[atomic_number(element) for element in elements],
'A':[atomic_mass(element) for element in elements]})
tmpdf.index = elements
sortdf = tmpdf.sort_values(['Z', 'A'])
sorted_elements = sortdf.index
#for n,element in enumerate(composition):
for n,element in enumerate(sorted_elements):
line.append("{0:>10} {1:.7e} ".format(
mcnp_library(element),
composition[element]))
if (n+1)%3 == 0:
lines.append("".join(line))
line = [" "]
remainder = "".join(line).rstrip()
if remainder:
lines.append(remainder)
return "\n".join(lines)
if __name__ == "__main__":
import pprint
pprint.pprint(ELEMENTS)
import doctest
doctest.testmod()
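    # Extra, illustrative demo (not part of the original doctests): build a
    # hypothetical borated-water material card using only this module's helpers.
    water = ElementalComposition('H2O')
    boric_acid = ElementalComposition('H3BO3')
    borated_water = add_compositions_by_mass_fracs(
        [water, boric_acid], (0.95, 0.05))
    print(get_material_card('borated water (5 wt% boric acid)',
                            1.00, borated_water, material_number=2))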
|
|
from __future__ import print_function
import tornado.web
from tornado import gen
import shlex # for calling spamc
from tornado.gen import Task, coroutine
import tornado.process
import tornado.ioloop
import json
class MainHandler(tornado.web.RequestHandler):
def prepare(self):
self.PREDEFINED_HEADERS = {
'Content-Type': 'text/plain; charset=UTF-8',
'MIME-Version': 1.0
}
def _get_proc(self, full_report):
"""Get process object based on type of process
"""
STREAM = tornado.process.Subprocess.STREAM
if full_report:
# full report version
cur_proc = tornado.process.Subprocess(
shlex.split("spamc"),
stdin=STREAM,
stdout=STREAM,
stderr=STREAM
)
else:
# normal version
command = "spamc -c"
args = shlex.split(command)
cur_proc = tornado.process.Subprocess(
args, stdin=STREAM, stdout=STREAM, stderr=STREAM
)
return cur_proc
def _format_header_val(self, key, value):
""" format each header value
"""
try:
key = str(key).capitalize()
if isinstance(value, (list, tuple)):
out = key+": "
if len(value) == 0:
return out
for v in value:
out += str(v) + ", "
out = str(out[0:-2]) + "\n"
return out
else:
return key+": " + str(value) + "\n"
except:
return ""
def _get_predefined_headers(self):
out = ""
for k, v in self.PREDEFINED_HEADERS.items():
out += self._format_header_val(k, v)
return out
def _url_params_to_text(self, data):
out = ""
dont_include = ['message']
for k, v in data.items():
if k not in dont_include:
out += self._format_header_val(k, v)
return out
def _get_custom_headers(self, data):
"""convert all key-value pairs attained from the data
that are not 'message' into headers
"""
header = ""
header += self._get_predefined_headers()
header += self._url_params_to_text(data)
if data.get('email'):
header += self._format_header_val("From", data.get('email'))
if data.get('project_name'):
header += self._format_header_val("Subject",
data.get('project_name'))
return header
@coroutine
def call_spamassassin(self, data, full_report=False):
"""
Wrapper around subprocess call using Tornado's Subprocess class.
"""
message_with_header = self._get_custom_headers(data) \
+ "\n" \
+ str(data['message'])
stdin_data = str.encode(message_with_header)
cur_proc = self._get_proc(full_report)
yield Task(cur_proc.stdin.write, stdin_data)
cur_proc.stdin.close()
result, error = yield [
Task(cur_proc.stdout.read_until_close),
Task(cur_proc.stderr.read_until_close)
]
cur_proc.stdout.close()
cur_proc.stderr.close()
return result, error
def _handle_result(self, res):
"""return HAM if spam_assassin determines message is ham. else SPAM
"""
str_result = bytes.decode(res)
result_val = eval(str_result.strip())
if result_val < 1:
return json.dumps({
"decision": "HAM",
"message": "The given message is HAM"
})
else:
return json.dumps({
"decision": "SPAM",
"message": "The given message is SPAM"
})
def _file_to_data(self, file_contents):
"""Convert input data as a file into the data format
usable for call_spamassassin
"""
data = {}
lineno = 0
lines = file_contents.splitlines()
for l in lines:
lineno += 1
if l == "\n" or l == "":
break
parts = l.split(":")
if len(parts) > 1:
key = parts[0].rstrip('\n')
key = key.lower()
value = parts[1].rstrip('\n')
data[key] = value
message = '\n'.join(lines[lineno:])
message = message.rstrip('\n')
data['message'] = message
return data
@gen.coroutine
def post(self):
try:
data = None
if self.get_argument('is_file', False):
file_contents = self.request.files['file'][0]['body']
file_contents = file_contents.decode('utf-8', 'ignore')
data = self._file_to_data(file_contents)
else:
data = json.loads(self.request.body.decode('utf-8', 'ignore'))
if 'message' in data or self.get_argument('is_file', False):
if self.get_argument('full_report', False):
result, error = yield gen.Task(self.call_spamassassin,
data,
full_report=True)
self.write(result)
else:
result, error = yield gen.Task(self.call_spamassassin,
data,
full_report=False)
self.set_header("Content-Type", "application/json")
self.write(self._handle_result(result))
self.finish()
else:
self.write("No Message Given\n")
self.finish()
except:
self.set_status(400)
self.finish("Malformed Request")
class TeacherHandler(MainHandler):
def prepare(self):
self.PREDEFINED_HEADERS = {
'Content-Type': 'text/plain; charset=UTF-8',
'MIME-Version': 1.0
}
def _get_proc(self, is_spam):
""" create and return process object based on whether input is spam or ham
"""
STREAM = tornado.process.Subprocess.STREAM
command = "sudo ./call_sa-learn.sh"
if is_spam:
command += " --spam"
else:
command += " --ham"
args = shlex.split(command)
proc = tornado.process.Subprocess(
args, stdin=STREAM, stdout=STREAM, stderr=STREAM
)
return proc
@coroutine
def teach_spamassassin(self, data):
"""
teach spam assassin whether current message is spam or not.
NOTE:
http://askubuntu.com/questions/159007/how-do-i-run-specific-sudo-commands-without-a-password
        sa-learn requires root privileges. Per the link above, sudoers is configured
        so that this specific command (call_sa-learn.sh) can run via sudo without a
        password prompt.
"""
cur_proc = self._get_proc(data.pop('is_spam'))
message_with_header = self._get_custom_headers(data) \
+ "\n" + str(data.get('message', ""))
stdin_data = str.encode(message_with_header)
yield Task(cur_proc.stdin.write, stdin_data)
cur_proc.stdin.close()
result, error = yield [
Task(cur_proc.stdout.read_until_close),
Task(cur_proc.stderr.read_until_close)
]
cur_proc.stdout.close()
cur_proc.stderr.close()
return result, error
@gen.coroutine
def post(self):
data = json.loads(self.request.body.decode('utf-8'))
if 'message' in data:
result, error = yield gen.Task(self.teach_spamassassin, data)
if not error:
self.set_header("Content-Type", "application/json")
self.write(
json.dumps({
"status": "Learned",
"message": "Spam Assassin trained using given message"
})
)
self.finish()
else:
self.set_status(400)
self.finish("Malformed Request")
else:
self.write("No Message Given\n")
self.finish()
application = tornado.web.Application([
(r"/", MainHandler),
(r"/teach", TeacherHandler)
], debug=True, autoreload=True)
if __name__ == "__main__":
application.listen(8000)
tornado.ioloop.IOLoop.instance().start()
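# Example request against a locally running instance (illustrative payload; the
# service listens on port 8000 as configured above):
#
#   curl -X POST http://localhost:8000/ \
#        -d '{"email": "someone@example.com", "project_name": "Demo", "message": "Hello"}'
#
# The "/teach" endpoint expects the same JSON shape plus an "is_spam" boolean.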
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from base64 import b64encode
from mutagen.flac import FLAC, Picture
from mutagen.id3 import (
APIC, ID3, MCDI, TALB, TCOM, TCON, TDRC, TIT2, TLEN, TPE1, TPE2, TPOS, TPUB, TRCK, UFID
)
from mutagentools.flac import to_json_dict
from mutagentools.flac.convert import (
convert_flac_to_id3,
convert_generic_to_txxx,
convert_encoder_to_txxx,
convert_encoded_by_to_txxx,
convert_encoder_settings_to_txxx,
convert_disc_number_to_tpos,
convert_track_number_to_trck,
convert_genre_to_tcon,
convert_length_to_tlen,
convert_mbid_to_ufid,
convert_album_to_talb,
convert_organization_to_tpub,
convert_albumartist_to_tpe2,
convert_artist_to_tpe1,
convert_date_to_tdrc,
convert_title_to_tit2,
convert_composer_to_tcom,
convert_picture_to_apic,
convert_toc_to_mcdi,
)
import json
import mock
import os
import six
import struct
import unittest
from mock import patch
class MainTestCase(unittest.TestCase):
def test_to_json_dict(self):
"""Tests formatting FLAC metadata as a JSON-compatible dict."""
fixture = FLAC(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'fixtures/fixture.flac'))
result = to_json_dict(fixture)
# test that the album was imported
self.assertIn('album', result.keys())
self.assertEqual(1, len(result.get('album')))
self.assertIn('Album', result.get('album'))
# test that both artists were imported
self.assertIn('artist', result.keys())
self.assertEqual(2, len(result.get('artist')))
self.assertIn('Artist 1', result.get('artist'))
self.assertIn('Artist 2', result.get('artist'))
# test that pictures aren't included by default
self.assertNotIn('pictures', result.keys())
def test_to_json_dict_pictures(self):
"""Tests formatting FLAC metadata as a JSON-compatible dict with pictures."""
fixture = FLAC(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'fixtures/fixture.flac'))
result = to_json_dict(fixture, include_pics=True)
self.assertIn('pictures', result.keys())
self.assertEqual(1, len(result.get('pictures')))
op = fixture.pictures[0]
p = result.get('pictures')[0]
# test the attributes
self.assertEqual(b64encode(op.data).decode('utf-8'), p.get('data'))
self.assertEqual(op.desc, p.get('desc'))
self.assertEqual(op.mime, p.get('mime'))
self.assertEqual(op.type, p.get('type'))
self.assertEqual('COVER_FRONT', p.get('type_friendly'))
def test_to_json_dict_flatten(self):
"""Tests formatting FLAC metadata as a JSON-compatible flat dictionary."""
fixture = FLAC(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'fixtures/fixture.flac'))
result = to_json_dict(fixture, flatten=True, include_pics=True)
# test that the album was flattened
self.assertIn('album', result.keys())
self.assertEqual('Album',result.get('album'))
# test that both artists were imported
self.assertIn('artist', result.keys())
self.assertEqual(2, len(result.get('artist')))
self.assertIn('Artist 1', result.get('artist'))
self.assertIn('Artist 2', result.get('artist'))
# test that the picture was not flattened
self.assertIsNotNone(result.get('pictures', None))
self.assertTrue(isinstance(result.get('pictures'), list))
self.assertEqual(1, len(result.get('pictures')))
class FullConversionTestCase(unittest.TestCase):
def test_convert_flac_to_id3(self):
"""Tests full conversion of a series of FLAC key-value pairs into an array of ID3 tags."""
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "fixtures/sample-flac-tags.json")) as f:
fixture = json.load(f)
flac_mock = mock.MagicMock()
flac_mock.tags = fixture
# create mock pictures
cover_front = Picture()
cover_front.type = 3
cover_front.desc = 'Cover Front'
cover_front.mime = 'image/jpeg'
cover_front.data = [0x00] * 8
cover_back = Picture()
cover_back.type = 4
cover_back.desc = 'Cover Back'
cover_back.mime = 'image/jpeg'
cover_back.data = [0x00] * 8
flac_mock.pictures = [cover_front, cover_back]
result = convert_flac_to_id3(flac_mock)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, list))
# form it into a single ID3 object
id3 = ID3()
for tag in result:
id3.add(tag)
# album artist/artist/composer
self.assertEqual(fixture.get('albumartist'), id3.get('TPE2'))
self.assertEqual(fixture.get('artist'), id3.get('TPE1'))
self.assertEqual(fixture.get('composer'), id3.get('TCOM'))
# album/disk id/date/genre/publisher org
self.assertEqual(fixture.get('album'), id3.get('TALB'))
self.assertEqual(['1/1'], id3.get('TPOS'))
self.assertEqual(fixture.get('date'), id3.get('TDRC'))
self.assertEqual([fixture.get('genre')] + list(fixture.get('style')), id3.get('TCON'))
self.assertEqual([fixture.get('organization')], id3.get('TPUB'))
# album toc and musicbrainz id
self.assertIsNotNone(id3.get('MCDI'))
self.assertEqual(28, struct.unpack('>I', id3.get('MCDI').data[0:4])[0])
self.assertEqual(fixture.get('mbid').encode('ascii'), id3.get('UFID:http://musicbrainz.org').data)
# track/track number/length
self.assertEqual(fixture.get('title'), id3.get('TIT2'))
self.assertEqual(['01/28'], id3.get('TRCK'))
self.assertEqual(['152826'], id3.get('TLEN'))
# make sure that CRC got dropped
self.assertIsNone(id3.get('TXXX:crc'))
# encoding tags
self.assertEqual(fixture.get('encoder'), id3.get('TXXX:original encoder'))
self.assertEqual(fixture.get('encoded by'), id3.get('TXXX:originally encoded by'))
self.assertEqual(fixture.get('encoder settings'), id3.get('TXXX:original encoder settings'))
# test that miscellaneous tags got brought in
self.assertEqual(fixture.get('source'), id3.get('TXXX:source'))
self.assertEqual(fixture.get('profile'), id3.get('TXXX:profile'))
self.assertEqual(fixture.get('cddb disc id'), id3.get('TXXX:cddb disc id'))
self.assertEqual(fixture.get('accurateripdiscid'), id3.get('TXXX:accurateripdiscid'))
self.assertEqual(fixture.get('accurateripresult'), id3.get('TXXX:accurateripresult'))
# test that 'album artist' and 'author' are removed
self.assertNotIn('TXXX:album artist', id3.keys())
self.assertNotIn('TXXX:author', id3.keys())
# test that there's only a fixed number of TXXX tags there
self.assertEqual(8, len(list(filter(lambda t: t.FrameID == "TXXX", id3.values()))))
# test that pictures work
apic_list = list(filter(lambda t: t.FrameID == 'APIC', id3.values()))
apic_front = list(filter(lambda p: p.type == 3, apic_list))[0]
apic_back = list(filter(lambda p: p.type == 4, apic_list))[0]
self.assertEqual(2, len(apic_list))
# test front picture
self.assertEqual(cover_front.type, apic_front.type)
self.assertEqual(cover_front.mime, apic_front.mime)
self.assertEqual(cover_front.desc, apic_front.desc)
self.assertEqual(bytes(cover_front.data), apic_front.data)
# test back picture
self.assertEqual(cover_back.type, apic_back.type)
self.assertEqual(cover_back.mime, apic_back.mime)
self.assertEqual(cover_back.desc, apic_back.desc)
self.assertEqual(bytes(cover_back.data), apic_back.data)
def test_convert_flac_to_id3_track(self):
"""
        Test converting FLAC tags to ID3 tags for complicated tracknumber tags.
        Sometimes we'll get a malformed tracknumber tag of the form %d/%d, which we need
        to expand and resolve properly into the correct ID3 format.
"""
flac_mock = mock.MagicMock()
flac_mock.tags = { 'tracknumber': '1/5' }
id3 = ID3()
list(map(lambda t: id3.add(t), convert_flac_to_id3(flac_mock)))
self.assertEqual(['01/05'], id3.get('TRCK'))
def test_convert_flac_to_id3_adds_tpos(self):
"""Test that convert_flac_to_id3 adds TPOS if not present."""
flac_mock = mock.MagicMock()
flac_mock.tags = {}
id3 = ID3()
list(map(lambda t: id3.add(t), convert_flac_to_id3(flac_mock)))
self.assertEqual(['1/1'], id3.get('TPOS'))
def test_convert_flac_to_id3_duplicates(self):
"""Tests that variations or ordering of duplicated tags don't mess everything up."""
flac_mock = mock.MagicMock()
flac_mock.tags = {
'albumartist': 'Album Artist',
'artist': 'Artist',
'date': '2017'
}
# test if albumartist is present if it works
id3 = ID3()
list(map(lambda t: id3.add(t), convert_flac_to_id3(flac_mock)))
self.assertEqual(['Album Artist'], id3.get('TPE2'))
self.assertEqual(['Artist'], id3.get('TPE1'))
self.assertEqual(2017, int(str(id3.get('TDRC').text[0])))
def test_convert_tracktotal(self):
"""Tests that converting a track number and total number of tracks is accomplished."""
tags = {
'tracknumber': '1',
'totaltracks': '3',
'tracktotal': '5',
}
flac_mock = mock.MagicMock()
flac_mock.tags = tags
id3 = ID3()
list(map(lambda t: id3.add(t), convert_flac_to_id3(flac_mock)))
# make sure that no TXXX tags are created
self.assertEqual(0, len(list(
filter(lambda f: f.FrameID == 'TXXX', id3.values()
))))
def test_convert_tracktotal_no_total(self):
"""Tests that total track numbers are detected properly."""
# test that the track got populated singularly
flac_mock = mock.MagicMock()
flac_mock.tags = { 'tracknumber': '1' }
id3 = ID3()
list(map(lambda t: id3.add(t), convert_flac_to_id3(flac_mock)))
self.assertEqual('01', id3.get('TRCK'))
def test_convert_disctotal_no_total(self):
"""Tests that total disc numbers something something."""
# test that the track got populated singularly
flac_mock = mock.MagicMock()
flac_mock.tags = { 'discnumber': '1' }
id3 = ID3()
list(map(lambda t: id3.add(t), convert_flac_to_id3(flac_mock)))
self.assertEqual('1', id3.get('TPOS'))
def test_convert_disctotal(self):
"""Tests that total disc numbers something something."""
# test that the track got populated singularly
flac_mock = mock.MagicMock()
flac_mock.tags = {
'discnumber': '1',
'totaldiscs': '3',
'disctotal': '5',
}
id3 = ID3()
list(map(lambda t: id3.add(t), convert_flac_to_id3(flac_mock)))
self.assertEqual('1/3', id3.get('TPOS'))
# make sure that no TXXX tags are created
self.assertEqual(0, len(list(
filter(lambda f: f.FrameID == 'TXXX', id3.values()
))))
class IndividualConversionTestCase(unittest.TestCase):
def test_convert_generic_to_txxx(self):
"""Test converting a generic FLAC Vorbis comment into a TXXX tag."""
key, value = "accurateripdiscid", "028-0030bb28-03c552e0-8b0b7f1c-1"
result = convert_generic_to_txxx(key, value)
self.assertIsNotNone(result)
self.assertEqual(key, result.desc)
self.assertEqual([value], result.text)
def test_convert_encoder_to_txxx(self):
"""Test converting an encoder tag to a TXXX tag."""
value = "FLAC 1.2.1"
result = convert_encoder_to_txxx(value)
self.assertIsNotNone(result)
self.assertEqual("original encoder", result.desc)
self.assertEqual([value], result.text)
def test_convert_encoded_by_to_txxx(self):
"""Test converting an encoded by tag to a TXXX tag."""
value = "dBpoweramp Release 14.2"
result = convert_encoded_by_to_txxx(value)
self.assertIsNotNone(result)
self.assertEqual("originally encoded by", result.desc)
self.assertEqual([value], result.text)
def test_convert_encoder_settings_to_txxx(self):
"""Test converting an encoder settings tag to a TXXX tag."""
value = "-compression-level-5 -verify"
result = convert_encoder_settings_to_txxx(value)
self.assertIsNotNone(result)
self.assertEqual("original encoder settings", result.desc)
self.assertEqual([value], result.text)
def test_convert_disc_number_to_tpos(self):
"""Test converting a FLAC disc number to TPOS."""
# first test with only a disc number
disc_number = "2"
result = convert_disc_number_to_tpos(disc_number)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, TPOS))
self.assertEqual(["2"], result.text)
# test with disc number and total discs
disc_number = "2"
total_discs = "5"
result = convert_disc_number_to_tpos(disc_number, total_discs)
self.assertEqual(["2/5"], result.text)
def test_convert_tracknumber_to_trck(self):
"""Test converting a FLAC track number to TRCK."""
# first test with only a track number
track_number = "1"
result = convert_track_number_to_trck(track_number)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, TRCK))
self.assertEqual(["01"], result.text)
# next, test with both track number and track count
track_number = "3"
total_tracks = "9"
result = convert_track_number_to_trck(track_number, total_tracks)
self.assertEqual(["03/09"], result.text)
# next, futz around with arrays
result = convert_track_number_to_trck([1], [13])
self.assertEqual(["01/13"], result.text)
def test_convert_genre_to_tcon(self):
"""Test converting a FLAC genre tag to a TCON ID3 tag."""
fixture = "Genre"
result = convert_genre_to_tcon(fixture)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, TCON))
self.assertEqual([fixture], result.text)
fixture = ["Genre 1", "Genre 2"]
fixture_s = ["Style 1", "Style 2"]
result = convert_genre_to_tcon(fixture, fixture_s)
self.assertEqual(["Genre 1", "Genre 2", "Style 1", "Style 2"], result.text)
def test_convert_length_to_tlen(self):
"""Test converting a FLAC length tag to a TLEN ID3 tag."""
# test with single instance
fixture = 12345
result = convert_length_to_tlen(fixture)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, TLEN))
self.assertEqual([str(fixture)], result.text)
# test with an array
fixture = ['12345']
result = convert_length_to_tlen(fixture)
self.assertEqual(fixture, result.text)
def test_convert_mbid_to_ufid(self):
"""Test converting a MusicBrainz ID to an ID3 UFID tag."""
fixture = "a56e6f46-f45b-4271-b389-904297463aaf"
result = convert_mbid_to_ufid(fixture)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, UFID))
self.assertEqual('http://musicbrainz.org', result.owner)
self.assertEqual(six.b(fixture), result.data)
def test_convert_album_to_talb(self):
"""Test converting a FLAC album to a TALB ID3 tag."""
fixture = "Album"
result = convert_album_to_talb(fixture)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, TALB))
self.assertEqual([fixture], result.text)
def test_convert_organization_to_tpub(self):
"""Test converting a FLAC organization to a TPUB ID3 tag."""
fixture = "Organization"
result = convert_organization_to_tpub(fixture)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, TPUB))
self.assertEqual([fixture], result.text)
def test_convert_albumartist_to_tpe2(self):
"""Test converting a FLAC album artist tag into a TPE2 ID3 tag."""
fixture = "Album Artist"
result = convert_albumartist_to_tpe2(fixture)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, TPE2))
self.assertEqual([fixture], result.text)
def test_convert_artist_to_tpe1(self):
"""Test converting a FLAC artist tag into a TPE1 ID3 tag."""
fixture = "Artist 1"
result = convert_artist_to_tpe1(fixture)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, TPE1))
self.assertEqual(result.text, [fixture])
# test multiple artists
fixture = ["Artist 1", "Artist 2"]
result = convert_artist_to_tpe1(fixture)
self.assertEqual(result.text, fixture)
def test_convert_date_to_tdrc(self):
"""Test converting a FLAC date tag into a TDRC ID3 tag."""
fixture = "2017"
result = convert_date_to_tdrc(fixture)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, TDRC))
# mutagen renders this value as an ID3TimeStamp, so map it to a string
self.assertEqual(list(map(lambda i: str(i), result.text)), [fixture])
def test_convert_title_to_tit2(self):
"""Test converting a FLAC title tag into a TIT2 ID3 tag."""
fixture = "Title"
result = convert_title_to_tit2(fixture)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, TIT2))
self.assertEqual(result.text, [fixture])
def test_convert_composer_to_tcom(self):
"""Tests converting a FLAC composer tag into a TCOM ID3 tag."""
# test single string
fixture = "Composer 1"
result = convert_composer_to_tcom(fixture)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, TCOM))
self.assertEqual(result.text, [fixture])
# test an array
fixture = ["Composer 1", "Composer 2"]
result = convert_composer_to_tcom(fixture)
self.assertEqual(result.text, fixture)
def test_convert_picture_to_apic(self):
"""Tests converting a FLAC picture to an APIC ID3 tag."""
fixture = Picture()
fixture.desc = "OMG DESC"
fixture.data = bytes([0x00] * 8)
fixture.mime = "image/jpeg"
fixture.type = 3
result = convert_picture_to_apic(fixture)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, APIC))
# test properties
self.assertEqual(fixture.desc, result.desc)
self.assertEqual(fixture.data, result.data)
self.assertEqual(fixture.mime, result.mime)
self.assertEqual(fixture.type, result.type)
def test_convert_toc_to_mcdi_str(self):
"""Tests converting a FLAC CDTOC to an MCDI ID3 tag via a string."""
fixture = "1C+96+2D5C+30DE+5B58+7F78+AB96+D9FE+DDC8+101B6+12A96+14C97+17183+17324+19F19+1C986+1E1DD+1E7F1+20524+221BE+22674+23809+26B9F+27F2F+2B19E+2D23F+2FA58+31C6E+3355F+35F04"
fixture_len = len(fixture.split("+"))
result = convert_toc_to_mcdi(fixture)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, MCDI))
# test length
self.assertEqual(4 + ((fixture_len - 1) * 8), len(bytearray(result.data)))
# test track count
self.assertEqual(28, struct.unpack('>I', result.data[0:4])[0])
# test that first track begins at sector 150
self.assertEqual(150, struct.unpack('>Q', result.data[4:12])[0])
def test_convert_toc_to_mcdi_bytes(self):
"""Tests converting a FLAC CDTOC to an MCDI ID3 tag via a byte array."""
fixture = b"1C+96+2D5C+30DE+5B58+7F78+AB96+D9FE+DDC8+101B6+12A96+14C97+17183+17324+19F19+1C986+1E1DD+1E7F1+20524+221BE+22674+23809+26B9F+27F2F+2B19E+2D23F+2FA58+31C6E+3355F+35F04"
fixture_len = len(fixture.split(b"+"))
result = convert_toc_to_mcdi(fixture)
self.assertIsNotNone(result)
self.assertTrue(isinstance(result, MCDI))
# test length
self.assertEqual(4 + ((fixture_len - 1) * 8), len(result.data))
# test track count
self.assertEqual(28, struct.unpack('>I', result.data[0:4])[0])
# test that first track begins at sector 150
self.assertEqual(150, struct.unpack('>Q', result.data[4:12])[0])
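# Rough end-to-end sketch of the conversion helpers exercised above (file paths are
# hypothetical; convert_flac_to_id3() returns a list of ID3 frames):
#
#   flac = FLAC("song.flac")
#   id3 = ID3()
#   for frame in convert_flac_to_id3(flac):
#       id3.add(frame)
#   id3.save("song.mp3")
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()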
|
|
import itertools
import logging
import urlparse
import requests
from harvester.ext.crawler.base import CrawlerPluginBase
from harvester.utils import to_ordinal, report_progress
from . import DEFAULT_CLASSES
logger = logging.getLogger(__name__)
def safe_iter(iterable):
while True:
try:
yield iterable.next()
except StopIteration:
return
except:
            logger.exception('Error while iterating over objects')
class ComunWebCrawler(CrawlerPluginBase):
"""
Crawler for "ComunWeb"-powered websites.
Will download data from a selection of object "classes",
download data contained in the page returned by requesting the
url in the "link" field and store them in the database,
as a document of "classIdentifier" type, keeping the original id
from the "objectId" field.
Objects stored in the storage have the following keys:
- classIdentifier:
the class type id, eg. ``"open_data"``
- dateModified:
Unix timestamp, as integer
- datePublished:
Unix timestamp, as integer
- fullUrl:
Link to HTTP page
- link:
Link to API/JSON metadata
- nodeId:
Numeric id of the node, eg. ``831248``
- nodeRemoteId:
16-byte hex string (32 char long) (md5 of something?)
- objectId:
Numeric id of the object, eg. ``849404``
- objectName:
title of the object, eg. ``"Rendiconto del 2013 (Open Data)"``
- objectRemoteId:
16-byte hex string (32 char long) (md5 of something?)
- full_metadata:
Metadata object, as returned by the ``link`` url.
Varies depending on the type of object (see json files in the
comunweb harvester ``docs`` folder).
"""
options = []
def fetch_data(self, storage, limit_classes=DEFAULT_CLASSES):
logger.info(u"Fetching data from comunweb: {0}".format(self.url))
classes = list(self._list_object_classes())
logger.debug(u'Available classes: {0}'.format(u', '.join(
x['identifier'] for x in classes)))
if limit_classes:
classes = [x for x in classes
if x['identifier'] in limit_classes]
logger.info(u'Selected {0} class(es)'.format(len(classes)))
logger.debug(u'Selected classes: {0}'.format(u', '.join(
x['identifier'] for x in classes)))
for clsinfo in classes:
# Each clsinfo has "identifier", "link", "name"
logger.info(u"Downloading data from class: {0} ({1}): {2}"
.format(clsinfo['identifier'],
clsinfo['name'],
clsinfo['link']))
resp = requests.get(clsinfo['link'])
_progress_total = int(resp.json()['metadata']['count'])
_progress_next = itertools.count(0).next
_progress_name = (u'Class: {0}'.format(clsinfo['identifier']),)
obj_type = clsinfo['identifier']
# Now iterate all the objects in this class
objects = self._scan_pages(clsinfo['link'])
for i, obj in enumerate(safe_iter(objects)):
report_progress(
_progress_name, _progress_next(), _progress_total,
'Downloading {0} #{1} [{2}]'.format(
obj_type, i, obj['nodeId']))
# Make sure objects are coherent
assert obj['classIdentifier'] == obj_type
node_id = obj['nodeId']
obj_id = obj['objectId']
object_name = obj['objectName']
link = obj['link']
# ===== NOTE =============================================
# Objects have two candidate keys: ``objectId`` and
# ``nodeId``; the latter is used in "link" urls so we
# now use it as primary object key; the former one can
# still be used to get metadata by requesting a URL
# like: /api/opendata/v1/content/object/<objectId> but
# apparently that contains slightly less information
# (node info and full url are missing)
# ========================================================
logger.debug(
u'Storing {seq} object of type "{obj_type}" '
u'nodeId={node_id}, objectId={obj_id}, title="{title}"'
.format(
seq=to_ordinal(i + 1),
obj_type=obj_type,
node_id=node_id,
obj_id=obj_id,
title=object_name))
try:
metadata = requests.get(link).json()
except:
logger.exception('Error getting metadata')
obj['full_metadata'] = None
else:
obj['full_metadata'] = metadata
# Store it by nodeId
storage.documents[obj_type][node_id] = obj
report_progress(
_progress_name, _progress_next(), _progress_total, 'All done')
def _list_object_classes(self):
"""
Return a list of available "object classes" for the crawled site.
Each item in the list is a dict like this::
{
"identifier": "open_data",
"link": "http://.../api/opendata/v1/content/class/open_data",
"name": "Open Data"
},
"""
response = requests.get(urlparse.urljoin(
self.url, '/api/opendata/v1/content/classList'))
assert response.ok
return response.json()['classes']
def _scan_pages(self, start_url):
"""
Keep downloading pages from a paged API request and yield
objects found in each page, until the end is reached.
Each yielded item is a dict like this::
{
"classIdentifier": "open_data",
"dateModified": 1399274108,
"datePublished": 1399240800,
"fullUrl": "http://www.comune.trento.it/Comune/Documenti/Bilanci/"
"Bilanci-di-rendicontazione/Rendiconti-di-gestione/"
"Rendiconto-del-2013/Rendiconto-del-2013-Open-Data",
"link": "http://www.comune.trento.it"
"/api/opendata/v1/content/node/831248",
"nodeId": 831248,
"nodeRemoteId": "dc04403fe707a2b5f36efba071bd119e",
"objectId": 849404,
"objectName": "Rendiconto del 2013 (Open Data)",
"objectRemoteId": "260e6a5ebdc2e6319f3353a0d9b2f5bd"
},
"""
offset, limit = 0, 50
while True:
page_url = '{0}/offset/{1}/limit/{2}'.format(
start_url.rstrip('/'), offset, limit)
response = requests.get(page_url)
nodes = response.json()['nodes']
if len(nodes) < 1:
# This was the last page
return
for item in nodes:
yield item
            offset += limit  # advance past the items just fetched
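# A minimal usage sketch (illustrative, not part of the original module):
# ``fetch_data`` only requires ``storage`` to expose ``documents`` as a
# two-level mapping keyed by object class and nodeId; the ``DictStorage``
# helper below is hypothetical and shown only to make that contract concrete.
#
#     import collections
#
#     class DictStorage(object):
#         def __init__(self):
#             self.documents = collections.defaultdict(dict)
#
#     harvester = ...  # an instance of the harvester class defined above
#     harvester.fetch_data(DictStorage(), limit_classes=['open_data'])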
|
|
from django.forms import models as model_forms
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseRedirect
from django.views.generic.base import TemplateResponseMixin, View
from django.views.generic.detail import (SingleObjectMixin,
SingleObjectTemplateResponseMixin, BaseDetailView)
class FormMixin(object):
"""
A mixin that provides a way to show and handle a form in a request.
"""
initial = {}
form_class = None
success_url = None
def get_initial(self):
"""
Returns the initial data to use for forms on this view.
"""
return self.initial
def get_form_class(self):
"""
Returns the form class to use in this view
"""
return self.form_class
def get_form(self, form_class):
"""
Returns an instance of the form to be used in this view.
"""
return form_class(**self.get_form_kwargs())
def get_form_kwargs(self):
"""
        Returns the keyword arguments for instantiating the form.
"""
kwargs = {'initial': self.get_initial()}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def get_context_data(self, **kwargs):
return kwargs
def get_success_url(self):
if self.success_url:
url = self.success_url
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
return url
def form_valid(self, form):
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form))
class ModelFormMixin(FormMixin, SingleObjectMixin):
"""
A mixin that provides a way to show and handle a modelform in a request.
"""
def get_form_class(self):
"""
Returns the form class to use in this view
"""
if self.form_class:
return self.form_class
else:
model = self.get_queryset().model
return model_forms.modelform_factory(model)
def get_form_kwargs(self):
"""
        Returns the keyword arguments for instantiating the form.
"""
kwargs = super(ModelFormMixin, self).get_form_kwargs()
kwargs.update({'instance': self.object})
return kwargs
def get_success_url(self):
if self.success_url:
url = self.success_url % self.object.__dict__
else:
try:
url = self.object.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
"No URL to redirect to. Either provide a url or define"
" a get_absolute_url method on the Model.")
return url
def form_valid(self, form):
self.object = form.save()
return super(ModelFormMixin, self).form_valid(form)
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form))
def get_context_data(self, **kwargs):
context = kwargs
if self.object:
context['object'] = self.object
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
return context
class ProcessFormView(View):
"""
A mixin that processes a form on POST.
"""
def get(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
return self.render_to_response(self.get_context_data(form=form))
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
class BaseFormView(FormMixin, ProcessFormView):
"""
    A base view for displaying a form.
"""
class FormView(TemplateResponseMixin, BaseFormView):
"""
A view for displaying a form, and rendering a template response.
"""
class BaseCreateView(ModelFormMixin, ProcessFormView):
"""
    Base view for creating a new object instance.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateView, self).post(request, *args, **kwargs)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):
"""
    View for creating a new object instance,
with a response rendered by template.
"""
template_name_suffix = '_form'
class BaseUpdateView(ModelFormMixin, ProcessFormView):
"""
Base view for updating an existing object.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateView, self).post(request, *args, **kwargs)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):
"""
View for updating an object,
    with a response rendered by template.
"""
template_name_suffix = '_form'
class DeletionMixin(object):
"""
A mixin providing the ability to delete objects
"""
success_url = None
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.delete()
return HttpResponseRedirect(self.get_success_url())
# Add support for browsers which only accept GET and POST for now.
def post(self, *args, **kwargs):
return self.delete(*args, **kwargs)
def get_success_url(self):
if self.success_url:
return self.success_url
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
class BaseDeleteView(DeletionMixin, BaseDetailView):
"""
Base view for deleting an object.
Using this base class requires subclassing to provide a response mixin.
"""
class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView):
"""
View for deleting an object retrieved with `self.get_object()`,
with a response rendered by template.
"""
template_name_suffix = '_confirm_delete'
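# A minimal usage sketch (illustrative, not part of the original module):
# ``Author`` is a hypothetical model. ``model`` is picked up through
# SingleObjectMixin's queryset resolution, and CreateView/UpdateView redirect
# to the object's get_absolute_url() unless ``success_url`` is set.
#
#     class AuthorCreate(CreateView):
#         model = Author
#
#     class AuthorUpdate(UpdateView):
#         model = Author
#
#     class AuthorDelete(DeleteView):
#         model = Author
#         success_url = '/authors/'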
|
|
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
# Copyright 2017 Xiaomi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argcomplete
import argparse
import logging
import pkg_resources
import sys
sys.path.append("../../")
from . import constant
from . import util
logging.basicConfig(level=logging.DEBUG)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-v",
"--version",
action="version",
version=pkg_resources.require(constant.SDK_NAME)[0].version,
help="Show version")
main_subparser = parser.add_subparsers(dest="command_group", help="Commands")
init_parser = main_subparser.add_parser("init", help="Init cloudml config")
init_parser.set_defaults(func=util.init_config)
org_id_parser = main_subparser.add_parser(
"org_id", help="Get org_id by access_key and secret_key")
org_id_parser.set_defaults(func=util.get_org_id)
# subcommand: jobs
jobs_parser = main_subparser.add_parser("jobs", help="Commands about jobs")
jobs_subparser = jobs_parser.add_subparsers(
dest="job_command", help="Subcommands of jobs")
# subcommand of jobs: list
jobs_list_parser = jobs_subparser.add_parser("list", help="List jobs")
jobs_list_parser.set_defaults(func=util.list_jobs)
# subcommand of jobs: submit
jobs_submit_parser = jobs_subparser.add_parser("submit", help="Submit job")
jobs_submit_parser.add_argument(
"-f",
"--filename",
dest="filename",
help="The json file contains the job task msg")
jobs_submit_parser.add_argument(
"-n", "--job_name", dest="job_name", help="The job name")
jobs_submit_parser.add_argument(
"-m", "--module_name", dest="module_name", help="The module name")
jobs_submit_parser.add_argument(
"-u", "--trainer_uri", dest="trainer_uri", help="The trainer uri")
jobs_submit_parser.add_argument(
"-a", "--job_args", dest="job_args", help="The string of args")
jobs_submit_parser.add_argument(
"-c",
"--cpu_limit",
dest="cpu_limit",
help="The CPU limit with unit core")
jobs_submit_parser.add_argument(
"-M",
"--memory_limit",
dest="memory_limit",
help="The memory limit with unit K, M or G")
jobs_submit_parser.add_argument(
"-g", "--gpu_limit", dest="gpu_limit", help="The number of GPU limit")
jobs_submit_parser.add_argument(
"-p",
"--ps_count",
dest="ps_count",
help="The number of ps for distributed training")
jobs_submit_parser.add_argument(
"-w",
"--worker_count",
dest="worker_count",
help="The number of worker for distributed training")
jobs_submit_parser.add_argument(
"-d", "--docker_image", dest="docker_image", help="The docker image")
jobs_submit_parser.add_argument(
"-dc",
"--docker_command",
dest="docker_command",
help="The docker command")
jobs_submit_parser.add_argument(
"-F",
"--framework",
dest="framework",
help="The framework of machine learning")
jobs_submit_parser.add_argument(
"-V",
"--framework_version",
dest="framework_version",
help="The version of the framework")
jobs_submit_parser.add_argument(
"-vt", "--volume_type", dest="volume_type", help="The volume type")
jobs_submit_parser.add_argument(
"-vp", "--volume_path", dest="volume_path", help="The volume path")
jobs_submit_parser.add_argument(
"-mp", "--mount_path", dest="mount_path", help="The mount type")
jobs_submit_parser.add_argument(
"-mro",
"--mount_read_only",
dest="mount_read_only",
help="Whether mount read only or not")
jobs_submit_parser.add_argument(
"-pc",
"--prepare_command",
dest="prepare_command",
help="The prepare command")
jobs_submit_parser.add_argument(
"-fc",
"--finish_command",
dest="finish_command",
help="The finish command")
jobs_submit_parser.add_argument(
"-W", "--watch", action="store_true", help="Watch the status of job")
jobs_submit_parser.add_argument(
"-nsk",
"--node_selector_key",
dest="node_selector_key",
help="The node selector key")
jobs_submit_parser.add_argument(
"-nsv",
"--node_selector_value",
dest="node_selector_value",
help="The node selector value")
jobs_submit_parser.set_defaults(func=util.submit_job)
# subcommand of jobs: describe
jobs_describe_parser = jobs_subparser.add_parser(
"describe", help="Describe job")
jobs_describe_parser.add_argument("job_name", help="The job to describe")
jobs_describe_parser.set_defaults(func=util.describe_job)
# subcommand of jobs: logs
jobs_logs_parser = jobs_subparser.add_parser(
"logs", help="Get the logs of the job")
jobs_logs_parser.add_argument("job_name", help="The job to get the logs")
jobs_logs_parser.set_defaults(func=util.get_job_logs)
# subcommand of jobs: metrics
jobs_metrics_parser = jobs_subparser.add_parser(
"metrics", help="Get the metrics of the job")
jobs_metrics_parser.add_argument("job_name", help="The job to get the logs")
jobs_metrics_parser.set_defaults(func=util.get_job_metrics)
# subcommand of jobs: hp
jobs_hp_parser = jobs_subparser.add_parser(
"hp", help="Get the hyperparameters data of the job")
jobs_hp_parser.add_argument("job_name", help="The job name")
jobs_hp_parser.set_defaults(func=util.get_job_hyperparameters_data)
# subcommand of jobs: delete
jobs_delete_parser = jobs_subparser.add_parser(
"delete", help="Delete the job")
jobs_delete_parser.add_argument("job_name", help="The name of the job")
jobs_delete_parser.set_defaults(func=util.delete_job)
# subcommand of jobs: events
jobs_events_parser = jobs_subparser.add_parser(
"events", help="Get the events of the train job")
jobs_events_parser.add_argument("job_name", help="The name of the train job")
jobs_events_parser.set_defaults(func=util.get_train_job_events)
# subcommand: models
models_parser = main_subparser.add_parser(
"models", help="Commands about models")
models_subparser = models_parser.add_subparsers(
dest="models_command", help="Subcommands of models")
# subcommand of models: list
models_list_parser = models_subparser.add_parser(
"list", help="List model services")
models_list_parser.set_defaults(func=util.list_models)
# subcommand of models: create
models_create_parser = models_subparser.add_parser(
"create", help="Create model service")
models_create_parser.add_argument(
"-n",
"--model_name",
dest="model_name",
help="The name of the model",
required=True)
models_create_parser.add_argument(
"-v",
"--model_version",
dest="model_version",
help="The version of the model",
required=True)
models_create_parser.add_argument(
"-u",
"--model_uri",
dest="model_uri",
help="The uri of the model",
required=True)
models_create_parser.add_argument(
"-a", "--model_args", dest="model_args", help="The string of args")
models_create_parser.add_argument(
"-c",
"--cpu_limit",
dest="cpu_limit",
help="The CPU limit with unit core")
models_create_parser.add_argument(
"-M",
"--memory_limit",
dest="memory_limit",
help="The memory limit with unit K, M or G")
models_create_parser.add_argument(
"-g", "--gpu_limit", dest="gpu_limit", help="The number of GPU limit")
models_create_parser.add_argument(
"-d", "--docker_image", dest="docker_image", help="The docker image")
models_create_parser.add_argument(
"-dc",
"--docker_command",
dest="docker_command",
help="The docker command")
models_create_parser.add_argument(
"-F",
"--framework",
dest="framework",
help="The framework of machine learning")
models_create_parser.add_argument(
"-V",
"--framework_version",
dest="framework_version",
help="The version of the framework")
models_create_parser.add_argument(
"-r", "--replicas", dest="replicas", help="The num of replicas")
models_create_parser.add_argument(
"-pc",
"--prepare_command",
dest="prepare_command",
help="The prepare command")
models_create_parser.add_argument(
"-fc",
"--finish_command",
dest="finish_command",
help="The finish command")
models_create_parser.add_argument(
"-nsk",
"--node_selector_key",
dest="node_selector_key",
help="The node selector key")
models_create_parser.add_argument(
"-nsv",
"--node_selector_value",
dest="node_selector_value",
help="The node selector value")
models_create_parser.add_argument(
"-W", "--watch", action="store_true", help="Watch the status of model creation")
models_create_parser.set_defaults(func=util.create_model)
# subcommand of models: describe
models_describe_parser = models_subparser.add_parser(
"describe", help="Describe the model service")
models_describe_parser.add_argument(
"model_name", help="The name of the model")
models_describe_parser.add_argument(
"model_version", help="The version of the model")
models_describe_parser.set_defaults(func=util.describe_model)
# subcommand of models: update
models_update_parser = models_subparser.add_parser(
"update", help="Update the model service")
models_update_parser.add_argument(
"model_name", help="The name of the model")
models_update_parser.add_argument(
"model_version", help="The version of the model")
models_update_parser.add_argument(
"-r", "--replicas", dest="replicas", help="The num of replicas")
models_update_parser.set_defaults(func=util.update_model)
# subcommand of models: logs
models_logs_parser = models_subparser.add_parser(
"logs", help="Get the logs of the model service")
models_logs_parser.add_argument("model_name", help="The name of the model")
models_logs_parser.add_argument(
"model_version", help="The version of the model")
models_logs_parser.add_argument(
"-ri",
"--replica",
dest="replica_index",
help="The replica index"
)
models_logs_parser.set_defaults(func=util.get_model_logs)
# subcommand of models: metrics
models_metrics_parser = models_subparser.add_parser(
"metrics", help="Get the metrics of the model service")
models_metrics_parser.add_argument("model_name", help="The name of the model")
models_metrics_parser.add_argument(
"model_version", help="The version of the model")
models_metrics_parser.set_defaults(func=util.get_model_metrics)
# subcommand of models: delete
models_delete_parser = models_subparser.add_parser(
"delete", help="Delete the model service")
models_delete_parser.add_argument("model_name", help="The name of the model")
models_delete_parser.add_argument(
"model_version", help="The version of the model")
models_delete_parser.set_defaults(func=util.delete_model)
# subcommand of models: predict
models_predict_parser = models_subparser.add_parser(
"predict", help="Request the model service and predict")
models_predict_parser.add_argument(
"-n",
"--model_name",
dest="model_name",
help="The name of the model",
required=True)
models_predict_parser.add_argument(
"-v",
"--model_version",
dest="model_version",
help="The version of the model")
models_predict_parser.add_argument(
"-s", "--server", dest="server", help="The address of the server")
models_predict_parser.add_argument(
"-f",
"--filename",
dest="filename",
help="The json data file",
required=True)
models_predict_parser.add_argument(
"-t", "--timeout", dest="timeout", help="The timeout of gRPC request")
models_predict_parser.set_defaults(func=util.do_predict)
# subcommand of models: events
models_events_parser = models_subparser.add_parser(
"events", help="Get the events of the model service")
models_events_parser.add_argument(
"model_name", help="The name of the model service")
models_events_parser.add_argument(
"model_version", help="The version of the model service")
models_events_parser.set_defaults(func=util.get_model_service_events)
# subcommand: tensorboard
tensorboard_parser = main_subparser.add_parser(
"tensorboard", help="Commands about tensorboard")
tensorboard_subparser = tensorboard_parser.add_subparsers(
dest="tensorboard_command", help="Subcommands of tensorboard")
# subcommand of tensorboard: list
tensorboard_list_parser = tensorboard_subparser.add_parser(
"list", help="List tensorboards")
tensorboard_list_parser.set_defaults(func=util.list_tensorboard_services)
# subcommand of tensorboard: create
tensorboard_create_parser = tensorboard_subparser.add_parser(
"create", help="Create tensorboard")
tensorboard_create_parser.add_argument(
"-n",
"--tensorboard_name",
dest="tensorboard_name",
help="The name of the tensorboard",
required=True)
tensorboard_create_parser.add_argument(
"-l",
"--logdir",
dest="logdir",
help="The directory of tensorboard log",
required=True)
tensorboard_create_parser.add_argument(
"-d", "--docker_image", dest="docker_image", help="The docker image")
tensorboard_create_parser.add_argument(
"-dc",
"--docker_command",
dest="docker_command",
help="The docker command")
tensorboard_create_parser.add_argument(
"-F",
"--framework",
dest="framework",
help="The framework of machine learning")
tensorboard_create_parser.add_argument(
"-V",
"--framework_version",
dest="framework_version",
help="The version of the framework")
tensorboard_create_parser.add_argument(
"-nsk",
"--node_selector_key",
dest="node_selector_key",
help="The node selector key")
tensorboard_create_parser.add_argument(
"-nsv",
"--node_selector_value",
dest="node_selector_value",
help="The node selector value")
tensorboard_create_parser.set_defaults(func=util.create_tensorboard_service)
# subcommand of tensorboard: describe
tensorboard_describe_parser = tensorboard_subparser.add_parser(
"describe", help="Describe the tensorboard")
tensorboard_describe_parser.add_argument(
"tensorboard_name", help="The name of the tensorboard")
tensorboard_describe_parser.set_defaults(
func=util.describe_tensorboard_service)
# subcommand of tensorboard: delete
tensorboard_delete_parser = tensorboard_subparser.add_parser(
"delete", help="Delete the tensorboard")
tensorboard_delete_parser.add_argument(
"tensorboard_name", help="The name of the tensorboard")
tensorboard_delete_parser.set_defaults(func=util.delete_tensorboard_service)
# subcommand of tensorboard: events
tensorboard_events_parser = tensorboard_subparser.add_parser(
"events", help="Get the events of the tensorboard service")
tensorboard_events_parser.add_argument(
"tensorboard_name", help="The name of the tensorboard service")
tensorboard_events_parser.set_defaults(
func=util.get_tensorboard_service_events)
# subcommand: dev
dev_parser = main_subparser.add_parser("dev", help="Commands about dev")
dev_subparser = dev_parser.add_subparsers(
dest="dev_command", help="Subcommands of dev")
# subcommand of dev: list
dev_list_parser = dev_subparser.add_parser(
"list", help="List dev environments")
dev_list_parser.set_defaults(func=util.list_dev_envs)
# subcommand of dev: create
dev_create_parser = dev_subparser.add_parser(
"create", help="Create dev environment")
dev_create_parser.add_argument(
"-n",
"--dev_name",
dest="dev_name",
help="The dev environment name",
required=True)
dev_create_parser.add_argument(
"-p",
"--password",
dest="password",
help="The password of ipython notebook",
required=True)
dev_create_parser.add_argument(
"-c",
"--cpu_limit",
dest="cpu_limit",
help="The CPU limit with unit core")
dev_create_parser.add_argument(
"-M",
"--memory_limit",
dest="memory_limit",
help="The memory limit with unit K, M or G")
dev_create_parser.add_argument(
"-g", "--gpu_limit", dest="gpu_limit", help="The number of GPU limit")
dev_create_parser.add_argument(
"-d", "--docker_image", dest="docker_image", help="The ")
dev_create_parser.add_argument(
"-dc",
"--docker_command",
dest="docker_command",
help="The docker command")
dev_create_parser.add_argument(
"-F",
"--framework",
dest="framework",
help="The framework of machine learning")
dev_create_parser.add_argument(
"-V",
"--framework_version",
dest="framework_version",
help="The version of the framework")
dev_create_parser.add_argument(
"-nsk",
"--node_selector_key",
dest="node_selector_key",
help="The node selector key")
dev_create_parser.add_argument(
"-nsv",
"--node_selector_value",
dest="node_selector_value",
help="The node selector value")
dev_create_parser.add_argument(
"-W", "--watch", action="store_true", help="Watch the status of dev_env creation")
dev_create_parser.set_defaults(func=util.create_dev_env)
# subcommand of dev: describe
dev_describe_parser = dev_subparser.add_parser(
"describe", help="Describe the dev environment")
dev_describe_parser.add_argument(
"dev_name", help="The name of dev environment")
dev_describe_parser.set_defaults(func=util.describe_dev_env)
# subcommand of dev: delete
dev_delete_parser = dev_subparser.add_parser(
"delete", help="Delete the dev environment")
dev_delete_parser.add_argument(
"dev_name", help="The name of dev environment")
dev_delete_parser.set_defaults(func=util.delete_dev_env)
# subcommand of dev: events
dev_events_parser = dev_subparser.add_parser(
"events", help="Get the events of the dev environment")
dev_events_parser.add_argument(
"dev_name", help="The name of dev environment")
dev_events_parser.set_defaults(func=util.get_dev_env_events)
# subcommand of dev: metrics
dev_metrics_parser = dev_subparser.add_parser(
"metrics", help="Get the metrics of the dev environment")
dev_metrics_parser.add_argument(
"dev_name", help="The name of dev environment")
dev_metrics_parser.set_defaults(func=util.get_dev_env_metrics)
# subcommand: dev_server
dev_server_parser = main_subparser.add_parser(
"dev_server", help="Commands about dev_server")
dev_server_subparser = dev_server_parser.add_subparsers(
dest="dev_server_command", help="Subcommands of dev_server")
# subcommand of dev_server: list
dev_server_list_parser = dev_server_subparser.add_parser(
"list", help="List dev servers")
dev_server_list_parser.set_defaults(func=util.list_dev_servers)
# subcommand of dev_server: create
dev_server_create_parser = dev_server_subparser.add_parser(
"create", help="Create dev server")
dev_server_create_parser.add_argument(
"-n",
"--dev_name",
dest="dev_name",
help="The dev environment name",
required=True)
dev_server_create_parser.add_argument(
"-p",
"--password",
dest="password",
help="The password of ipython notebook",
required=True)
dev_server_create_parser.add_argument(
"-d", "--docker_image", dest="docker_image", help="The ")
dev_server_create_parser.add_argument(
"-dc",
"--docker_command",
dest="docker_command",
help="The docker command")
dev_server_create_parser.add_argument(
"-F",
"--framework",
dest="framework",
help="The framework of machine learning")
dev_server_create_parser.add_argument(
"-V",
"--framework_version",
dest="framework_version",
help="The version of the framework")
dev_server_create_parser.set_defaults(func=util.create_dev_server)
# subcommand of dev_server: describe
dev_server_describe_parser = dev_server_subparser.add_parser(
"describe", help="Describe the dev server")
dev_server_describe_parser.add_argument(
"dev_name", help="The name of dev server")
dev_server_describe_parser.set_defaults(func=util.describe_dev_server)
# subcommand of dev_server: delete
dev_server_delete_parser = dev_server_subparser.add_parser(
"delete", help="Delete the dev server")
dev_server_delete_parser.add_argument(
"dev_name", help="The name of dev server")
dev_server_delete_parser.set_defaults(func=util.delete_dev_server)
# subcommand of dev_server: events
dev_server_events_parser = dev_server_subparser.add_parser(
"events", help="Get the events of the dev server")
dev_server_events_parser.add_argument(
"dev_name", help="The name of dev server")
dev_server_events_parser.set_defaults(func=util.get_dev_server_events)
# subcommand: quota
quota_parser = main_subparser.add_parser(
"quota", help="Commands about quota")
quota_subparser = quota_parser.add_subparsers(
dest="quota_command", help="Subcommands of quota")
# subcommand of quota: list
quota_list_parser = quota_subparser.add_parser("list", help="List the quota")
quota_list_parser.set_defaults(func=util.list_quota)
# subcommand: framework
framework_parser = main_subparser.add_parser(
"framework", help="Commands about framework")
framework_subparser = framework_parser.add_subparsers(
dest="framework_command", help="Subcommands of framework")
# subcommand of framework: list
framework_list_parser = framework_subparser.add_parser(
"list", help="List the frameworks")
framework_list_parser.set_defaults(func=util.list_framework)
# subcommand: all
all_parser = main_subparser.add_parser(
"all", help="Commands about all")
all_subparser = all_parser.add_subparsers(
dest="all_command", help="Subcommands of all")
# subcommand of all: list
all_list_parser = all_subparser.add_parser(
"list", help="List all resources")
all_list_parser.set_defaults(func=util.list_all)
# For auto-complete
argcomplete.autocomplete(parser)
if len(sys.argv) == 1:
args = parser.parse_args(["-h"])
else:
args = parser.parse_args(sys.argv[1:])
args.func(args)
if __name__ == "__main__":
main()
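# Example invocations (illustrative; assumes the console script is installed
# as ``cloudml`` and config has been initialized; every name and URI below is
# a placeholder):
#
#   cloudml init
#   cloudml jobs submit -n my_job -m trainer.task -u <trainer_uri>
#   cloudml jobs logs my_job
#   cloudml models create -n my_model -v v1 -u <model_uri>
#   cloudml models predict -n my_model -v v1 -f data.json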
|
|
import asyncio
import logging
from asyncio.coroutines import iscoroutine, coroutine
from functools import partial
import aiohttp
from again.utils import unique_hex
from retrial.retrial.retry import retry
from .exceptions import ClientNotFoundError, ClientDisconnected
from .packet import ControlPacket, MessagePacket
from .protocol_factory import get_trellio_protocol
from .services import TCPServiceClient, HTTPServiceClient
HTTP = 'http'
TCP = 'tcp'
def _retry_for_pub(result):
return not result
def _retry_for_exception(_):
return True
class HTTPBus:
def __init__(self, registry_client):
self._registry_client = registry_client
def send_http_request(self, app: str, service: str, version: str, method: str, entity: str, params: dict):
"""
        A convenience method that allows you to send a well-formed HTTP request to another service.
"""
host, port, node_id, service_type = self._registry_client.resolve(service, version, entity, HTTP)
url = 'http://{}:{}{}'.format(host, port, params.pop('path'))
http_keys = ['data', 'headers', 'cookies', 'auth', 'allow_redirects', 'compress', 'chunked']
kwargs = {k: params[k] for k in http_keys if k in params}
query_params = params.pop('params', {})
if app is not None:
query_params['app'] = app
query_params['version'] = version
query_params['service'] = service
response = yield from aiohttp.request(method, url, params=query_params, **kwargs)
return response
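# A minimal usage sketch (illustrative): ``send_http_request`` is a
# generator-based coroutine, so it must be driven with ``yield from``;
# ``params`` must contain the request 'path' and may carry any of the
# ``http_keys`` above. The service name and path are placeholders.
#
#     @asyncio.coroutine
#     def fetch_user(http_bus):
#         response = yield from http_bus.send_http_request(
#             app=None, service='accounts', version='1', method='GET',
#             entity=None, params={'path': '/users/42'})
#         return (yield from response.text())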
class TCPBus:
def __init__(self, registry_client):
registry_client.conn_handler = self
self._registry_client = registry_client
self._client_protocols = {}
self._pingers = {}
self._node_clients = {}
self._service_clients = []
self.tcp_host = None
self.http_host = None
self._host_id = unique_hex()
self._ronin = False
self._registered = False
self._logger = logging.getLogger(__name__)
def _create_service_clients(self):
futures = []
for sc in self._service_clients:
for host, port, node_id, service_type in self._registry_client.get_all_addresses(*sc.properties):
if service_type == 'tcp':
self._node_clients[node_id] = sc
future = self._connect_to_client(host, node_id, port, service_type, sc)
futures.append(future)
return asyncio.gather(*futures, return_exceptions=False)
def connect(self):
clients = self.tcp_host.clients if self.tcp_host else self.http_host.clients
for client in clients:
if isinstance(client, (TCPServiceClient, HTTPServiceClient)):
client.tcp_bus = self
self._service_clients = clients
yield from self._registry_client.connect()
def register(self):
if self.tcp_host:
self._registry_client.register(self.tcp_host.host, self.tcp_host.port, self.tcp_host.name,
self.tcp_host.version, self.tcp_host.node_id, self.tcp_host.clients, 'tcp')
if self.http_host:
self._registry_client.register(self.http_host.host, self.http_host.port, self.http_host.name,
self.http_host.version, self.http_host.node_id, self.http_host.clients,
'http')
def registration_complete(self):
if not self._registered:
self._create_service_clients()
self._registered = True
def new_instance(self, service, version, host, port, node_id, type):
sc = next(sc for sc in self._service_clients if sc.name == service and sc.version == version)
if type == 'tcp':
self._node_clients[node_id] = sc
asyncio.ensure_future(self._connect_to_client(host, node_id, port, type, sc))
def send(self, packet: dict):
packet['from'] = self._host_id
func = getattr(self, '_' + packet['type'] + '_sender')
wrapper_func = func
if not iscoroutine(func):
wrapper_func = coroutine(func)
asyncio.ensure_future(wrapper_func(packet))
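    # Illustrative note: the sender coroutine is selected by packet type, so
    # a (hypothetical) packet such as the one below is routed to
    # ``_request_sender`` defined next:
    #
    #     bus.send({'type': 'request', 'name': 'accounts', 'version': '1',
    #               'entity': None, 'endpoint': 'get_user', 'payload': {}})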
# @retry((ClientDisconnected, ClientNotFoundError))
@retry(should_retry_for_result=lambda x: not x, should_retry_for_exception=lambda x: True, timeout=None,
max_attempts=5, multiplier=2)
def _request_sender(self, packet: dict):
"""
Sends a request to a server from a ServiceClient
auto dispatch method called from self.send()
"""
node_id = self._get_node_id_for_packet(packet)
client_protocol = self._client_protocols.get(node_id)
if node_id and client_protocol:
if client_protocol.is_connected():
packet['to'] = node_id
client_protocol.send(packet)
return True
else:
self._logger.error('Client protocol is not connected for packet %s', packet)
raise ClientDisconnected()
else:
# No node found to send request
self._logger.error('Out of %s, Client Not found for packet %s, restarting server...',
self._client_protocols.keys(), packet)
raise ClientNotFoundError()
def _connect_to_client(self, host, node_id, port, service_type, service_client):
future = asyncio.ensure_future(
asyncio.get_event_loop().create_connection(partial(get_trellio_protocol, service_client), host, port,
ssl=service_client._ssl_context))
future.add_done_callback(
partial(self._service_client_connection_callback, self._node_clients[node_id], node_id, service_type))
return future
def _service_client_connection_callback(self, sc, node_id, service_type, future):
_, protocol = future.result()
# TODO : handle pinging
# if service_type == TCP:
# pinger = Pinger(self, asyncio.get_event_loop())
# self._pingers[node_id] = pinger
# pinger.register_tcp_service(protocol, node_id)
# asyncio.ensure_future(pinger.start_ping())
self._client_protocols[node_id] = protocol # stores connection(sockets)
@staticmethod
def _create_json_service_name(app, service, version):
return {'app': app, 'name': service, 'version': version}
@staticmethod
def _handle_ping(packet, protocol):
protocol.send(ControlPacket.pong(packet['node_id']))
def _handle_pong(self, node_id, count):
pinger = self._pingers[node_id]
asyncio.ensure_future(pinger.pong_received(count))
def _get_node_id_for_packet(self, packet):
service, version, entity = packet['name'], packet['version'], packet['entity']
node = self._registry_client.resolve(service, version, entity, TCP)
return node[2] if node else None
def handle_ping_timeout(self, node_id):
self._logger.info("Service client connection timed out {}".format(node_id))
self._pingers.pop(node_id, None)
service_props = self._registry_client.get_for_node(node_id)
self._logger.info('service client props {}'.format(service_props))
if service_props is not None:
host, port, _node_id, _type = service_props
            # _connect_to_client also needs the service client for this node
            asyncio.ensure_future(self._connect_to_client(
                host, _node_id, port, _type, self._node_clients[_node_id]))
def receive(self, packet: dict, protocol, transport):
if packet['type'] == 'ping':
self._handle_ping(packet, protocol)
elif packet['type'] == 'pong':
self._handle_pong(packet['node_id'], packet['count'])
elif packet['type'] == 'publish':
self._handle_publish(packet, protocol)
else:
if self.tcp_host.is_for_me(packet['name'], packet['version']):
func = getattr(self, '_' + packet['type'] + '_receiver')
func(packet, protocol)
else:
                self._logger.warning('wrongly routed packet: %s', packet)
def _request_receiver(self, packet, protocol):
api_fn = None
try:
api_fn = getattr(self.tcp_host, packet['endpoint'])
except AttributeError:
pass
if not api_fn:
for view in self.tcp_host.tcp_views:
_api_fn = None
try:
_api_fn = getattr(view, packet['endpoint'])
except AttributeError:
pass
if _api_fn:
api_fn = _api_fn
break
        if api_fn and getattr(api_fn, 'is_api', False):
from_node_id = packet['from']
entity = packet['entity']
future = asyncio.ensure_future(api_fn(from_id=from_node_id, entity=entity, **packet['payload']))
def send_result(f):
result_packet = f.result()
protocol.send(result_packet)
future.add_done_callback(send_result)
else:
            self._logger.error('no api found for packet: %s', packet)
def _handle_publish(self, packet, protocol):
service, version, endpoint, payload, publish_id = (packet['name'], packet['version'], packet['endpoint'],
packet['payload'], packet['publish_id'])
for client in self._service_clients:
if client.name == service and client.version == version:
fun = getattr(client, endpoint)
asyncio.ensure_future(fun(payload))
protocol.send(MessagePacket.ack(publish_id))
def handle_connected(self):
if self.tcp_host:
self.tcp_host.initiate()
if self.http_host:
self.http_host.initiate()
# class PubSubBus:
# PUBSUB_DELAY = 5
#
# def __init__(self, pubsub_host, pubsub_port, registry_client, ssl_context=None):
# self._host = pubsub_host
# self._port = pubsub_port
# self._pubsub_handler = None
# self._registry_client = registry_client
# self._clients = None
# self._pending_publishes = {}
# self._ssl_context = ssl_context
#
# def create_pubsub_handler(self):
# self._pubsub_handler = PubSub(self._host, self._port)
# yield from self._pubsub_handler.connect()
#
# def register_for_subscription(self, host, port, node_id, clients):
# self._clients = clients
# subscription_list = []
# xsubscription_list = []
# for client in clients:
# if isinstance(client, TCPServiceClient):
# for each in dir(client):
# fn = getattr(client, each)
# if callable(fn) and getattr(fn, 'is_subscribe', False):
# subscription_list.append(self._get_pubsub_key(client.name, client.version, fn.__name__))
# elif callable(fn) and getattr(fn, 'is_xsubscribe', False):
# xsubscription_list.append((client.name, client.version, fn.__name__, getattr(fn, 'strategy')))
# self._registry_client.x_subscribe(host, port, node_id, xsubscription_list)
# yield from self._pubsub_handler.subscribe(subscription_list, handler=self.subscription_handler)
#
# def publish(self, service, version, endpoint, payload):
# endpoint_key = self._get_pubsub_key(service, version, endpoint)
# asyncio.ensure_future(self._pubsub_handler.publish(endpoint_key, json.dumps(payload, cls=TrellioEncoder)))
# asyncio.ensure_future(self.xpublish(service, version, endpoint, payload))
#
# def xpublish(self, service, version, endpoint, payload):
# subscribers = yield from self._registry_client.get_subscribers(service, version, endpoint)
# strategies = defaultdict(list)
# for subscriber in subscribers:
# strategies[(subscriber['name'], subscriber['version'])].append(
# (subscriber['host'], subscriber['port'], subscriber['node_id'], subscriber['strategy']))
# for key, value in strategies.items():
# publish_id = str(uuid.uuid4())
# future = asyncio.ensure_future(
# self._connect_and_publish(publish_id, service, version, endpoint, value, payload))
# self._pending_publishes[publish_id] = future
#
# def receive(self, packet, transport, protocol):
# if packet['type'] == 'ack':
# future = self._pending_publishes.pop(packet['request_id'], None)
# if future:
# future.cancel()
# transport.close()
#
# def subscription_handler(self, endpoint, payload):
# service, version, endpoint = endpoint.split('/')
# client = [sc for sc in self._clients if (sc.name == service and sc.version == version)][0]
# func = getattr(client, endpoint)
# asyncio.ensure_future(func(**json.loads(payload)))
#
# @staticmethod
# def _get_pubsub_key(service, version, endpoint):
# return '/'.join((service, str(version), endpoint))
#
# def _connect_and_publish(self, publish_id, service, version, endpoint, subscribers, payload):
# if subscribers[0][3] == 'LEADER':
# host, port = subscribers[0][0], subscribers[0][1]
# else:
# random_metadata = random.choice(subscribers)
# host, port = random_metadata[0], random_metadata[1]
# transport, protocol = yield from asyncio.get_event_loop().create_connection(
# partial(get_trellio_protocol, self), host, port)
# packet = MessagePacket.publish(publish_id, service, version, endpoint, payload)
# protocol.send(packet)
# yield from asyncio.sleep(self.PUBSUB_DELAY)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Higher level ops for building layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import moving_averages
# TODO(b/28426988): Replace legacy_* fns migrated from slim.
# TODO(b/28426988): Remove legacy_* when all uses have migrated to new API.
__all__ = ['avg_pool2d',
'batch_norm',
'bias_add',
'conv2d',
'convolution2d',
'dropout',
'flatten',
'fully_connected',
'linear',
'max_pool2d',
'one_hot_encoding',
'relu',
'relu6',
'stack',
'legacy_fully_connected',
'legacy_linear',
'legacy_relu']
@add_arg_scope
def avg_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
outputs_collections=None,
scope=None):
"""Adds a Avg Pooling op.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
outputs_collections: collection to add the outputs.
scope: Optional scope for op_scope.
Returns:
a tensor representing the results of the pooling operation.
"""
with ops.op_scope([inputs], scope, 'AvgPool2D') as sc:
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
outputs = nn.avg_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
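# Example (illustrative, not part of the original module): average-pool a
# batch of feature maps with a 2x2 window; ``net`` is assumed to be a float
# tensor of shape [batch_size, height, width, depth].
#
#     net = avg_pool2d(net, [2, 2], stride=2, padding='VALID', scope='pool1')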
@add_arg_scope
def batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
activation_fn=None,
updates_collections=ops.GraphKeys.UPDATE_OPS,
is_training=True,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Can be used as a normalizer function for conv2d and fully_connected.
Args:
inputs: a tensor of size `[batch_size, height, width, channels]`
or `[batch_size, channels]`.
decay: decay for the moving average.
center: If True, subtract `beta`. If False, `beta` is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
epsilon: small float added to variance to avoid dividing by zero.
activation_fn: Optional activation function.
updates_collections: collections to collect the update ops for computation.
If None, a control dependency would be added to make sure the updates are
computed.
is_training: whether or not the layer is in training mode. In training mode
it would accumulate the statistics of the moments into `moving_mean` and
`moving_variance` using an exponential moving average with the given
`decay`. When it is not in training mode then it would use the values of
the `moving_mean` and the `moving_variance`.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional collections for the variables.
outputs_collections: collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_op_scope`.
Returns:
a tensor representing the output of the operation.
"""
with variable_scope.variable_op_scope([inputs],
scope, 'BatchNorm', reuse=reuse) as sc:
inputs_shape = inputs.get_shape()
dtype = inputs.dtype.base_dtype
axis = list(range(len(inputs_shape) - 1))
params_shape = inputs_shape[-1:]
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if center:
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
beta = variables.model_variable('beta',
shape=params_shape,
dtype=dtype,
initializer=init_ops.zeros_initializer,
collections=beta_collections,
trainable=trainable)
if scale:
gamma_collections = utils.get_variable_collections(variables_collections,
'gamma')
gamma = variables.model_variable('gamma',
shape=params_shape,
dtype=dtype,
initializer=init_ops.ones_initializer,
collections=gamma_collections,
trainable=trainable)
# Create moving_mean and moving_variance variables and add them to the
    # appropriate collections.
moving_mean_collections = utils.get_variable_collections(
variables_collections, 'moving_mean')
moving_mean = variables.model_variable(
'moving_mean',
shape=params_shape,
dtype=dtype,
initializer=init_ops.zeros_initializer,
trainable=False,
collections=moving_mean_collections)
moving_variance_collections = utils.get_variable_collections(
variables_collections, 'moving_variance')
moving_variance = variables.model_variable(
'moving_variance',
shape=params_shape,
dtype=dtype,
initializer=init_ops.ones_initializer,
trainable=False,
collections=moving_variance_collections)
if is_training:
# Calculate the moments based on the individual batch.
mean, variance = nn.moments(inputs, axis, shift=moving_mean)
# Update the moving_mean and moving_variance moments.
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay)
if updates_collections is None:
# Make sure the updates are computed here.
with ops.control_dependencies([update_moving_mean,
update_moving_variance]):
outputs = nn.batch_normalization(
inputs, mean, variance, beta, gamma, epsilon)
else:
# Collect the updates to be computed later.
ops.add_to_collections(updates_collections, update_moving_mean)
ops.add_to_collections(updates_collections, update_moving_variance)
outputs = nn.batch_normalization(
inputs, mean, variance, beta, gamma, epsilon)
else:
outputs = nn.batch_normalization(
inputs, moving_mean, moving_variance, beta, gamma, epsilon)
outputs.set_shape(inputs.get_shape())
if activation_fn:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
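# Example (illustrative, not part of the original module): batch_norm is most
# often wired in as the normalizer_fn of another layer, but it can also be
# applied directly; ``net`` and ``is_training`` here are assumptions.
#
#     net = batch_norm(net, scale=True, is_training=is_training, scope='bn1')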
@add_arg_scope
def bias_add(inputs,
activation_fn=None,
initializer=init_ops.zeros_initializer,
regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a bias to the inputs.
Can be used as a normalizer function for conv2d and fully_connected.
Args:
    inputs: a tensor with at least rank 2 and a known value for the last
      dimension, e.g. `[batch_size, depth]`, `[None, None, None, depth]`.
activation_fn: Optional activation function.
initializer: An initializer for the bias, defaults to 0.
regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional collections for the variables.
outputs_collections: collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_op_scope.
Returns:
a tensor representing the result of adding biases to the inputs.
"""
with variable_scope.variable_op_scope([inputs],
scope, 'BiasAdd', reuse=reuse) as sc:
dtype = inputs.dtype.base_dtype
num_features = utils.last_dimension(inputs.get_shape(), min_rank=2)
biases_collections = utils.get_variable_collections(variables_collections,
'biases')
biases = variables.model_variable('biases',
shape=[num_features,],
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(inputs, biases)
if activation_fn:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
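# Example (illustrative, not part of the original module): add a learned bias
# and a ReLU to the output of a plain matmul; ``net`` is assumed to be a
# [batch_size, depth] tensor.
#
#     net = bias_add(net, activation_fn=nn.relu, scope='bias1')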
@add_arg_scope
def convolution2d(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer,
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a 2D convolution followed by an optional batch_norm layer.
`convolution2d` creates a variable called `weights`, representing the
convolutional kernel, that is convolved with the `inputs` to produce a
`Tensor` of activations. If a `normalizer_fn` is provided (such as
`batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
  None and a `biases_initializer` is provided then a `biases` variable would
  be created and added to the activations. Finally, if `activation_fn` is
  not `None`, it is applied to the activations as well.
Args:
inputs: a 4-D tensor `[batch_size, height, width, channels]`.
num_outputs: integer, the number of output filters.
    kernel_size: a list of length 2 `[kernel_height, kernel_width]` of the
      filters. Can be an int if both values are the same.
stride: a list of length 2 `[stride_height, stride_width]`.
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: one of `VALID` or `SAME`.
activation_fn: activation function.
normalizer_fn: normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
outputs_collections: collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_op_scope`.
Returns:
a tensor representing the output of the operation.
"""
with variable_scope.variable_op_scope([inputs],
scope, 'Conv', reuse=reuse) as sc:
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
weights_shape = [kernel_h, kernel_w,
num_filters_in, num_outputs]
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
weights = variables.model_variable('weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
collections=weights_collections,
trainable=trainable)
outputs = nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
padding=padding)
if normalizer_fn:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_outputs,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(outputs, biases)
if activation_fn:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
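# Example (illustrative, not part of the original module): a 3x3, stride-1,
# SAME-padded convolution with 64 output filters; ``images`` is assumed to be
# a [batch_size, height, width, 3] tensor.
#
#     net = convolution2d(images, 64, [3, 3], scope='conv1')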
@add_arg_scope
def dropout(inputs,
keep_prob=0.5,
noise_shape=None,
is_training=True,
outputs_collections=None,
scope=None):
"""Returns a dropout op applied to the input.
With probability `keep_prob`, outputs the input element scaled up by
`1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
sum is unchanged.
Args:
inputs: the tensor to pass to the nn.dropout op.
keep_prob: A scalar `Tensor` with the same type as x. The probability
that each element is kept.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
is_training: A bool `Tensor` indicating whether or not the model
is in training mode. If so, dropout is applied and values scaled.
Otherwise, inputs is returned.
outputs_collections: collection to add the outputs.
scope: Optional scope for op_scope.
Returns:
a tensor representing the output of the operation.
"""
with ops.op_scope([inputs], scope, 'Dropout') as sc:
is_training = ops.convert_to_tensor(is_training)
outputs = control_flow_ops.cond(
is_training,
lambda: nn.dropout(inputs, keep_prob, noise_shape),
lambda: inputs)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
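# Example (illustrative, not part of the original module): dropout that is
# active only in training; ``is_training`` may be a Python bool or a boolean
# `Tensor` such as a placeholder.
#
#     net = dropout(net, keep_prob=0.5, is_training=is_training)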
@add_arg_scope
def flatten(inputs,
outputs_collections=None,
scope=None):
"""Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
outputs_collections: collection to add the outputs.
scope: Optional scope for op_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is wrong.
"""
if len(inputs.get_shape()) < 2:
    raise ValueError('Inputs must have at least 2 dimensions')
dims = inputs.get_shape()[1:]
k = dims.num_elements()
with ops.op_scope([inputs], scope, 'Flatten') as sc:
outputs = array_ops.reshape(inputs, [-1, k])
return utils.collect_named_outputs(outputs_collections, sc, outputs)
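# Example (illustrative, not part of the original module): collapse
# convolutional features to rank 2 before a fully connected layer; a
# [32, 7, 7, 64] input yields a [32, 3136] output.
#
#     net = flatten(net)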
@add_arg_scope
def fully_connected(inputs,
num_outputs,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer,
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a fully connected layer.
`fully_connected` creates a variable called `weights`, representing a fully
connected weight matrix, which is multiplied by the `inputs` to produce a
`Tensor` of hidden units. If a `normalizer_fn` is provided (such as
`batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
  None and a `biases_initializer` is provided then a `biases` variable would
  be created and added to the hidden units. Finally, if `activation_fn` is
  not `None`, it is applied to the hidden units as well.
  Note that if `inputs` has a rank greater than 2, then `inputs` is flattened
prior to the initial matrix multiply by `weights`.
Args:
    inputs: A tensor with at least rank 2 and a known value for the last
      dimension, e.g. `[batch_size, depth]`, `[None, None, None, channels]`.
num_outputs: Integer, the number of output units in the layer.
activation_fn: activation function.
normalizer_fn: normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
normalizer_params: normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collections per variable.
outputs_collections: collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_op_scope.
Returns:
the tensor variable representing the result of the series of operations.
Raises:
ValueError: if x has rank less than 2 or if its last dimension is not set.
"""
if not isinstance(num_outputs, int):
    raise ValueError('num_outputs should be integer, got %s.' % num_outputs)
with variable_scope.variable_op_scope([inputs],
scope,
'fully_connected',
reuse=reuse) as sc:
dtype = inputs.dtype.base_dtype
num_input_units = utils.last_dimension(inputs.get_shape(), min_rank=2)
static_shape = inputs.get_shape().as_list()
static_shape[-1] = num_outputs
out_shape = array_ops.unpack(array_ops.shape(inputs))
out_shape[-1] = num_outputs
weights_shape = [num_input_units, num_outputs]
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
weights = variables.model_variable('weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
collections=weights_collections,
trainable=trainable)
if len(static_shape) > 2:
# Reshape inputs
inputs = array_ops.reshape(inputs, [-1, num_input_units])
outputs = standard_ops.matmul(inputs, weights)
if normalizer_fn:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_outputs,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(outputs, biases)
if len(static_shape) > 2:
# Reshape back outputs
outputs = array_ops.reshape(outputs, array_ops.pack(out_shape))
outputs.set_shape(static_shape)
if activation_fn:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
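# A minimal usage sketch for `fully_connected` (illustrative only; assumes the
# module-level imports above, e.g. `array_ops`, are available). It shows the
# rank > 2 behaviour described in the docstring: the input is flattened to 2-D
# for the matmul and the leading dimensions are restored afterwards.
def _fully_connected_rank3_example():
  inputs = array_ops.ones([4, 10, 16])  # hypothetical [batch, time, depth] input
  outputs = fully_connected(inputs, 32, scope='example_fc')
  return outputs  # static shape: [4, 10, 32]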
@add_arg_scope
def max_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
outputs_collections=None,
scope=None):
"""Adds a Max Pooling op.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
    stride: a list of length 2: [stride_height, stride_width].
      Can be an int if both strides are the same.
padding: the padding method, either 'VALID' or 'SAME'.
outputs_collections: collection to add the outputs.
scope: Optional scope for op_scope.
Returns:
a tensor representing the results of the pooling operation.
Raises:
    ValueError: if 'kernel_size' is not a 2-element list or an int.
"""
with ops.op_scope([inputs], scope, 'MaxPool2D') as sc:
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
outputs = nn.max_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
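# A minimal usage sketch for `max_pool2d` (illustrative only; assumes the
# module-level imports above are available). With 'VALID' padding the output
# spatial size is floor((input_size - kernel_size) / stride) + 1, so a 28x28
# feature map pooled with kernel 2 and the default stride 2 becomes 14x14.
def _max_pool2d_example():
  feature_maps = array_ops.ones([8, 28, 28, 16])  # hypothetical feature maps
  return max_pool2d(feature_maps, kernel_size=2)  # static shape: [8, 14, 14, 16]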
@add_arg_scope
def one_hot_encoding(labels,
num_classes,
on_value=1.0,
off_value=0.0,
outputs_collections=None,
scope=None):
"""Transform numeric labels into onehot_labels using tf.one_hot.
Args:
labels: [batch_size] target labels.
num_classes: total number of classes.
on_value: A scalar defining the on-value.
off_value: A scalar defining the off-value.
outputs_collections: collection to add the outputs.
scope: Optional scope for op_scope.
Returns:
one hot encoding of the labels.
"""
with ops.op_scope([labels, num_classes], scope, 'OneHotEncoding') as sc:
if labels.dtype == dtypes.int32:
labels = standard_ops.to_int64(labels)
outputs = standard_ops.one_hot(labels,
num_classes,
on_value=on_value,
off_value=off_value)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
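# A minimal usage sketch for `one_hot_encoding` (illustrative only; assumes
# the module-level imports above, e.g. `array_ops` and `dtypes`, are
# available). Each integer label is expanded to a one-hot row, e.g. label 1
# with num_classes=3 becomes [0., 1., 0.] using the default on/off values.
def _one_hot_encoding_example():
  labels = array_ops.ones([4], dtype=dtypes.int32)  # hypothetical labels, all class 1
  return one_hot_encoding(labels, num_classes=3)  # static shape: [4, 3]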
def _apply_activation(y, activation_fn, output_collections):
if activation_fn:
y = activation_fn(y)
ops.add_to_collections(list(output_collections or []) +
[ops.GraphKeys.ACTIVATIONS], y)
return y
def stack(inputs, layer, stack_args, **kwargs):
"""Builds a stack of layers by applying layer repeatedly using stack_args.
`stack` allows you to repeatedly apply the same operation with different
arguments `stack_args[i]`. For each application of the layer, `stack` creates
a new scope appended with an increasing number. For example:
```python
stack(x, fully_connected, [32, 64, 128], scope='fc')
# It is equivalent to:
x = fully_connected(x, 32, scope='fc/fc_1')
x = fully_connected(x, 64, scope='fc/fc_2')
x = fully_connected(x, 128, scope='fc/fc_3')
```
Args:
inputs: A `Tensor` suitable for layer.
layer: A layer(inputs, *args, **kwargs)
stack_args: A list/tuple of parameters for each call of layer.
**kwargs: Extra kwargs for the layer.
Returns:
a `Tensor` result of applying the stacked layers.
Raises:
ValueError: if the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
if not isinstance(stack_args, (list, tuple)):
raise ValueError('stack_args need to be a list or tuple')
with variable_scope.variable_op_scope([inputs], scope, 'Stack'):
outputs = inputs
scope = scope or layer.__name__
for i in range(len(stack_args)):
kwargs['scope'] = scope + '_' + str(i+1)
layer_args = stack_args[i]
if not isinstance(layer_args, (list, tuple)):
layer_args = [layer_args]
outputs = layer(outputs, *layer_args, **kwargs)
return outputs
def legacy_fully_connected(x,
num_output_units,
activation_fn=None,
weight_init=initializers.xavier_initializer(),
bias_init=init_ops.zeros_initializer,
name=None,
weight_collections=(ops.GraphKeys.WEIGHTS,),
bias_collections=(ops.GraphKeys.BIASES,),
output_collections=(ops.GraphKeys.ACTIVATIONS,),
trainable=True,
weight_regularizer=None,
bias_regularizer=None):
# pylint: disable=anomalous-backslash-in-string
r"""Adds the parameters for a fully connected layer and returns the output.
A fully connected layer is generally defined as a matrix multiply:
`y = f(w * x + b)` where `f` is given by `activation_fn`. If
`activation_fn` is `None`, the result of `y = w * x + b` is
returned.
If `x` has shape [\\\(\\text{dim}_0, \\text{dim}_1, ..., \\text{dim}_n\\\)]
with more than 2 dimensions (\\\(n > 1\\\)), then we repeat the matrix
multiply along the first dimensions. The result r is a tensor of shape
[\\\(\\text{dim}_0, ..., \\text{dim}_{n-1},\\\) `num_output_units`],
where \\\( r_{i_0, ..., i_{n-1}, k} =
\\sum_{0 \\leq j < \\text{dim}_n} x_{i_0, ... i_{n-1}, j} \cdot w_{j, k}\\\).
This is accomplished by reshaping `x` to 2-D
[\\\(\\text{dim}_0 \\cdot ... \\cdot \\text{dim}_{n-1}, \\text{dim}_n\\\)]
before the matrix multiply and afterwards reshaping it to
[\\\(\\text{dim}_0, ..., \\text{dim}_{n-1},\\\) `num_output_units`].
This op creates `w` and optionally `b`. Bias (`b`) can be disabled by setting
`bias_init` to `None`.
The variable creation is compatible with `tf.variable_scope` and so can be
reused with `tf.variable_scope` or `tf.make_template`.
Most of the details of variable creation can be controlled by specifying the
initializers (`weight_init` and `bias_init`) and in which collections to place
the created variables (`weight_collections` and `bias_collections`; note that
the variables are always added to the `VARIABLES` collection). The output of
the layer can be placed in custom collections using `output_collections`.
The collections arguments default to `WEIGHTS`, `BIASES` and `ACTIVATIONS`,
respectively.
A per layer regularization can be specified by setting `weight_regularizer`
and `bias_regularizer`, which are applied to the weights and biases
respectively, and whose output is added to the `REGULARIZATION_LOSSES`
collection.
Args:
x: The input `Tensor`.
num_output_units: The size of the output.
    activation_fn: A function that takes a single Tensor and applies it as a
      non-linearity. If None, no activation is applied.
weight_init: An optional weight initialization, defaults to
`xavier_initializer`.
bias_init: An initializer for the bias, defaults to 0. Set to `None` in
order to disable bias.
name: The name for this operation is used to name operations and to find
variables. If specified it must be unique for this scope, otherwise a
unique name starting with "fully_connected" will be created. See
`tf.variable_op_scope` for details.
weight_collections: List of graph collections to which weights are added.
bias_collections: List of graph collections to which biases are added.
output_collections: List of graph collections to which outputs are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
weight_regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`. Used for weights.
bias_regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`. Used for biases.
Returns:
The output of the fully connected layer.
Raises:
ValueError: if x has rank less than 2 or if its last dimension is not set.
"""
with variable_scope.variable_op_scope([x], name, 'fully_connected'):
dims = x.get_shape().dims
if dims is None:
raise ValueError('dims of x must be known but is None')
if len(dims) < 2:
raise ValueError('rank of x must be at least 2 not: %d' % len(dims))
num_input_units = dims[-1].value
if num_input_units is None:
raise ValueError('last dimension of x must be known but is None')
dtype = x.dtype.base_dtype
weight_collections = set(list(weight_collections or []) +
[ops.GraphKeys.VARIABLES])
w = variable_scope.get_variable('weights',
shape=[num_input_units, num_output_units],
dtype=dtype,
initializer=weight_init,
collections=weight_collections,
regularizer=weight_regularizer,
trainable=trainable)
x_2_dim = x if len(dims) <= 2 else array_ops.reshape(x,
[-1, num_input_units])
y = standard_ops.matmul(x_2_dim, w)
if bias_init is not None:
bias_collections = set(list(bias_collections or []) +
[ops.GraphKeys.VARIABLES])
b = variable_scope.get_variable('bias',
shape=[num_output_units],
dtype=dtype,
initializer=bias_init,
collections=bias_collections,
regularizer=bias_regularizer,
trainable=trainable)
y = nn.bias_add(y, b)
if len(dims) > 2:
out_shape = array_ops.unpack(array_ops.shape(x))
out_shape[-1] = num_output_units
y = array_ops.reshape(y, array_ops.pack(out_shape))
static_shape = x.get_shape().as_list()
static_shape[-1] = num_output_units
y.set_shape(static_shape)
return _apply_activation(y, activation_fn, output_collections)
# TODO(eiderm): Verify and fix autocomplete in colab (also relu6).
# Simple aliases which remove the activation_fn parameter.
legacy_relu = functools.partial(legacy_fully_connected, activation_fn=nn.relu)
legacy_linear = functools.partial(legacy_fully_connected, activation_fn=None)
relu = functools.partial(fully_connected, activation_fn=nn.relu)
relu6 = functools.partial(fully_connected, activation_fn=nn.relu6)
linear = functools.partial(fully_connected, activation_fn=None)
# Simple alias for convolution2d.
conv2d = convolution2d
|
|
#!/usr/bin/env python
#
# Copyright 2018-2019 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import unittest
from io import StringIO
from pyparsing import Word, ParseException, ParseFatalException, alphanums
try:
from fragments import FragmentFile, FRAGMENT_TYPES, Fragment, KeyGrammar
from sdkconfig import SDKConfig
except ImportError:
sys.path.append('../')
from fragments import FragmentFile, FRAGMENT_TYPES, Fragment, KeyGrammar
from sdkconfig import SDKConfig
class SampleFragment(Fragment):
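    # The KeyGrammar arguments are assumed to be (value grammar, minimum number
    # of values, maximum number of values, required); this matches the tests
    # below, where key_1 is mandatory and key_3 accepts between 3 and 5 values.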
grammars = {
"key_1": KeyGrammar(Word(alphanums + "_").setResultsName("value"), 0, None, True),
"key_2": KeyGrammar(Word(alphanums + "_").setResultsName("value"), 0, None, False),
"key_3": KeyGrammar(Word(alphanums + "_").setResultsName("value"), 3, 5, False)
}
def set_key_value(self, key, parse_results):
if key == "key_1":
self.key_1 = list()
for result in parse_results:
self.key_1.append(result["value"])
elif key == "key_2":
self.key_2 = list()
for result in parse_results:
self.key_2.append(result["value"])
def get_key_grammars(self):
return self.__class__.grammars
FRAGMENT_TYPES["test"] = SampleFragment
class FragmentTest(unittest.TestCase):
def setUp(self):
self.sdkconfig = SDKConfig("data/Kconfig", "data/sdkconfig")
@staticmethod
def create_fragment_file(contents, name="test_fragment.lf"):
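        # FragmentFile is only handed this file-like object, so a StringIO with
        # a `name` attribute (presumably used for error reporting) is enough to
        # stand in for a real .lf fragment file in these tests.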
f = StringIO(contents)
f.name = name
return f
def test_basic(self):
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
value_1
value_2 # comments should be ignored
value_3
# this is a comment as well
key_2: value_a
# this is the last comment
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(len(fragment_file.fragments[0].key_1), 3)
self.assertEqual(fragment_file.fragments[0].key_1[0], "value_1")
self.assertEqual(fragment_file.fragments[0].key_1[1], "value_2")
self.assertEqual(fragment_file.fragments[0].key_1[2], "value_3")
self.assertEqual(len(fragment_file.fragments[0].key_2), 1)
self.assertEqual(fragment_file.fragments[0].key_2[0], "value_a")
def test_duplicate_keys(self):
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1: value_1
key_1: value_a
""")
with self.assertRaises(ParseFatalException):
FragmentFile(test_fragment, self.sdkconfig)
def test_empty_key(self):
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
def test_conditional(self):
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
value_1
if A = y:
value_2
value_3
if A = n:
value_4
if B = n:
value_5
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(fragment_file.fragments[0].key_1[0], "value_1")
self.assertEqual(fragment_file.fragments[0].key_1[1], "value_2")
self.assertEqual(fragment_file.fragments[0].key_1[2], "value_3")
self.assertEqual(fragment_file.fragments[0].key_1[3], "value_5")
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
value_1
if B = y:
value_2
elif C = y:
value_3
elif A = y:
value_4
else:
value_5
value_6
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(fragment_file.fragments[0].key_1[0], "value_1")
self.assertEqual(fragment_file.fragments[0].key_1[1], "value_3")
self.assertEqual(fragment_file.fragments[0].key_1[2], "value_6")
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
value_1
if A = y:
value_2
if B = y:
value_3
else:
value_4
if C = y:
value_5
value_6
value_7
key_2:
value_a
if B != y:
value_b
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(fragment_file.fragments[0].key_1[0], "value_1")
self.assertEqual(fragment_file.fragments[0].key_1[1], "value_2")
self.assertEqual(fragment_file.fragments[0].key_1[2], "value_4")
self.assertEqual(fragment_file.fragments[0].key_1[3], "value_5")
self.assertEqual(fragment_file.fragments[0].key_1[4], "value_6")
self.assertEqual(fragment_file.fragments[0].key_1[5], "value_7")
self.assertEqual(fragment_file.fragments[0].key_2[0], "value_a")
self.assertEqual(fragment_file.fragments[0].key_2[1], "value_b")
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
if A = n:
value_2
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(len(fragment_file.fragments[0].key_1), 0)
def test_empty_file(self):
test_fragment = self.create_fragment_file(u"""
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(len(fragment_file.fragments), 0)
def test_setting_indent(self):
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
value_1
value_2
value_3
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(len(fragment_file.fragments[0].key_1), 3)
self.assertEqual(fragment_file.fragments[0].key_1[0], "value_1")
self.assertEqual(fragment_file.fragments[0].key_1[1], "value_2")
self.assertEqual(fragment_file.fragments[0].key_1[2], "value_3")
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
value_1
value_2 # first element dictates indent
value_3
""")
with self.assertRaises(ParseFatalException):
FragmentFile(test_fragment, self.sdkconfig)
def test_values_num_limit(self):
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
value_a
key_3:
value_1
value_2
value_3
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
value_a
key_3:
value_1
value_2
value_3
value_4
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(len(fragment_file.fragments), 1)
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
value_a
key_3:
value_1
value_2
value_3
value_4
value_5
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(len(fragment_file.fragments), 1)
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
value_a
key_3:
value_1
value_2
""")
with self.assertRaises(ParseFatalException):
FragmentFile(test_fragment, self.sdkconfig)
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
value_a
key_3:
value_1
value_2
value_3
value_4
value_5
value_6
""")
with self.assertRaises(ParseFatalException):
FragmentFile(test_fragment, self.sdkconfig)
def test_unsupported_key(self):
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
value_a
key_4:
value_1
""")
with self.assertRaises(ParseFatalException):
FragmentFile(test_fragment, self.sdkconfig)
def test_empty_fragment(self):
test_fragment = self.create_fragment_file(u"""
[test:test]
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
def test_empty_conditional(self):
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
if B = y:
else:
value_1
""")
with self.assertRaises(ParseFatalException):
FragmentFile(test_fragment, self.sdkconfig)
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
if B = y:
value_1
else B = y:
""")
with self.assertRaises(ParseFatalException):
FragmentFile(test_fragment, self.sdkconfig)
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
if B = y:
value_1
elif B = y:
else:
value_2
""")
with self.assertRaises(ParseFatalException):
FragmentFile(test_fragment, self.sdkconfig)
def test_out_of_order_conditional(self):
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
elif B = y:
value_1
else:
value_2
""")
with self.assertRaises(ParseFatalException):
FragmentFile(test_fragment, self.sdkconfig)
test_fragment = self.create_fragment_file(u"""
[test:test]
key_1:
else:
value_2
""")
with self.assertRaises(ParseFatalException):
FragmentFile(test_fragment, self.sdkconfig)
def test_required_keys(self):
test_fragment = self.create_fragment_file(u"""
[test:test]
key_2:
value_1
""")
with self.assertRaises(ParseFatalException):
FragmentFile(test_fragment, self.sdkconfig)
def test_multiple_fragments(self):
test_fragment = self.create_fragment_file(u"""
[test:test1]
key_1:
value_1
[test:test2]
key_1:
value_2
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(len(fragment_file.fragments), 2)
self.assertEqual(fragment_file.fragments[0].key_1[0], "value_1")
self.assertEqual(fragment_file.fragments[1].key_1[0], "value_2")
def test_whole_conditional_fragment(self):
test_fragment = self.create_fragment_file(u"""
if B = y:
[test:test1]
key_1:
value_1
else:
[test:test2]
key_1:
value_2
if A = y:
[test:test3]
key_1:
value_3
if C = y:
value_6
[test:test4]
key_1:
value_4
[test:test5]
key_1:
value_5
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(len(fragment_file.fragments), 4)
self.assertEqual(fragment_file.fragments[0].name, "test2")
self.assertEqual(fragment_file.fragments[1].name, "test3")
self.assertEqual(fragment_file.fragments[1].key_1[1], "value_6")
self.assertEqual(fragment_file.fragments[2].name, "test4")
self.assertEqual(fragment_file.fragments[3].name, "test5")
def test_equivalent_conditional_fragment(self):
test_fragment1 = self.create_fragment_file(u"""
if A = y:
[test:test1]
key_1:
value_1
else:
[test:test2]
key_1:
value_2
""")
fragment_file1 = FragmentFile(test_fragment1, self.sdkconfig)
self.assertEqual(len(fragment_file1.fragments), 1)
self.assertEqual(fragment_file1.fragments[0].key_1[0], "value_1")
test_fragment2 = self.create_fragment_file(u"""
[test:test1]
key_1:
if A = y:
value_1
else:
value_2
""")
fragment_file2 = FragmentFile(test_fragment2, self.sdkconfig)
self.assertEqual(len(fragment_file2.fragments), 1)
self.assertEqual(fragment_file2.fragments[0].key_1[0], "value_1")
class SectionsTest(FragmentTest):
def test_basic(self):
test_fragment = self.create_fragment_file(u"""
[sections:test]
entries:
.section1
.section2
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(fragment_file.fragments[0].entries, {".section1", ".section2"})
def test_duplicate_entries(self):
test_fragment = self.create_fragment_file(u"""
[sections:test]
entries:
.section1
.section2
.section3
.section2
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(fragment_file.fragments[0].entries, {".section1", ".section2", ".section3"})
def test_empty_entries(self):
test_fragment = self.create_fragment_file(u"""
[sections:test]
entries:
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
test_fragment = self.create_fragment_file(u"""
[sections:test]
entries:
if B = y:
.section1
""")
with self.assertRaises(ParseFatalException):
FragmentFile(test_fragment, self.sdkconfig)
class SchemeTest(FragmentTest):
def test_basic(self):
test_fragment = self.create_fragment_file(u"""
[scheme:test]
entries:
sections1 -> target1
sections2 -> target2
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(fragment_file.fragments[0].entries,
{("sections1", "target1"),
("sections2", "target2")})
def test_duplicate_entries(self):
test_fragment = self.create_fragment_file(u"""
[scheme:test]
entries:
sections1 -> target1
sections2 -> target2
sections2 -> target2
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(fragment_file.fragments[0].entries,
{("sections1", "target1"),
("sections2", "target2")})
def test_empty_entries(self):
test_fragment = self.create_fragment_file(u"""
[scheme:test]
entries:
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
test_fragment = self.create_fragment_file(u"""
[scheme:test]
entries:
if B = y:
sections1 -> target1
""")
with self.assertRaises(ParseFatalException):
FragmentFile(test_fragment, self.sdkconfig)
def test_improper_grammar(self):
test_fragment = self.create_fragment_file(u"""
[scheme:test]
entries:
sections1, target1 # improper separator
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
class MappingTest(FragmentTest):
def test_basic(self):
test_fragment = self.create_fragment_file(u"""
[mapping:test]
archive: lib.a
entries:
obj:symbol (noflash)
obj (noflash)
obj:symbol_2 (noflash)
obj_2 (noflash)
* (noflash)
""")
expected = {("obj", "symbol", "noflash"),
("obj", None, "noflash"),
("obj", "symbol_2", "noflash"),
("obj_2", None, "noflash"),
("*", None, "noflash")}
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(expected, fragment_file.fragments[0].entries)
def test_archive(self):
test_fragment = self.create_fragment_file(u"""
[mapping:test]
archive:
entries:
* (default)
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
test_fragment = self.create_fragment_file(u"""
[mapping:test]
archive:
lib1.a
lib2.a
entries:
* (default)
""")
with self.assertRaises(ParseFatalException):
FragmentFile(test_fragment, self.sdkconfig)
def test_empty_entries(self):
test_fragment = self.create_fragment_file(u"""
[mapping:test]
archive:
lib.a
entries:
if B = y:
* (noflash) # if condition is false, then no 'entries' key value
""")
expected = set()
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(expected, fragment_file.fragments[0].entries)
test_fragment = self.create_fragment_file(u"""
[mapping:test]
archive:
lib.a
entries:
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
def test_duplicate_entries(self):
test_fragment = self.create_fragment_file(u"""
[mapping:test]
archive:
lib.a
entries:
obj:symbol (noflash)
obj:symbol (noflash)
""")
expected = {("obj", "symbol", "noflash")}
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(expected, fragment_file.fragments[0].entries)
def test_invalid_grammar(self):
test_fragment = self.create_fragment_file(u"""
[mapping:test]
archive:
lib.a
""")
with self.assertRaises(ParseFatalException):
FragmentFile(test_fragment, self.sdkconfig)
test_fragment = self.create_fragment_file(u"""
[mapping:test]
entries:
* (default)
""")
with self.assertRaises(ParseFatalException):
FragmentFile(test_fragment, self.sdkconfig)
test_fragment = self.create_fragment_file(u"""
[mapping:test]
archive: lib.a
entries:
obj: (noflash)
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
test_fragment = self.create_fragment_file(u"""
[mapping:test]
archive: lib.a
entries:
obj: ()
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
test_fragment = self.create_fragment_file(u"""
[mapping:test]
archive: lib.a
entries:
obj:symbol
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
test_fragment = self.create_fragment_file(u"""
[mapping:test]
archive: lib.a
entries:
(noflash)
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
test_fragment = self.create_fragment_file(u"""
[mapping:test]
archive: lib.a
entries:
obj:* (noflash)
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
test_fragment = self.create_fragment_file(u"""
[mapping:test]
archive: lib.a
entries:
:symbol (noflash)
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
test_fragment = self.create_fragment_file(u"""
[mapping:test]
archive: lib.a
entries:
*:symbol (noflash)
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
class DeprecatedMappingTest(FragmentTest):
def test_valid_grammar(self):
test_fragment = self.create_fragment_file(u"""
[mapping]
archive: lib.a
entries:
obj:symbol (noflash)
# Comments should not matter
obj (noflash)
# Nor should whitespace
obj : symbol_2 ( noflash )
obj_2 ( noflash )
* (noflash)
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual("lib.a", fragment_file.fragments[0].archive)
self.assertEqual("lib_a", fragment_file.fragments[0].name)
expected = {("obj", "symbol", "noflash"),
("obj", None, "noflash"),
("obj", "symbol_2", "noflash"),
("obj_2", None, "noflash"),
("*", None, "noflash")
}
self.assertEqual(expected, fragment_file.fragments[0].entries)
def test_explicit_blank_default_w_others(self):
test_fragment = self.create_fragment_file(u"""
[mapping]
archive: lib.a
entries:
: A = n
obj_a (noflash)
: default
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
expected = {("*", None, "default")}
self.assertEqual(expected, fragment_file.fragments[0].entries)
def test_implicit_blank_default_w_others(self):
test_fragment = self.create_fragment_file(u"""
[mapping]
archive: lib.a
entries:
: A = n
obj_a (noflash)
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
expected = {("*", None, "default")}
self.assertEqual(expected, fragment_file.fragments[0].entries)
def test_explicit_blank_default(self):
test_fragment = self.create_fragment_file(u"""
[mapping]
archive: lib.a
entries:
: default
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
expected = {("*", None, "default")}
self.assertEqual(expected, fragment_file.fragments[0].entries)
def test_implicit_blank_default(self):
test_fragment = self.create_fragment_file(u"""
[mapping]
archive: lib.a
entries:
: default
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
expected = {("*", None, "default")}
self.assertEqual(expected, fragment_file.fragments[0].entries)
def test_multiple_entries(self):
test_fragment = self.create_fragment_file(u"""
[mapping]
archive: lib.a
entries:
: A = n
obj_a1 (noflash)
obj_a2 (noflash)
: B = n
obj_b1 (noflash)
obj_b2 (noflash)
obj_b3 (noflash)
: C = n
obj_c1 (noflash)
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
expected = {("obj_b1", None, "noflash"),
("obj_b2", None, "noflash"),
("obj_b3", None, "noflash")}
self.assertEqual(expected, fragment_file.fragments[0].entries)
def test_blank_entries(self):
test_fragment = self.create_fragment_file(u"""
[mapping]
archive: lib.a
entries:
: A = n
obj_a (noflash)
: B = n
: C = n
obj_c (noflash)
: default
obj (noflash)
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
expected = {("*", None, "default")}
self.assertEqual(expected, fragment_file.fragments[0].entries)
def test_blank_first_condition(self):
test_fragment = self.create_fragment_file(u"""
[mapping]
archive: lib.a
entries:
obj_a (noflash)
: CONFIG_B = y
obj_b (noflash)
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
def test_nonlast_default_1(self):
test_fragment = self.create_fragment_file(u"""
[mapping]
archive: lib.a
entries:
: default
obj_a (noflash)
: CONFIG_A = y
obj_A (noflash)
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
def test_nonlast_default_2(self):
test_fragment = self.create_fragment_file(u"""
[mapping]
archive: lib.a
entries:
: A = y
obj_A (noflash)
: default
obj_a (noflash)
: B = y
obj_B (noflash
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
def test_nonlast_default_3(self):
test_fragment = self.create_fragment_file(u"""
[mapping]
archive: lib.a
entries:
: A = y
obj_A (noflash)
:
obj_a (noflash)
: B = y
obj_B (noflash
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
def test_duplicate_default_1(self):
test_fragment = self.create_fragment_file(u"""
[mapping]
archive: lib.a
entries:
: CONFIG_A = y
obj_A (noflash)
: default
obj_a (noflash)
: CONFIG_B = y
obj_B (noflash)
: default
obj_a (noflash)
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
def test_duplicate_default_2(self):
test_fragment = self.create_fragment_file(u"""
[mapping]
archive: lib.a
entries:
: CONFIG_A = y
obj_A (noflash)
: CONFIG_B = y
obj_a (noflash)
: default
obj_B (noflash)
:
obj_a (noflash)
""")
with self.assertRaises(ParseException):
FragmentFile(test_fragment, self.sdkconfig)
def test_mixed_deprecated_mapping(self):
test_fragment = self.create_fragment_file(u"""
[mapping]
archive: lib.a
entries:
: A = n
obj_A (noflash)
: default
obj_B (noflash)
[mapping:test]
archive: lib.a
entries:
if A = n:
obj_A (noflash)
else:
obj_B (noflash)
""")
fragment_file = FragmentFile(test_fragment, self.sdkconfig)
self.assertEqual(2, len(fragment_file.fragments))
self.assertEqual(fragment_file.fragments[0].entries,
fragment_file.fragments[1].entries)
if __name__ == "__main__":
unittest.main()
|
|
# -*- coding: utf-8 -*-
"""Compound ZIP parser plugin for OpenXML files."""
from __future__ import unicode_literals
import re
import zipfile
from xml.parsers import expat
from defusedxml import ElementTree
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import czip
from plaso.parsers.czip_plugins import interface
class OpenXMLEventData(events.EventData):
"""OXML event data.
Attributes:
app_version (str): version of application that created document.
author (str): name of author.
creating_app (str): name of application that created document.
doc_security (str): ???
hyperlinks_changed (bool): True if hyperlinks have changed.
i4 (str): ???
last_saved_by (str): name of user that last saved the document.
links_up_to_date (bool): True if the links are up to date.
number_of_characters (int): number of characters without spaces in
the document.
number_of_characters_with_spaces (int): number of characters including
spaces in the document.
number_of_lines (int): number of lines in the document.
number_of_pages (int): number of pages in the document.
number_of_paragraphs (int): number of paragraphs in the document.
number_of_words (int): number of words in the document.
revision_number (int): revision number.
scale_crop (bool): True if crop to scale is enabled.
shared_doc (bool): True if document is shared.
template (str): name of template ???
total_time (str): ???
"""
DATA_TYPE = 'metadata:openxml'
def __init__(self):
"""Initializes event data."""
super(OpenXMLEventData, self).__init__(data_type=self.DATA_TYPE)
self.app_version = None
self.author = None
self.creating_app = None
self.doc_security = None
self.hyperlinks_changed = None
self.i4 = None
self.last_saved_by = None
self.links_up_to_date = None
self.number_of_characters = None
self.number_of_characters_with_spaces = None
self.number_of_lines = None
self.number_of_pages = None
self.number_of_paragraphs = None
self.number_of_words = None
self.revision_number = None
self.scale_crop = None
self.shared_doc = None
self.template = None
self.total_time = None
class OpenXMLPlugin(interface.CompoundZIPPlugin):
"""Parse metadata from OXML files."""
NAME = 'oxml'
DATA_FORMAT = 'OpenXML (OXML) file'
REQUIRED_PATHS = frozenset(
['[Content_Types].xml', '_rels/.rels', 'docProps/core.xml'])
_PROPERTY_NAMES = {
'creator': 'author',
'lastModifiedBy': 'last_saved_by',
'Total_Time': 'total_edit_time',
'Pages': 'number_of_pages',
'CharactersWithSpaces': 'number_of_characters_with_spaces',
'Paragraphs': 'number_of_paragraphs',
'Characters': 'number_of_characters',
'Lines': 'number_of_lines',
'revision': 'revision_number',
'Words': 'number_of_words',
'Application': 'creating_app',
'Shared_Doc': 'shared',
}
def _GetPropertyValue(self, parser_mediator, properties, property_name):
"""Retrieves a property value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
properties (dict[str, object]): properties.
property_name (str): name of the property.
Returns:
str: property value.
"""
property_value = properties.get(property_name, None)
if isinstance(property_value, bytes):
try:
        # TODO: get encoding from XML metadata.
property_value = property_value.decode('utf-8')
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning(
'unable to decode property: {0:s}'.format(property_name))
return property_value
def _FormatPropertyName(self, property_name):
"""Formats a camel case property name as snake case.
Args:
property_name (str): property name in camel case.
Returns:
str: property name in snake case.
"""
# TODO: Add Unicode support.
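    # For example, 'HyperlinksChanged' becomes 'hyperlinks_changed' and
    # 'AppVersion' becomes 'app_version'.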
fix_key = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', property_name)
return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', fix_key).lower()
def _ParsePropertiesXMLFile(self, xml_data):
"""Parses a properties XML file.
Args:
      xml_data (bytes): data of a properties XML file.
Returns:
dict[str, object]: properties.
Raises:
zipfile.BadZipfile: if the properties XML file cannot be read.
"""
xml_root = ElementTree.fromstring(xml_data)
properties = {}
for xml_element in xml_root.iter():
if not xml_element.text:
continue
# The property name is formatted as: {URL}name
# For example: {http://purl.org/dc/terms/}modified
_, _, name = xml_element.tag.partition('}')
      # Do not include the 'lpstr' attribute because it is very verbose.
if name == 'lpstr':
continue
property_name = self._PROPERTY_NAMES.get(name, None)
if not property_name:
property_name = self._FormatPropertyName(name)
properties[property_name] = xml_element.text
return properties
def _ParseRelationshipsXMLFile(self, xml_data):
"""Parses the relationships XML file (_rels/.rels).
Args:
xml_data (bytes): data of a _rels/.rels XML file.
Returns:
list[str]: property file paths. The path is relative to the root of
the ZIP file.
Raises:
zipfile.BadZipfile: if the relationship XML file cannot be read.
"""
xml_root = ElementTree.fromstring(xml_data)
property_files = []
for xml_element in xml_root.iter():
type_attribute = xml_element.get('Type')
if 'properties' in repr(type_attribute):
target_attribute = xml_element.get('Target')
property_files.append(target_attribute)
return property_files
def _ProduceEvent(
self, parser_mediator, event_data, properties, property_name,
timestamp_description, error_description):
"""Produces an event.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
event_data (OpenXMLEventData): event data.
properties (dict[str, object]): properties.
property_name (str): name of the date and time property.
timestamp_description (str): description of the meaning of the timestamp
value.
error_description (str): description of the meaning of the timestamp
value for error reporting purposes.
"""
time_string = properties.get(property_name, None)
if not time_string:
return
    # Date and time strings are in ISO 8601 format with either 1 second
    # or 100 nanosecond precision. For example:
# 2012-11-07T23:29:00Z
# 2012-03-05T20:40:00.0000000Z
date_time = dfdatetime_time_elements.TimeElements()
try:
date_time.CopyFromStringISO8601(time_string)
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
except ValueError as exception:
parser_mediator.ProduceExtractionWarning(
'unsupported {0:s}: {1:s} with error: {2!s}'.format(
error_description, time_string, exception))
def InspectZipFile(self, parser_mediator, zip_file):
"""Parses an OXML file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
      zip_file (zipfile.ZipFile): the zip file containing OXML content. It is
          not closed in this method, but will be closed by the parser logic
          in czip.py.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
try:
xml_data = zip_file.read('_rels/.rels')
property_files = self._ParseRelationshipsXMLFile(xml_data)
except (IndexError, IOError, KeyError, LookupError, OverflowError,
ValueError, ElementTree.ParseError, expat.ExpatError,
zipfile.BadZipfile) as exception:
parser_mediator.ProduceExtractionWarning((
'Unable to parse relationships XML file: _rels/.rels with error: '
'{0!s}').format(exception))
return
metadata = {}
for path in property_files:
try:
xml_data = zip_file.read(path)
properties = self._ParsePropertiesXMLFile(xml_data)
except (IndexError, IOError, KeyError, LookupError, OverflowError,
ValueError, ElementTree.ParseError, expat.ExpatError,
zipfile.BadZipfile) as exception:
parser_mediator.ProduceExtractionWarning((
'Unable to parse properties XML file: {0:s} with error: '
'{1!s}').format(path, exception))
continue
metadata.update(properties)
event_data = OpenXMLEventData()
event_data.app_version = self._GetPropertyValue(
parser_mediator, metadata, 'app_version')
event_data.author = self._GetPropertyValue(
parser_mediator, metadata, 'author')
event_data.creating_app = self._GetPropertyValue(
parser_mediator, metadata, 'creating_app')
event_data.doc_security = self._GetPropertyValue(
parser_mediator, metadata, 'doc_security')
event_data.hyperlinks_changed = self._GetPropertyValue(
parser_mediator, metadata, 'hyperlinks_changed')
event_data.i4 = self._GetPropertyValue(
parser_mediator, metadata, 'i4')
event_data.last_saved_by = self._GetPropertyValue(
parser_mediator, metadata, 'last_saved_by')
event_data.links_up_to_date = self._GetPropertyValue(
parser_mediator, metadata, 'links_up_to_date')
event_data.number_of_characters = self._GetPropertyValue(
parser_mediator, metadata, 'number_of_characters')
event_data.number_of_characters_with_spaces = self._GetPropertyValue(
parser_mediator, metadata, 'number_of_characters_with_spaces')
event_data.number_of_lines = self._GetPropertyValue(
parser_mediator, metadata, 'number_of_lines')
event_data.number_of_pages = self._GetPropertyValue(
parser_mediator, metadata, 'number_of_pages')
event_data.number_of_paragraphs = self._GetPropertyValue(
parser_mediator, metadata, 'number_of_paragraphs')
event_data.number_of_words = self._GetPropertyValue(
parser_mediator, metadata, 'number_of_words')
event_data.revision_number = self._GetPropertyValue(
parser_mediator, metadata, 'revision_number')
event_data.scale_crop = self._GetPropertyValue(
parser_mediator, metadata, 'scale_crop')
event_data.shared_doc = self._GetPropertyValue(
parser_mediator, metadata, 'shared_doc')
event_data.template = self._GetPropertyValue(
parser_mediator, metadata, 'template')
event_data.total_time = self._GetPropertyValue(
parser_mediator, metadata, 'total_time')
self._ProduceEvent(
parser_mediator, event_data, metadata, 'created',
definitions.TIME_DESCRIPTION_CREATION, 'creation time')
self._ProduceEvent(
parser_mediator, event_data, metadata, 'modified',
definitions.TIME_DESCRIPTION_MODIFICATION, 'modification time')
self._ProduceEvent(
parser_mediator, event_data, metadata, 'last_printed',
definitions.TIME_DESCRIPTION_LAST_PRINTED, 'last printed time')
czip.CompoundZIPParser.RegisterPlugin(OpenXMLPlugin)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import re
import stat
import optparse
import shutil
import imp
import codecs
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from glob import glob
import atexit
# We have some warnings because we reimport some libs. We don't want them to be shown at install
import warnings
PY3 = sys.version_info >= (3,)
if PY3:
basestring = str # no basestring in python 3
def _disable_warns(*args, **kwargs):
pass
warnings.showwarning = _disable_warns
# will fail under python 2.5, but if you really have such a version in
# prod you are a moron and we can't help you
python_version = sys.version_info
if python_version < (2, 6):
sys.exit("OpsBro require as a minimum Python 2.6, sorry")
# elif python_version >= (3,):
# sys.exit("OpsBro is not yet compatible with Python 3.x, sorry")
package_data = ['*.py']
# Is this setup.py call for a pypi interaction? If true, we won't hook a lot of things
is_pypi_register_upload = ('register' in sys.argv or ('sdist' in sys.argv and 'upload' in sys.argv))
if is_pypi_register_upload:
print("Pypi specal mode activated, skipping some black magic")
if '-v' not in sys.argv:
sys.argv.append('-v')
# Is it a first step installation for pip? (egg_info stuff)
is_pip_first_step = 'egg_info' in sys.argv
# Last step for pip is an install one (at least in pip 9.0.1)
is_pip_real_install_step = 'bdist_wheel' in sys.argv
# Black magic install:
# * copy /etc
# * look for dependencies from system packages
# * hide setup.py part
# If not black magic (like in the pip first step, or a pypi interaction (upload, etc.)),
# we do not want any black magic thing, and we try to behave like a standard python package ^^
# By default we love black magic, but if we are in a pip special call or pypi, we disable it
allow_black_magic = not is_pypi_register_upload and not is_pip_first_step
# We will need to allow a debug of the orig_sys_argv
orig_sys_argv = sys.argv[:]
################################## Utility functions for files
# helper function to read the README file
def read(fname):
return codecs.open(os.path.join(os.path.dirname(__file__), fname), 'r', 'utf8').read()
# Do a chmod -R +x
def _chmodplusx(d):
if not os.path.exists(d):
return
if os.path.isdir(d):
for item in os.listdir(d):
p = os.path.join(d, item)
if os.path.isdir(p):
_chmodplusx(p)
else:
st = os.stat(p)
os.chmod(p, st.st_mode | stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH)
else:
st = os.stat(d)
os.chmod(d, st.st_mode | stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH)
################################## Hook the ugly python setup() call that can output gcc warnings.
# NOTE: yes, there are os.dup and file descriptor things. Deal with it (r).
# Keep a trace of the old stdout, because setup() is just toooooooooo verbose when it succeeds
stdout_orig = sys.stdout
stderr_orig = sys.stderr
stdout_catched = StringIO()
stderr_redirect_path = '/tmp/stderr.opsbro.tmp' if os.name != 'nt' else r'c:\stderr.opsbro.tmp'
stderr_redirect = None
stderr_orig_bkp = None
def hook_stdout():
global stderr_redirect
# Do not hook if we are uploading to pypi
if not allow_black_magic:
return
sys.stdout = stdout_catched
sys.stderr = stdout_catched
# Also hook raw stderr
stderr_redirect = open(stderr_redirect_path, 'w')
os.dup2(stderr_redirect.fileno(), 2)
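    # dup2 points the process-level file descriptor 2 (stderr) at the temp
    # file, so even writes that bypass sys.stderr (for example gcc output from
    # C extension builds spawned by setup()) end up captured there as well.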
# Unhook stdout/stderr, put back the original streams
def unhook_stdout():
global stderr_redirect
# For pypi, we did not hook it
if not allow_black_magic:
return
# If we have something in the file descriptor 2, reinject into stderr
stderr_redirect.close()
# with open(stderr_redirect_path, 'r') as f:
# stdout_catched.write(f.read())
sys.stdout = stdout_orig
sys.stderr = stderr_orig
################################## Parse arguments, especially for pip install arg catched
parser = optparse.OptionParser("%prog [options]", version="%prog ")
parser.add_option('--root', dest="proot", metavar="ROOT", help='Root dir to install, useful only for packagers')
parser.add_option('--upgrade', '--update', dest="upgrade", action='store_true', help='Only upgrade')
parser.add_option('--install-scripts', dest="install_scripts", help='Path to install the opsbro binary')
parser.add_option('--skip-build', dest="skip_build", action='store_true', help='skipping build')
parser.add_option('-O', type="int", dest="optimize", help='byte-compile optimization level (used by pip install)')
parser.add_option('--record', dest="record", help='File in which to record the list of installed files. Used by pip install only')
parser.add_option('--single-version-externally-managed', dest="single_version", action='store_true', help='This option is for pip only')
old_error = parser.error
def _error(msg):
pass
parser.error = _error
opts, args = parser.parse_args()
# reenable the errors for later use
parser.error = old_error
root = opts.proot or ''
################################## Detect install or Update
prev_version = None
prev_path = ''
# We try to see if we are in a full install or an update process
is_update = False
# Try to import opsbro but not the local one. If available, we are in
# an upgrade phase, not a classic install
try:
if '.' in sys.path:
sys.path.remove('.')
if os.path.abspath('.') in sys.path:
sys.path.remove(os.path.abspath('.'))
if '' in sys.path:
sys.path.remove('')
import opsbro as opsbro_test_import
is_update = True
# Try to guess version
from opsbro.info import VERSION as prev_version
prev_path = os.path.dirname(opsbro_test_import.__file__)
del opsbro_test_import
# But to be sure future opsbro import will load the new one, we need to
# first hard unload the opsbro modules from python
# NOTE: this is only ok because we are in the setup.py, don't do this outside this scope!
all_modules = list(sys.modules.keys())
for modname in all_modules:
if modname == 'opsbro' or modname.startswith('opsbro.'):
del sys.modules[modname]
except ImportError as exp: # great, first install so
pass
# Now look at loading the local opsbro lib for version and banner
my_dir = os.path.dirname(os.path.abspath(__file__))
opsbro = imp.load_module('opsbro', *imp.find_module('opsbro', [os.path.realpath(my_dir)]))
from opsbro.info import VERSION, BANNER, TXT_BANNER
from opsbro.log import cprint, is_tty, sprintf, core_logger
from opsbro.misc.bro_quotes import get_quote
from opsbro.systempacketmanager import get_systepacketmgr
from opsbro.cli_display import print_h1
from opsbro.characters import CHARACTERS
systepacketmgr = get_systepacketmgr()
################################## Only root as it's a global system tool.
if os.name != 'nt' and os.getuid() != 0:
cprint('Setup must be launched as root.', color='red')
sys.exit(2)
# By default logger should not print anything
core_logger.setLevel('ERROR')
# But maybe we are in verbose mode?
if '-v' in sys.argv or os.environ.get('DEBUG_INSTALL', '0') == '1':
core_logger.setLevel('DEBUG')
core_logger.debug('SCRIPT: install/update script was call with arguments: %s' % orig_sys_argv)
what = 'Installing' if not is_update else 'Updating'
title = sprintf('%s' % what, color='magenta', end='') + sprintf(' OpsBro to version ', end='') + sprintf('%s' % VERSION, color='magenta', end='')
if allow_black_magic:
print_h1(title, raw_title=False)
################################## Start to print to the user
if allow_black_magic:
# If we have a real tty, we can print the delicious banner with lot of BRO
if is_tty():
cprint(BANNER)
else: # ok you are poor, just got some ascii art then
cprint(TXT_BANNER)
# Also print a Bro quote
quote, from_film = get_quote()
cprint(' >> %s (%s)\n' % (quote, from_film), color='grey')
if allow_black_magic:
if is_update:
cprint(' Previous OpsBro lib detected on this system:')
cprint(' * location: ', end='')
cprint(prev_path, color='blue')
cprint(' * version : ', end='')
cprint('%s' % prev_version, color='blue')
cprint(' * Using the ', end='')
cprint('update process', color='magenta')
print('')
if '--update' in args or opts.upgrade or '--upgrade' in args:
if 'update' in args:
sys.argv.remove('update')
sys.argv.insert(1, 'install')
if '--update' in args:
sys.argv.remove('--update')
if '--upgrade' in args:
sys.argv.remove('--upgrade')
is_update = True
# install: if we are called with setup.py install, or maybe with a pip launch (last step)
is_install = False
if not is_update and 'install' in args or is_pip_real_install_step:
is_install = True
install_scripts = opts.install_scripts or ''
# setup() will warn about unknown parameters we already managed,
# so we delete them
deleting_args = ['--skip-build']
to_del = []
for a in deleting_args:
for av in sys.argv:
if av.startswith(a):
idx = sys.argv.index(av)
to_del.append(idx)
if '=' not in av:
to_del.append(idx + 1)
to_del.sort()
to_del.reverse()
for idx in to_del:
sys.argv.pop(idx)
# Force the quiet mode for setup.py (too verbose by default)
if '-v' not in sys.argv and '--quiet' not in sys.argv and '-q' not in sys.argv:
sys.argv.insert(1, '--quiet')
################################## Prepare the list of files that will be installed
data_files = []
configuration_files = []
# Define files
if 'win' in sys.platform:
default_paths = {
'bin' : install_scripts or "c:\\opsbro\\bin",
'var' : "c:\\opsbro\\var",
'etc' : "c:\\opsbro\\etc",
'log' : "c:\\opsbro\\var\\log",
'run' : "c:\\opsbro\\var",
'libexec': "c:\\opsbro\\libexec",
}
data_files = []
elif 'linux' in sys.platform or 'sunos5' in sys.platform:
default_paths = {
'bin' : install_scripts or "/usr/bin",
'var' : "/var/lib/opsbro/",
'etc' : "/etc/opsbro",
'run' : "/var/run/opsbro",
'log' : "/var/log/opsbro",
'libexec': "/var/lib/opsbro/libexec",
}
data_files = [
(
os.path.join('/etc', 'init.d'),
['init.d/opsbro']
)
]
elif 'bsd' in sys.platform or 'dragonfly' in sys.platform:
default_paths = {
'bin' : install_scripts or "/usr/local/bin",
'var' : "/usr/local/libexec/opsbro",
'etc' : "/usr/local/etc/opsbro",
'run' : "/var/run/opsbro",
'log' : "/var/log/opsbro",
'libexec': "/usr/local/libexec/opsbro/plugins",
}
data_files = [
(
'/usr/local/etc/rc.d',
['bin/rc.d/opsbro']
)
]
else:
raise Exception("Unsupported platform, sorry")
# Be sure to install scripts in the bin dir
# compute scripts
scripts = [s for s in glob('bin/opsbro*') if not s.endswith('.py')]
data_files.append((default_paths['bin'], scripts))
def _get_all_from_directory(dirname, path_key, filter_dir=None):
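    # Example (with the linux default paths above): walking dirname='etc' with
    # path_key='etc' turns a file 'etc/foo/bar.cfg' into the entry
    # ('/etc/opsbro/foo', ['etc/foo/bar.cfg']) in configuration_files.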
rename_patern_string = r"^(%s\/|%s$)" % (dirname, dirname)
rename_patern = re.compile(rename_patern_string)
directory = dirname
if filter_dir:
directory = os.path.join(dirname, filter_dir)
for path, subdirs, files in os.walk(directory):
dest_path = os.path.join(default_paths[path_key], rename_patern.sub("", path))
# for void directories
if len(files) == 0:
configuration_files.append((dest_path, []))
for name in files:
configuration_files.append((dest_path, [os.path.join(path, name)]))
if not is_update:
_get_all_from_directory('etc', 'etc')
_get_all_from_directory('data', 'var')
else: # only take core directory for update
_get_all_from_directory('data', 'var', filter_dir='core-configuration')
# Libexec is always installed
for path, subdirs, files in os.walk('libexec'):
for name in files:
data_files.append((os.path.join(default_paths['libexec'], re.sub(r"^(libexec\/|libexec$)", "", path)), [os.path.join(path, name)]))
data_files.append((default_paths['run'], []))
data_files.append((default_paths['log'], []))
# Clean data files from all ~ emacs files :)
nd = []
for (r, files) in data_files:
nd.append((r, [p for p in files if not p.endswith('~')]))
data_files = nd
not_allowed_options = ['--upgrade', '--update']
for o in not_allowed_options:
if o in sys.argv:
sys.argv.remove(o)
################################## Look at prerequites, and if possible fix them with the system package instead of pip
if allow_black_magic:
print('')
    title = 'Checking prerequisites ' + sprintf('(1/3)', color='magenta', end='')
print_h1(title, raw_title=True)
# Maybe we won't be able to setup with packages, if so, switch to pip :(
install_from_pip = []
# Python 3 and 2 have differents packages
if PY3:
mod_need = {
'jinja2': {
'packages': {
'debian' : 'python3-jinja2',
'ubuntu' : 'python3-jinja2',
'amazon-linux' : 'python3-jinja2',
'amazon-linux2': 'python3-jinja2',
'centos' : 'python3-jinja2',
'redhat' : 'python3-jinja2',
'oracle-linux' : 'python3-jinja2',
'fedora' : 'python3-jinja2',
'opensuse' : 'python3-Jinja2',
'alpine' : 'py3-jinja2',
}
},
'Crypto': {
'packages': {
'debian' : 'python3-crypto',
'ubuntu' : 'python3-crypto',
'amazon-linux' : 'python3-crypto',
'amazon-linux2': 'python3-crypto',
'centos' : 'python3-crypto',
'redhat' : 'python3-crypto',
'oracle-linux' : 'python3-crypto',
'fedora' : 'python3-crypto',
'opensuse' : 'python3-pycrypto',
'alpine' : 'py3-crypto',
}
},
}
else:
mod_need = {
'jinja2': {
'packages': {
'debian' : 'python-jinja2',
'ubuntu' : 'python-jinja2',
'amazon-linux' : 'python-jinja2',
'amazon-linux2': 'python-jinja2',
'centos' : 'python-jinja2',
'redhat' : 'python-jinja2',
'oracle-linux' : 'python-jinja2',
'fedora' : 'python-jinja2',
'opensuse' : 'python-Jinja2',
'alpine' : 'py-jinja2',
}
},
'Crypto': {
'packages': {
'debian' : 'python-crypto',
'ubuntu' : 'python-crypto',
'amazon-linux' : 'python-crypto',
'amazon-linux2': 'python-crypto',
'centos' : 'python-crypto',
'redhat' : 'python-crypto',
'oracle-linux' : 'python-crypto',
'fedora' : 'python-crypto',
'opensuse' : 'python-pycrypto',
'alpine' : 'py-crypto',
}
},
}
# Some distros have another name for python-setuptools, so list only the exceptions here
setuptools_package_exceptions = {
'alpine' : 'py-setuptools',
'amazon-linux' : 'python27-setuptools',
'amazon-linux2': 'python2-setuptools',
}
# Centos 7.0 and 7.1 have issues accessing the epel release (due to certificates)
# and I don't know how to fix it other than removing the https access to it.
# If someone has a better solution, with only package updates, I'll take it :)
def _fix_centos_7_epel_no_https():
epel = '/etc/yum.repos.d/epel.repo'
if os.path.exists(epel):
with open(epel, 'r') as f:
lines = f.readlines()
        # sed 'metalink=https:' into 'metalink=http:' (centos 7)
        # and 'mirrorlist=https:' into 'mirrorlist=http:' (centos 6)
new_file = ''.join([line.replace('metalink=https:', 'metalink=http:').replace('mirrorlist=https:', 'mirrorlist=http:') for line in lines])
with open(epel, 'w') as f:
f.write(new_file)
# Some distro have specific dependencies
distro_prerequites = {
'alpine': [{'package_name': 'musl-dev'}], # monotonic clock
'centos': [
{'package_name': 'libgomp'}, # monotonic clock
        {'package_name': 'nss', 'only_for': ['6.6', '6.7', '7.0', '7.1'], 'force_update': True},  # force update of nss to connect to up-to-date HTTPS, especially epel
        {'package_name': 'epel-release', 'only_for': ['6.7', '7.0', '7.1'], 'post_fix': _fix_centos_7_epel_no_https},  # needed for leveldb, and post_fix is needed for 6.7
{'package_name': 'leveldb', 'only_for': ['7.0', '7.1']}, # sqlite on old centos is broken
],
}
# If we are uploading to pypi, we just don't want to install/update packages here
if not allow_black_magic:
mod_need.clear()
# We will have to look in which distro we are
is_managed_system = systepacketmgr.is_managed_system()
system_distro, system_distroversion, _ = systepacketmgr.get_distro()
# Hack for debian & centos 6, which are not configured to access leveldb on pypi because pypi removed http (no S) in november 2017.
# great....
additionnal_pypi_repos = []
if allow_black_magic:
additionnal_pypi_repos.append('https://pypi.python.org/pypi/leveldb/')
if allow_black_magic:
if is_managed_system:
cprint(' * Your system ', end='')
cprint('%s (version %s) ' % (system_distro, system_distroversion), color='magenta', end='')
cprint(u'is managed by this installer: ', end='')
cprint(CHARACTERS.check, color='green')
cprint(' - it will be able to use system package manager to install dependencies.', color='grey')
else:
cprint(" * ", end='')
cprint("%s NOTICE" % CHARACTERS.double_exclamation, color='yellow', end='')
cprint(": your system ", end='')
cprint('(%s - %s) ' % (system_distro, system_distroversion), color='magenta', end='')
cprint('is not a managed/tested system:')
cprint(" - it won't use the package system to install dependencies")
cprint(" - and so it will use the python pip dependency system instead (internet connection is need).")
for (m, d) in mod_need.items():
cprint(' * Checking dependency for ', end='')
cprint('%-20s' % m, color='blue', end='')
cprint(' : ', end='')
sys.stdout.flush()
try:
__import__(m)
cprint('%s' % CHARACTERS.check, color='green')
except ImportError:
cprint('MISSING', color='cyan')
packages = d['packages']
to_install = packages.get(system_distro, '')
pip_failback = d.get('failback_pip', m)
if not to_install:
cprint(' - Cannot find valid packages from system packages on this distribution for the module %s, it will be installed by the python pip system instead (an internet connection is needed)' % m, color='yellow')
install_from_pip.append(pip_failback)
else:
if isinstance(to_install, basestring):
to_install = [to_install]
for pkg in to_install:
cprint(' - Trying to install the package ', color='grey', end='')
cprint('%-20s' % pkg, color='blue', end='')
cprint(' from system packages : ', color='grey', end='')
sys.stdout.flush()
try:
systepacketmgr.update_or_install(pkg)
cprint('%s' % CHARACTERS.check, color='green')
# __import__(m)
except Exception as exp:
cprint('(missing in package)', color='cyan')
cprint(' - cannot install the package from the system. Switching to an installation based on the python pip system (an internet connection is needed)', color='grey')
_prefix = ' | '
cprint('\n'.join(['%s%s' % (_prefix, s) for s in str(exp).splitlines()]), color='grey')
install_from_pip.append(pip_failback)
if allow_black_magic:
distro_specific_packages = distro_prerequisites.get(system_distro, [])
if len(distro_specific_packages) >= 1:
cprint(' * This OS has specific prerequisites:')
for package in distro_specific_packages:
package_name = package.get('package_name')
only_for = package.get('only_for', [])
# Maybe this package is only for specific versions, like old centos 7 versions
if len(only_for) != 0:
match_version = False
for only_for_version in only_for:
if system_distroversion.startswith(only_for_version):
match_version = True
if not match_version:
continue
force_update = package.get('force_update', False) # should be updated even if already installed
post_fix = package.get('post_fix', None) # function called AFTER the package installation, to fix something
cprint(' - Prerequisite for ', color='grey', end='')
cprint(system_distro, color='magenta', end='')
cprint(' : ', color='grey', end='')
cprint('%-20s' % package_name, color='blue', end='')
cprint(' from system packages : ', color='grey', end='')
sys.stdout.flush()
try:
if not systepacketmgr.has_package(package_name) or force_update:
systepacketmgr.update_or_install(package_name)
if post_fix:
post_fix()
cprint('%s' % CHARACTERS.check, color='green')
except Exception as exp:
cprint(' - ERROR: cannot install the prerequisite %s from the system. Please install it manually' % package_name, color='red')
sys.exit(2)
# windows black magic: we need pywin32
if os.name == 'nt':
try:
import win32api
except ImportError:
# No win32api, try to install it, but setup() seems to fail, so call pip for this
from opsbro.util import exec_command
cprint(' - Prerequisite for ', color='grey', end='')
cprint(system_distro, color='magenta', end='')
cprint(' : ', color='grey', end='')
cprint('%-20s' % 'pypiwin32', color='blue', end='')
cprint(' from pypi : ', color='grey', end='')
sys.stdout.flush()
python_exe = os.path.abspath(sys.executable)
# We need both pypiwin32 & pywin32 to work
# But the latest pywin32 on pypi does not support 3.4, so it cannot be installed automagically
if PY3 and sys.version_info.minor == 4: # == 3.4
cprint('ERROR: python 3.4 is not managed under windows for automatic installation, please install pywin32 first (no longer available on pypi for this python version).')
sys.exit(2)
for windows_package in ('pypiwin32', 'pywin32'):
pip_install_command = '%s -m pip install --only-binary %s %s' % (python_exe, windows_package, windows_package)
try:
rc, stdout, stderr = exec_command(pip_install_command)
except Exception as exp:
cprint('ERROR: cannot install %s: %s' % (windows_package, exp), color='red')
sys.exit(2)
if rc != 0:
cprint('ERROR: cannot install %s: %s' % (windows_package, stdout + stderr), color='red')
sys.exit(2)
# We also need to run the python Scripts\pywin32_postinstall.py -install script to register the DLLs. (I love windows...)
dll_script = os.path.join(os.path.dirname(python_exe), 'Scripts', 'pywin32_postinstall.py')
if not os.path.exists(dll_script):
cprint('ERROR: the pywin32 script to register the DLL is missing. Please install pywin32 manually', color='red')
sys.exit(2)
dll_registering = '%s %s -install' % (python_exe, dll_script)
try:
rc, stdout, stderr = exec_command(dll_registering)
except Exception as exp:
cprint('ERROR: cannot install pywin32 DLLs: %s' % exp, color='red')
sys.exit(2)
if rc != 0:
cprint('ERROR: cannot install pywin32 DLLs: %s' % (stdout + stderr), color='red')
sys.exit(2)
cprint('%s' % CHARACTERS.check, color='green')
# Remove duplicate from pip install
install_from_pip = set(install_from_pip)
# if we are uploading to pypi, we don't want to have dependencies, I don't want pip to do black magic. I already do black magic.
if not allow_black_magic:
install_from_pip = set()
# HACK: debian 6 does not allow pypi installs any more, sorry :'(
if system_distro == 'debian' and system_distroversion.startswith('6.'):
install_from_pip = set()
# Try to import setuptools, and if missing, install it from system packages
try:
from setuptools import setup, find_packages
except ImportError:
try:
cprint(' * You are missing the python setuptools, trying to install it with system package:', end='')
sys.stdout.flush()
default_setuptools_pkg = 'python-setuptools'
if PY3:
default_setuptools_pkg = 'python3-setuptools'
package_name = setuptools_package_exceptions.get(system_distro, default_setuptools_pkg)
systepacketmgr.install_package(package_name)
cprint(' %s' % CHARACTERS.check, color='green')
from setuptools import setup, find_packages
except Exception as exp:
cprint('Cannot install python setuptools from system (%s). Cannot continue the installation. Please install python-setuptools before re-running the installation.' % exp, color='red')
sys.exit(2)
print('\n')
################################## Go install the python part
if allow_black_magic:
title = 'Python lib installation ' + sprintf('(2/3)', color='magenta', end='')
print_h1(title, raw_title=True)
if install_from_pip:
cprint(' * %s packages will be installed from Pypi (%s)' % (len(install_from_pip), ', '.join(install_from_pip)))
cprint(' * %s of the opsbro python lib in progress...' % what, end='')
sys.stdout.flush()
hook_stdout()
setup_phase_is_done = False
def print_fail_setup(exp=''):
if setup_phase_is_done:
return
unhook_stdout()
cprint('\nERROR: failed to set up opsbro: (%s)' % exp, color='red')
cprint(stdout_catched.getvalue())
with open(stderr_redirect_path, 'r') as f:
_prefix = ' | '
cprint('Python setuptools call failed:\n%s' % ('\n'.join(['%s%s' % (_prefix, s) for s in f.read().splitlines()])), color='red')
sys.exit(2)
atexit.register(print_fail_setup)
try:
setup(
name="opsbro",
version=VERSION,
packages=find_packages(),
package_data={'': package_data},
description="OpsBro is a service discovery tool",
long_description=read('README.md'),
author="Gabes Jean",
author_email="naparuba@gmail.com",
license="MIT",
url="http://opsbro.io",
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2 :: Only',
'Topic :: System :: Monitoring',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Distributed Computing',
],
install_requires=list(install_from_pip),
# data_files=data_files,
# include_package_data=True, # we need to let setup() install data files, because we must give it AND say it's ok to install....
# TODO: add some more black magic here! I really hate python packaging!
# Maybe some systems need specific package addresses on pypi, like adding httpS on debian 6 :'(
dependency_links=additional_pypi_repos,
)
except Exception as exp:
print_fail_setup(exp)
sys.exit(2)
# don't print something at exit now
setup_phase_is_done = True
# The setup finished and succeeded, so we can put the result into a log; we don't care about
# printing it to everyone unless we want to scare them
unhook_stdout()
if allow_black_magic:
cprint(' %s' % CHARACTERS.check, color='green')
installation_log = '/tmp/opsbro.setup.log' if os.name != 'nt' else r'c:\opsbro.setup.log'
with open(installation_log, 'w') as f:
f.write(stdout_catched.getvalue())
if allow_black_magic:
cprint(' - Raw python setup lib (and possible dependencies) installation log at: %s' % installation_log, color='grey')
f = open(installation_log)
cprint(f.read())
f.close()
################################## Install init.d script, the daemon script and bash completion part
if allow_black_magic:
print('\n')
title = 'Utility script installation ' + sprintf('(3/3)', color='magenta', end='')
print_h1(title, raw_title=True)
# Just a print with aligned test over : OK
def __print_sub_install_part(p):
if allow_black_magic:
cprint(' - %-40s :' % p, color='grey', end='')
cprint(' %s' % CHARACTERS.check, color='green')
def __do_install_files(lst):
# * dir : dest_directory
# * lfiles : local files in this archive
for (dir, lfiles) in lst:
# Be sure the directory exists
if not os.path.exists(dir):
# ==> mkdir -p
core_logger.debug('The directory %s is missing, creating it' % dir)
os.makedirs(dir)
for lfile in lfiles:
lfile_name = os.path.basename(lfile)
destination = os.path.join(dir, lfile_name)
core_logger.debug("Copying local file %s into %s" % (lfile, destination))
shutil.copy2(lfile, destination)
# Always install standard directories (log, run, etc)
if allow_black_magic:
__do_install_files(data_files)
__print_sub_install_part('OpsBro scripts & directories')
# Also change the rights of the opsbro- scripts
for s in scripts:
bs = os.path.basename(s)
_chmodplusx(os.path.join(default_paths['bin'], bs))
__print_sub_install_part('Check daemon file rights')
_chmodplusx(default_paths['libexec'])
# If it does not exist, this won't raise an error
_chmodplusx('/etc/init.d/opsbro')
__print_sub_install_part('Check init.d script execution rights')
# if root is set, it's for packaging, so NO chown
# if uploading to pypi, we don't need this either
if not root and is_install and allow_black_magic:
cprint(' * Installing data & scripts (sample configuration, init.d, daemon, bash completion)')
# Install configuration, packs
__do_install_files(configuration_files)
__print_sub_install_part('Sample configuration & core packs')
# Also install the bash completion part if there is such a directory
bash_completion_dir = '/etc/bash_completion.d/'
if os.path.exists(bash_completion_dir):
dest = os.path.join(bash_completion_dir, 'opsbro')
shutil.copy('bash_completion/opsbro', dest)
_chmodplusx(dest)
__print_sub_install_part('bash completion rule')
if not root and is_update and allow_black_magic:
cprint(' * Updating core configuration files')
__print_sub_install_part('Core packs')
core_configuration_dir = os.path.join(default_paths['var'], 'core-configuration')
shutil.rmtree(core_configuration_dir)
__do_install_files(configuration_files)
if allow_black_magic:
print('')
print_h1('End', raw_title=True)
cprint('OpsBro ', end='')
cprint(what, color='magenta', end='')
cprint(' : ', end='')
cprint(' %s' % CHARACTERS.check, color='green')
cprint(' %s Notes: ' % CHARACTERS.corner_bottom_left, color='grey')
cprint(' - you can now start your daemon with: service opsbro start', color='grey')
cprint(' - you can look at all available commands with: opsbro -h', color='grey')
|
|
#! /usr/bin/python
# Copyright (c) 2007-8, Playful Invention Company
# Copyright (c) 2008-14, Walter Bender
# Copyright (c) 2011 Collabora Ltd. <http://www.collabora.co.uk/>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import pygtk
pygtk.require('2.0')
import gtk
import gobject
import cairo
import getopt
import sys
import os
import os.path
import glob
import cStringIO
import errno
import ConfigParser
import gconf
import tarfile
import tempfile
import subprocess
try:
# Try to use XDG Base Directory standard for config files.
import xdg.BaseDirectory
CONFIG_HOME = os.path.join(xdg.BaseDirectory.xdg_config_home, 'turtleart')
except ImportError as e:
# Default to `.config` per the spec.
CONFIG_HOME = os.path.expanduser(os.path.join('~', '.config', 'turtleart'))
argv = sys.argv[:] # Workaround for import behavior of gst in tagplay
sys.argv[1:] = [] # so that 'import gst' cannot see '--help' or '-h'
import gettext
from gettext import gettext as _
from TurtleArt.taconstants import (OVERLAY_LAYER, DEFAULT_TURTLE_COLORS,
TAB_LAYER, SUFFIX, TMP_SVG_PATH,
TMP_ODP_PATH, PASTE_OFFSET)
from TurtleArt.tautils import (data_from_string, get_load_name,
get_path, get_save_name, is_writeable)
from TurtleArt.tapalette import default_values
from TurtleArt.tawindow import TurtleArtWindow
from TurtleArt.taexportlogo import save_logo
from TurtleArt.taexportpython import save_python
from TurtleArt.taprimitive import PyExportError
from TurtleArt.taplugin import (load_a_plugin, cancel_plugin_install,
complete_plugin_install)
from util.menubuilder import MenuBuilder
class TurtleMain():
''' Launch Turtle Art in GNOME (from outside of Sugar). '''
_INSTALL_PATH = '/usr/share/sugar/activities/TurtleArt.activity'
_ALTERNATIVE_INSTALL_PATH = \
'/usr/local/share/sugar/activities/TurtleArt.activity'
_ICON_SUBPATH = 'images/turtle.png'
_GNOME_PLUGIN_SUBPATH = 'gnome_plugins'
_HOVER_HELP = '/desktop/sugar/activities/turtleart/hoverhelp'
_ORIENTATION = '/desktop/sugar/activities/turtleart/orientation'
_COORDINATE_SCALE = '/desktop/sugar/activities/turtleart/coordinatescale'
def __init__(self):
self._setting_gconf_overrides = False
self._abspath = os.path.abspath('.')
self._execdirname = self._get_execution_dir()
if self._execdirname is not None:
os.chdir(self._execdirname)
file_activity_info = ConfigParser.ConfigParser()
activity_info_path = os.path.abspath('./activity/activity.info')
file_activity_info.read(activity_info_path)
bundle_id = file_activity_info.get('Activity', 'bundle_id')
self.version = file_activity_info.get('Activity', 'activity_version')
self.name = file_activity_info.get('Activity', 'name')
self.summary = file_activity_info.get('Activity', 'summary')
self.website = file_activity_info.get('Activity', 'website')
self.icon_name = file_activity_info.get('Activity', 'icon')
self.bundle_path = self._abspath
path = os.path.abspath('./locale/')
gettext.bindtextdomain(bundle_id, path)
gettext.textdomain(bundle_id)
global _
_ = gettext.gettext
self._HELP_MSG = 'turtleblocks.py: ' + _('usage is') + '''
\tturtleblocks.py
\tturtleblocks.py project.tb
\tturtleblocks.py --output_png project.tb
\tturtleblocks.py -o project
\tturtleblocks.py --run project.tb
\tturtleblocks.py -r project'''
self._init_vars()
self._parse_command_line()
self._ensure_sugar_paths()
self._gnome_plugins = []
self._selected_sample = None
self._sample_window = None
self.has_toolbarbox = False
if self._output_png:
# Outputting to file, so no need for a canvas
self.canvas = None
self._build_window(interactive=False)
self._draw_and_quit()
else:
self._read_initial_pos()
self._init_gnome_plugins()
self._get_gconf_settings()
self._setup_gtk()
self._build_window()
self._run_gnome_plugins()
self._start_gtk()
def _get_gconf_settings(self):
self.client = gconf.client_get_default()
def get_config_home(self):
return CONFIG_HOME
def _get_gnome_plugin_home(self):
''' Use plugin directory associated with execution path. '''
if os.path.exists(os.path.join(self._execdirname,
self._GNOME_PLUGIN_SUBPATH)):
return os.path.join(self._execdirname, self._GNOME_PLUGIN_SUBPATH)
else:
return None
def _get_plugin_candidates(self, path):
''' Look for plugin files in plugin directory. '''
plugin_files = []
if path is not None:
candidates = os.listdir(path)
for c in candidates:
if c[-10:] == '_plugin.py' and c[0] != '#' and c[0] != '.':
plugin_files.append(c.split('.')[0])
return plugin_files
def _init_gnome_plugins(self):
''' Try launching any plugins we may have found. '''
for p in self._get_plugin_candidates(self._get_gnome_plugin_home()):
P = p.capitalize()
f = "def f(self): from gnome_plugins.%s import %s; \
return %s(self)" % (p, P, P)
plugin = {}
try:
exec f in globals(), plugin
self._gnome_plugins.append(plugin.values()[0](self))
except ImportError as e:
print 'failed to import %s: %s' % (P, str(e))
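# Note: the exec trick above means each candidate *_plugin.py module must
# define a class named after its file with the first letter capitalized
# (p.capitalize()), and that class must accept the TurtleMain instance as
# its only constructor argument (plugin.values()[0](self)).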
def _run_gnome_plugins(self):
''' Tell the plugin about the TurtleWindow instance. '''
for p in self._gnome_plugins:
p.set_tw(self.tw)
def _mkdir_p(self, path):
'''Create a directory in a fashion similar to `mkdir -p`.'''
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def _makepath(self, path):
''' Make a path if it doesn't previously exist '''
from os import makedirs
from os.path import normpath, dirname, exists
dpath = normpath(dirname(path))
if not exists(dpath):
makedirs(dpath)
def _start_gtk(self):
''' Get a main window set up. '''
self.win.connect('configure_event', self.tw.update_overlay_position)
self.tw.parent = self.win
self.init_complete = True
if self._ta_file is None:
self.tw.load_start()
else:
self.win.get_window().set_cursor(gtk.gdk.Cursor(gtk.gdk.WATCH))
gobject.idle_add(self._project_loader, self._ta_file)
self._set_gconf_overrides()
gtk.main()
def _project_loader(self, file_name):
self.tw.load_start(self._ta_file)
self.tw.lc.trace = 0
if self._run_on_launch:
self._do_run_cb()
self.win.get_window().set_cursor(gtk.gdk.Cursor(gtk.gdk.LEFT_PTR))
def _draw_and_quit(self):
''' Non-interactive mode: run the project, save it to a file
and quit. '''
self.tw.load_start(self._ta_file)
self.tw.lc.trace = 0
self.tw.run_button(0)
self.tw.save_as_image(self._ta_file)
def _build_window(self, interactive=True):
''' Initialize the TurtleWindow instance. '''
if interactive:
win = self.canvas.get_window()
cr = win.cairo_create()
surface = cr.get_target()
else:
img_surface = cairo.ImageSurface(cairo.FORMAT_RGB24,
1024, 768)
cr = cairo.Context(img_surface)
surface = cr.get_target()
self.turtle_canvas = surface.create_similar(
cairo.CONTENT_COLOR,
# max(1024, gtk.gdk.screen_width() * 2),
# max(768, gtk.gdk.screen_height() * 2))
gtk.gdk.screen_width() * 2,
gtk.gdk.screen_height() * 2)
# Make sure the autosave directory is writeable
if is_writeable(self._execdirname):
self._autosavedirname = self._execdirname
else:
self._autosavedirname = os.path.expanduser('~')
self.tw = TurtleArtWindow(self.canvas, self._execdirname,
turtle_canvas=self.turtle_canvas,
activity=self, running_sugar=False)
self.tw.save_folder = self._abspath # os.path.expanduser('~')
if hasattr(self, 'client'):
if self.client.get_int(self._HOVER_HELP) == 1:
self.tw.no_help = True
self.hover.set_active(False)
self._do_hover_help_off_cb()
if self.client.get_int(self._COORDINATE_SCALE) not in [0, 1]:
self.tw.coord_scale = 1
else:
self.tw.coord_scale = 0
if self.client.get_int(self._ORIENTATION) == 1:
self.tw.orientation = 1
def _set_gconf_overrides(self):
if self.tw.coord_scale == 0:
self.tw.coord_scale = 1
else:
self._do_rescale_cb(None)
if self.tw.coord_scale != 1:
self._setting_gconf_overrides = True
self.coords.set_active(True)
self._setting_gconf_overrides = False
def _init_vars(self):
''' Initialize the state variables used for a session. '''
self._ta_file = None
self._output_png = False
self._run_on_launch = False
self.current_palette = 0
self.scale = 2.0
self.tw = None
self.init_complete = False
def _parse_command_line(self):
''' Try to make sense of the command-line arguments. '''
try:
opts, args = getopt.getopt(argv[1:], 'hor',
['help', 'output_png', 'run'])
except getopt.GetoptError as err:
print str(err)
print self._HELP_MSG
sys.exit(2)
self._run_on_launch = False
for o, a in opts:
if o in ('-h', '--help'):
print self._HELP_MSG
sys.exit()
if o in ('-o', '--output_png'):
self._output_png = True
elif o in ('-r', '--run'):
self._run_on_launch = True
else:
assert False, _('No option action:') + ' ' + o
if args:
self._ta_file = args[0]
if len(args) > 1 or self._output_png and self._ta_file is None:
print self._HELP_MSG
sys.exit()
if self._ta_file is not None:
if not self._ta_file.endswith(SUFFIX):
self._ta_file += '.tb'
if not os.path.exists(self._ta_file):
self._ta_file = os.path.join(self._abspath, self._ta_file)
if not os.path.exists(self._ta_file):
assert False, ('%s: %s' %
(self._ta_file, _('File not found')))
def _ensure_sugar_paths(self):
''' Make sure Sugar paths are present. '''
tapath = os.path.join(os.environ['HOME'], '.sugar', 'default',
'org.laptop.TurtleArtActivity')
map(self._makepath, (os.path.join(tapath, 'data/'),
os.path.join(tapath, 'instance/')))
def _read_initial_pos(self):
''' Read saved configuration. '''
try:
data_file = open(os.path.join(CONFIG_HOME, 'turtleartrc'), 'r')
except IOError:
# Opening the config file failed
# We'll assume it needs to be created
try:
self._mkdir_p(CONFIG_HOME)
data_file = open(os.path.join(CONFIG_HOME, 'turtleartrc'),
'a+')
except IOError as e:
# We can't write to the configuration file, use
# a faux file that will persist for the length of
# the session.
print _('Configuration directory not writable: %s') % (e)
data_file = cStringIO.StringIO()
data_file.write(str(50) + '\n')
data_file.write(str(50) + '\n')
data_file.write(str(800) + '\n')
data_file.write(str(550) + '\n')
data_file.seek(0)
try:
self.x = int(data_file.readline())
self.y = int(data_file.readline())
self.width = int(data_file.readline())
self.height = int(data_file.readline())
except ValueError:
self.x = 50
self.y = 50
self.width = 800
self.height = 550
def _fixed_resize_cb(self, widget=None, rect=None):
''' If a toolbar opens or closes, we need to resize the vbox
holding our scrolling window. '''
self.vbox.set_size_request(rect[2], rect[3])
self.menu_height = self.menu_bar.size_request()[1]
def restore_cursor(self):
''' No longer copying or sharing, so restore standard cursor. '''
self.tw.copying_blocks = False
self.tw.sharing_blocks = False
self.tw.saving_blocks = False
self.tw.deleting_blocks = False
if hasattr(self, 'get_window'):
if hasattr(self.get_window(), 'get_cursor'):
self.get_window().set_cursor(self._old_cursor)
else:
self.get_window().set_cursor(gtk.gdk.Cursor(gtk.gdk.LEFT_PTR))
def _setup_gtk(self):
''' Set up a scrolled window in which to run Turtle Blocks. '''
win = gtk.Window(gtk.WINDOW_TOPLEVEL)
win.set_default_size(self.width, self.height)
win.move(self.x, self.y)
win.maximize()
win.set_title('%s %s' % (self.name, str(self.version)))
if os.path.exists(os.path.join(self._execdirname, self._ICON_SUBPATH)):
win.set_icon_from_file(os.path.join(self._execdirname,
self._ICON_SUBPATH))
win.show()
win.connect('delete_event', self._quit_ta)
''' Create a scrolled window to contain the turtle canvas. We
add a Fixed container in order to position text Entry widgets
on top of string and number blocks.'''
self.fixed = gtk.Fixed()
self.fixed.connect('size-allocate', self._fixed_resize_cb)
width = gtk.gdk.screen_width() - 80
height = gtk.gdk.screen_height() - 80
self.fixed.set_size_request(width, height)
self.vbox = gtk.VBox(False, 0)
self.vbox.show()
self.menu_bar = self._get_menu_bar()
self.vbox.pack_start(self.menu_bar, False, False)
self.menu_bar.show()
self.menu_height = self.menu_bar.size_request()[1]
self.sw = gtk.ScrolledWindow()
self.sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.sw.show()
canvas = gtk.DrawingArea()
width = gtk.gdk.screen_width() * 2
height = gtk.gdk.screen_height() * 2
canvas.set_size_request(width, height)
self.sw.add_with_viewport(canvas)
canvas.show()
self.vbox.pack_end(self.sw, True, True)
self.fixed.put(self.vbox, 0, 0)
self.fixed.show()
win.add(self.fixed)
win.show_all()
self.win = win
self.canvas = canvas
def _get_menu_bar(self):
''' Instead of Sugar toolbars, use GNOME menus. '''
menu = gtk.Menu()
MenuBuilder.make_menu_item(menu, _('New'), self._do_new_cb)
MenuBuilder.make_menu_item(menu, _('Show sample projects'),
self._create_store)
MenuBuilder.make_menu_item(menu, _('Open'), self._do_open_cb)
MenuBuilder.make_menu_item(menu, _('Add project'), self._do_load_cb)
MenuBuilder.make_menu_item(menu, _('Load plugin'),
self._do_load_plugin_cb)
MenuBuilder.make_menu_item(menu, _('Save'), self._do_save_cb)
MenuBuilder.make_menu_item(menu, _('Save as'), self._do_save_as_cb)
# export submenu
export_submenu = gtk.Menu()
export_menu = MenuBuilder.make_sub_menu(export_submenu, _('Export as'))
menu.append(export_menu)
MenuBuilder.make_menu_item(export_submenu, _('image'),
self._do_save_picture_cb)
MenuBuilder.make_menu_item(export_submenu, _('SVG'),
self._do_save_svg_cb)
MenuBuilder.make_menu_item(export_submenu, _('icon'),
self._do_save_as_icon_cb)
# TRANS: ODP is Open Office presentation
MenuBuilder.make_menu_item(export_submenu, _('ODP'),
self._do_save_as_odp_cb)
MenuBuilder.make_menu_item(export_submenu, _('Logo'),
self._do_save_logo_cb)
MenuBuilder.make_menu_item(export_submenu, _('Python'),
self._do_save_python_cb)
MenuBuilder.make_menu_item(menu, _('Quit'), self._quit_ta)
activity_menu = MenuBuilder.make_sub_menu(menu, _('File'))
menu = gtk.Menu()
MenuBuilder.make_menu_item(menu, _('Cartesian coordinates'),
self._do_cartesian_cb)
MenuBuilder.make_menu_item(menu, _('Polar coordinates'),
self._do_polar_cb)
self.coords = MenuBuilder.make_checkmenu_item(
menu, _('Rescale coordinates'),
self._do_rescale_cb, status=False)
MenuBuilder.make_menu_item(menu, _('Grow blocks'),
self._do_resize_cb, 1.5)
MenuBuilder.make_menu_item(menu, _('Shrink blocks'),
self._do_resize_cb, 0.667)
MenuBuilder.make_menu_item(menu, _('Reset block size'),
self._do_resize_cb, -1)
self.hover = MenuBuilder.make_checkmenu_item(
menu, _('Turn on hover help'),
self._do_toggle_hover_help_cb, status=True)
view_menu = MenuBuilder.make_sub_menu(menu, _('View'))
menu = gtk.Menu()
MenuBuilder.make_menu_item(menu, _('Copy'), self._do_copy_cb)
MenuBuilder.make_menu_item(menu, _('Paste'), self._do_paste_cb)
MenuBuilder.make_menu_item(menu, _('Save stack'),
self._do_save_macro_cb)
MenuBuilder.make_menu_item(menu, _('Delete stack'),
self._do_delete_macro_cb)
edit_menu = MenuBuilder.make_sub_menu(menu, _('Edit'))
menu = gtk.Menu()
MenuBuilder.make_menu_item(menu, _('Show palette'),
self._do_palette_cb)
MenuBuilder.make_menu_item(menu, _('Hide palette'),
self._do_hide_palette_cb)
MenuBuilder.make_menu_item(menu, _('Show/hide blocks'),
self._do_hideshow_cb)
tool_menu = MenuBuilder.make_sub_menu(menu, _('Tools'))
menu = gtk.Menu()
MenuBuilder.make_menu_item(menu, _('Clean'), self._do_eraser_cb)
MenuBuilder.make_menu_item(menu, _('Run'), self._do_run_cb)
MenuBuilder.make_menu_item(menu, _('Step'), self._do_step_cb)
MenuBuilder.make_menu_item(menu, _('Debug'), self._do_trace_cb)
MenuBuilder.make_menu_item(menu, _('Stop'), self._do_stop_cb)
turtle_menu = MenuBuilder.make_sub_menu(menu, _('Turtle'))
menu = gtk.Menu()
MenuBuilder.make_menu_item(menu, _('About...'), self._do_about_cb)
help_menu = MenuBuilder.make_sub_menu(menu, _('Help'))
menu_bar = gtk.MenuBar()
menu_bar.append(activity_menu)
menu_bar.append(edit_menu)
menu_bar.append(view_menu)
menu_bar.append(tool_menu)
menu_bar.append(turtle_menu)
# Add menus for plugins
for p in self._gnome_plugins:
menu_item = p.get_menu()
if menu_item is not None:
menu_bar.append(menu_item)
menu_bar.append(help_menu)
return menu_bar
def _quit_ta(self, widget=None, e=None):
''' Save changes on exit '''
project_empty = self.tw.is_project_empty()
if not project_empty:
resp = self._show_save_dialog(e is None)
if resp == gtk.RESPONSE_YES:
if self.tw.is_new_project():
self._save_as()
else:
if self.tw.project_has_changed():
self._save_changes()
elif resp == gtk.RESPONSE_CANCEL:
return
if hasattr(self, 'client'):
self.client.set_int(self._ORIENTATION, self.tw.orientation)
for plugin in self.tw.turtleart_plugins:
if hasattr(plugin, 'quit'):
plugin.quit()
# Clean up temporary files
if os.path.exists(TMP_SVG_PATH):
os.remove(TMP_SVG_PATH)
if os.path.exists(TMP_ODP_PATH):
os.remove(TMP_ODP_PATH)
gtk.main_quit()
exit()
def _show_save_dialog(self, add_cancel=False):
''' Dialog for save project '''
dlg = gtk.MessageDialog(parent=None, type=gtk.MESSAGE_INFO,
buttons=gtk.BUTTONS_YES_NO,
message_format=_('You have unsaved work. \
Would you like to save before quitting?'))
dlg.set_default_response(gtk.RESPONSE_YES)
if add_cancel:
dlg.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
dlg.set_title(_('Save project?'))
dlg.set_property('skip-taskbar-hint', False)
resp = dlg.run()
dlg.destroy()
return resp
def _reload_plugin_alert(self, tmp_dir, tmp_path, plugin_path, plugin_name,
file_info):
print "Already installed"
title = _('Plugin %s already installed') % plugin_name
msg = _('Do you want to reinstall %s?') % plugin_name
dlg = gtk.MessageDialog(parent=None, type=gtk.MESSAGE_INFO,
buttons=gtk.BUTTONS_YES_NO,
message_format=title)
dlg.format_secondary_text(msg)
dlg.set_title(title)
dlg.set_property('skip-taskbar-hint', False)
resp = dlg.run()
dlg.destroy()
# The dialog above uses BUTTONS_YES_NO, so the answers are YES/NO
if resp == gtk.RESPONSE_YES:
complete_plugin_install(tmp_dir, tmp_path, plugin_path,
plugin_name, file_info)
elif resp == gtk.RESPONSE_NO:
cancel_plugin_install(tmp_dir)
def _do_new_cb(self, widget):
''' Callback for new project. '''
self.tw.new_project()
self.tw.load_start()
def _do_open_cb(self, widget):
''' Callback for open project. '''
self.tw.load_file_from_chooser(True)
def _do_load_cb(self, widget):
''' Callback for load project (add to current project). '''
self.tw.load_file_from_chooser(False)
def _do_load_plugin_cb(self, widget):
self.tw.load_save_folder = self._get_execution_dir()
file_path, loaddir = get_load_name('.tar.gz', self.tw.load_save_folder)
if file_path is None:
return
try:
# Copy to tmp file since some systems had trouble
# with gunzip directly from datastore
datapath = get_path(None, 'instance')
if not os.path.exists(datapath):
os.makedirs(datapath)
tmpfile = os.path.join(datapath, 'tmpfile.tar.gz')
subprocess.call(['cp', file_path, tmpfile])
status = subprocess.call(['gunzip', tmpfile])
if status == 0:
tar_fd = tarfile.open(tmpfile[:-3], 'r')
else:
tar_fd = tarfile.open(tmpfile, 'r')
except:
tar_fd = tarfile.open(file_path, 'r')
tmp_dir = tempfile.mkdtemp()
try:
tar_fd.extractall(tmp_dir)
load_a_plugin(self, tmp_dir)
self.restore_cursor()
except:
self.restore_cursor()
finally:
tar_fd.close()
# Remove tmpfile.tar
subprocess.call(['rm',
os.path.join(datapath, 'tmpfile.tar')])
def _do_save_cb(self, widget):
''' Callback for save project. '''
self.tw.save_file(self._ta_file)
def _do_save_as_cb(self, widget):
''' Callback for save-as project. '''
self._save_as()
def autosave(self):
''' Autosave is called each time the run button is pressed '''
temp_load_save_folder = self.tw.load_save_folder
temp_save_folder = self.tw.save_folder
self.tw.load_save_folder = self._autosavedirname
self.tw.save_folder = self._autosavedirname
self.tw.save_file(file_name=os.path.join(
self._autosavedirname, 'autosave.tb'))
self.tw.save_folder = temp_save_folder
self.tw.load_save_folder = temp_load_save_folder
def _save_as(self):
''' Save as is called from callback and quit '''
self.tw.save_file_name = self._ta_file
self.tw.save_file()
def _save_changes(self):
''' Save changes to current project '''
self.tw.save_file_name = self._ta_file
self.tw.save_file(self.tw._loaded_project)
def _do_save_picture_cb(self, widget):
''' Callback for save canvas. '''
self.tw.save_as_image()
def _do_save_svg_cb(self, widget):
''' Callback for save canvas as SVG. '''
self.tw.save_as_image(svg=True)
def _do_save_as_icon_cb(self, widget):
''' Callback for save canvas. '''
self.tw.write_svg_operation()
self.tw.save_as_icon()
def _do_save_as_odp_cb(self, widget):
''' Callback for save canvas. '''
self.tw.save_as_odp()
def _do_save_logo_cb(self, widget):
''' Callback for save project to Logo. '''
logocode = save_logo(self.tw)
if len(logocode) == 0:
return
save_type = '.lg'
self.tw.load_save_folder = self._get_execution_dir()
filename, self.tw.load_save_folder = get_save_name(
save_type, self.tw.load_save_folder, 'logosession')
if isinstance(filename, unicode):
filename = filename.encode('utf-8')
if filename is not None:
f = file(filename, 'w')
f.write(logocode)
f.close()
def _do_save_python_cb(self, widget):
''' Callback for saving the project as Python code. '''
# catch PyExportError and display a user-friendly message instead
try:
pythoncode = save_python(self.tw)
except PyExportError as pyee:
if pyee.block is not None:
pyee.block.highlight()
self.tw.showlabel('status', str(pyee))
print pyee
return
if not pythoncode:
return
# use name of TA project if it has been saved already
default_name = self.tw.save_file_name
if default_name is None:
default_name = _("myproject")
elif default_name.endswith(".ta") or default_name.endswith(".tb"):
default_name = default_name[:-3]
save_type = '.py'
self.tw.load_save_folder = self._get_execution_dir()
filename, self.tw.load_save_folder = get_save_name(
save_type, self.tw.load_save_folder, default_name)
if isinstance(filename, unicode):
filename = filename.encode('utf-8')
if filename is not None:
f = file(filename, 'w')
f.write(pythoncode)
f.close()
def _do_resize_cb(self, widget, factor):
''' Callback to resize blocks. '''
if factor == -1:
self.tw.block_scale = 2.0
else:
self.tw.block_scale *= factor
self.tw.resize_blocks()
def _do_cartesian_cb(self, button):
''' Callback to display/hide Cartesian coordinate overlay. '''
self.tw.set_cartesian(True)
def _do_polar_cb(self, button):
''' Callback to display/hide Polar coordinate overlay. '''
self.tw.set_polar(True)
def _do_rescale_cb(self, button):
''' Callback to rescale coordinate space. '''
if self._setting_gconf_overrides:
return
if self.tw.coord_scale == 1:
self.tw.coord_scale = self.tw.height / 40
self.tw.update_overlay_position()
if self.tw.cartesian is True:
self.tw.overlay_shapes['Cartesian_labeled'].hide()
self.tw.overlay_shapes['Cartesian'].set_layer(OVERLAY_LAYER)
default_values['forward'] = [10]
default_values['back'] = [10]
default_values['arc'] = [90, 10]
default_values['setpensize'] = [1]
self.tw.turtles.get_active_turtle().set_pen_size(1)
else:
self.tw.coord_scale = 1
if self.tw.cartesian is True:
self.tw.overlay_shapes['Cartesian'].hide()
self.tw.overlay_shapes['Cartesian_labeled'].set_layer(
OVERLAY_LAYER)
default_values['forward'] = [100]
default_values['back'] = [100]
default_values['arc'] = [90, 100]
default_values['setpensize'] = [5]
self.tw.turtles.get_active_turtle().set_pen_size(5)
if hasattr(self, 'client'):
self.client.set_int(self._COORDINATE_SCALE,
int(self.tw.coord_scale))
self.tw.recalculate_constants()
def _do_toggle_hover_help_cb(self, button):
''' Toggle hover help on/off '''
self.tw.no_help = not(button.get_active())
if self.tw.no_help:
self._do_hover_help_off_cb()
else:
self._do_hover_help_on_cb()
def _do_hover_help_on_cb(self):
''' Turn hover help on '''
if hasattr(self, 'client'):
self.client.set_int(self._HOVER_HELP, 0)
def _do_hover_help_off_cb(self):
''' Turn hover help off '''
self.tw.last_label = None
if self.tw.status_spr is not None:
self.tw.status_spr.hide()
if hasattr(self, 'client'):
self.client.set_int(self._HOVER_HELP, 1)
def _do_palette_cb(self, widget):
''' Callback to show/hide palette of blocks. '''
self.tw.show_palette(self.current_palette)
self.current_palette += 1
if self.current_palette == len(self.tw.palettes):
self.current_palette = 0
def _do_hide_palette_cb(self, widget):
''' Hide the palette of blocks. '''
self.tw.hide_palette()
def _do_hideshow_cb(self, widget):
''' Hide/show the blocks. '''
self.tw.hideshow_button()
def _do_eraser_cb(self, widget):
''' Callback for eraser button. '''
self.tw.eraser_button()
return
def _do_run_cb(self, widget=None):
''' Callback for run button (rabbit). '''
self.tw.lc.trace = 0
self.tw.hideblocks()
self.tw.display_coordinates(clear=True)
self.tw.toolbar_shapes['stopiton'].set_layer(TAB_LAYER)
self.tw.run_button(0, running_from_button_push=True)
return
def _do_step_cb(self, widget):
''' Callback for step button (turtle). '''
self.tw.lc.trace = 1
self.tw.run_button(3, running_from_button_push=True)
return
def _do_trace_cb(self, widget):
''' Callback for debug button (bug). '''
self.tw.lc.trace = 1
self.tw.run_button(9, running_from_button_push=True)
return
def _do_stop_cb(self, widget):
''' Callback for stop button. '''
if self.tw.running_blocks:
self.tw.toolbar_shapes['stopiton'].hide()
if self.tw.hide:
self.tw.showblocks()
self.tw.stop_button()
self.tw.display_coordinates()
def _do_save_macro_cb(self, widget):
''' Callback for save stack button. '''
self.tw.copying_blocks = False
self.tw.deleting_blocks = False
if self.tw.saving_blocks:
self.win.get_window().set_cursor(gtk.gdk.Cursor(gtk.gdk.LEFT_PTR))
self.tw.saving_blocks = False
else:
self.win.get_window().set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND1))
self.tw.saving_blocks = True
def _do_delete_macro_cb(self, widget):
''' Callback for delete stack button. '''
self.tw.copying_blocks = False
self.tw.saving_blocks = False
if self.tw.deleting_blocks:
self.win.get_window().set_cursor(gtk.gdk.Cursor(gtk.gdk.LEFT_PTR))
self.tw.deleting_blocks = False
else:
self.win.get_window().set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND1))
self.tw.deleting_blocks = True
def _do_copy_cb(self, button):
''' Callback for copy button. '''
self.tw.saving_blocks = False
self.tw.deleting_blocks = False
if self.tw.copying_blocks:
self.win.get_window().set_cursor(gtk.gdk.Cursor(gtk.gdk.LEFT_PTR))
self.tw.copying_blocks = False
else:
self.win.get_window().set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND1))
self.tw.copying_blocks = True
def _do_paste_cb(self, button):
''' Callback for paste button. '''
self.tw.copying_blocks = False
self.tw.saving_blocks = False
self.tw.deleting_blocks = False
self.win.get_window().set_cursor(gtk.gdk.Cursor(gtk.gdk.LEFT_PTR))
clipboard = gtk.Clipboard()
text = clipboard.wait_for_text()
if text is not None:
if self.tw.selected_blk is not None and \
self.tw.selected_blk.name == 'string' and \
text[0:2] != '[[': # Don't paste block data into a string
self.tw.paste_text_in_block_label(text)
self.tw.selected_blk.resize()
else:
self.tw.process_data(data_from_string(text),
self.tw.paste_offset)
self.tw.paste_offset += PASTE_OFFSET
def _do_about_cb(self, widget):
about = gtk.AboutDialog()
about.set_program_name(_(self.name))
about.set_version(self.version)
about.set_comments(_(self.summary))
about.set_website(self.website)
about.set_logo(
gtk.gdk.pixbuf_new_from_file(
'activity/' + self.icon_name + '.svg'))
about.run()
about.destroy()
def _window_event(self, event, data):
''' Callback for resize event. '''
data_file = open('.turtleartrc', 'w')
data_file.write(str(data.x) + '\n')
data_file.write(str(data.y) + '\n')
data_file.write(str(data.width) + '\n')
data_file.write(str(data.height) + '\n')
def nick_changed(self, nick):
''' TODO: Rename default turtle in dictionary '''
pass
def color_changed(self, colors):
''' Reskin turtle with collaboration colors '''
turtle = self.tw.turtles.get_turtle(self.tw.default_turtle_name)
try:
turtle.colors = colors.split(',')
except:
turtle.colors = DEFAULT_TURTLE_COLORS
turtle.custom_shapes = True # Force regeneration of shapes
turtle.reset_shapes()
turtle.show()
def _get_execution_dir(self):
''' From whence is the program being executed? '''
dirname = os.path.dirname(__file__)
if dirname == '':
# os.path.exists() does not expand '~', so do it explicitly
home_install = os.path.expanduser(
os.path.join('~', 'Activities', 'TurtleArt.activity'))
if os.path.exists(home_install):
return home_install
elif os.path.exists(self._INSTALL_PATH):
return self._INSTALL_PATH
elif os.path.exists(self._ALTERNATIVE_INSTALL_PATH):
return self._ALTERNATIVE_INSTALL_PATH
else:
return os.path.abspath('.')
else:
return os.path.abspath(dirname)
def restore_state(self):
''' Anything that needs restoring after a clear screen can go here '''
pass
def hide_store(self, widget=None):
if self._sample_window is not None:
self._sample_box.hide()
def _create_store(self, widget=None):
if self._sample_window is None:
self._sample_box = gtk.EventBox()
self._sample_window = gtk.ScrolledWindow()
self._sample_window.set_policy(gtk.POLICY_NEVER,
gtk.POLICY_AUTOMATIC)
width = gtk.gdk.screen_width() / 2
height = gtk.gdk.screen_height() / 2
self._sample_window.set_size_request(width, height)
self._sample_window.show()
store = gtk.ListStore(gtk.gdk.Pixbuf, str)
icon_view = gtk.IconView()
icon_view.set_model(store)
icon_view.set_selection_mode(gtk.SELECTION_SINGLE)
icon_view.connect('selection-changed', self._sample_selected,
store)
icon_view.set_pixbuf_column(0)
icon_view.grab_focus()
self._sample_window.add_with_viewport(icon_view)
icon_view.show()
self._fill_samples_list(store)
width = gtk.gdk.screen_width() / 4
height = gtk.gdk.screen_height() / 4
self._sample_box.add(self._sample_window)
self.fixed.put(self._sample_box, width, height)
self._sample_window.show()
self._sample_box.show()
def _get_selected_path(self, widget, store):
try:
iter_ = store.get_iter(widget.get_selected_items()[0])
image_path = store.get(iter_, 1)[0]
return image_path, iter_
except:
return None
def _sample_selected(self, widget, store):
selected = self._get_selected_path(widget, store)
if selected is None:
self._selected_sample = None
self._sample_window.hide()
return
image_path, _iter = selected
self._selected_sample = image_path
self._sample_window.hide()
self.win.get_window().set_cursor(gtk.gdk.Cursor(gtk.gdk.WATCH))
gobject.idle_add(self._sample_loader)
def _sample_loader(self):
# Convert from thumbnail path to sample path
basename = os.path.basename(self._selected_sample)[:-4]
for suffix in ['.ta', '.tb']:
file_path = os.path.join(self._execdirname,
'samples', basename + suffix)
if os.path.exists(file_path):
self.tw.load_files(file_path)
break
self.tw.load_save_folder = os.path.join(self._get_execution_dir(),
'samples')
self.win.get_window().set_cursor(gtk.gdk.Cursor(gtk.gdk.LEFT_PTR))
def _fill_samples_list(self, store):
'''
Append images from the artwork_paths to the store.
'''
for filepath in self._scan_for_samples():
pixbuf = None
pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(
filepath, 100, 100)
store.append([pixbuf, filepath])
def _scan_for_samples(self):
samples = sorted(
glob.glob(
os.path.join(
self._get_execution_dir(),
'samples',
'thumbnails',
'*.png')))
return samples
if __name__ == '__main__':
TurtleMain()
|
|
#!/usr/bin/env python
# encoding: utf-8
import numpy as np
import cv2
import sys, os
import argparse
import copy
from importlib import import_module
import DeepFried2 as df
from lbtoolbox.util import flipany, printnow
from training_utils import dotrain, dostats, dopred
from df_extras import BiternionCriterion
from common import deg2bit, bit2deg, flipbiternions, ensemble_biternions, cutout
pjoin = os.path.join
def myimread(fname, netlib):
im = cv2.imread(fname, flags=cv2.IMREAD_COLOR)
if im is None:
raise ValueError("Couldn't load image " + fname)
im = cutout(im, *netlib.getrect(0, 0, im.shape[1], im.shape[0]))
return netlib.preproc(im)
def load(path, testname, skip, ydict, netlib):
Xtr, Xte = [], []
ytr, yte = [], []
ntr, nte = [], []
for lbl in os.listdir(path):
for f in os.listdir(pjoin(path, lbl)):
if f.startswith(testname):
Xte.append(myimread(pjoin(path, lbl, f), netlib))
yte.append(ydict[lbl])
nte.append(f)
elif not any(f.startswith(s) for s in skip):
Xtr.append(myimread(pjoin(path, lbl, f), netlib))
ytr.append(ydict[lbl])
ntr.append(f)
return np.array(Xtr), np.array(Xte), np.array(ytr), np.array(yte), ntr, nte
def merge4to8(X, y, n):
y8 = np.full_like(y['4p'], np.nan)
for idx4p, n4p in enumerate(n['4p']):
idx4x = n['4x'].index(n4p)
y4x, y4p = y['4x'][idx4x], y['4p'][idx4p]
y8[idx4p] = y4x + y4p if not (y4x == 0 and y4p == 3) else 7
return X['4p'].copy(), y8, copy.deepcopy(n['4p'])
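# Worked example of the merge above: y4x indexes ['front','right','back','left']
# and y4p indexes ['frontright','backright','backleft','frontleft'], so
# y4x + y4p lands on the matching entry of the 8-way classes8 list, e.g.
# 'back'(2) + 'backleft'(2) -> 4 == 'backleft'. The one wrap-around pair,
# 'front'(0) + 'frontleft'(3), would collide with 'backright'(3) and is
# special-cased to 7 == 'frontleft'.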
def flipped(X, y, n, old, new):
indices = np.where(y == old)[0]
return flipany(X[indices], dim=3), np.full(len(indices), new, dtype=y.dtype), [n[i] for i in indices]
def flipall(X, y, n, flips):
fx, fy, fn = [], [], []
for old, new in flips:
a, b, c = flipped(X, y, n, old, new)
fx.append(a) ; fy.append(b) ; fn.append(c)
return np.concatenate([X] + fx), np.concatenate([y] + fy), n + sum(fn, list())
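# The flip augmentation assumes NCHW image batches: flipany(..., dim=3) mirrors
# the width axis, and each (old, new) pair in `flips` relabels the mirrored
# samples (e.g. 'frontright' becomes 'frontleft'), roughly doubling the data.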
def dopred_bit(model, aug, X, batchsize=100):
return dopred(model, aug, X, ensembling=ensemble_biternions, output2preds=lambda x: x, batchsize=batchsize)
def maad_from_deg(preds, reals):
return np.rad2deg(np.abs(np.arctan2(np.sin(np.deg2rad(reals-preds)), np.cos(np.deg2rad(reals-preds)))))
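# Worked example: maad_from_deg(350.0, 10.0) == 20.0. The arctan2 of the
# sine and cosine of the difference wraps the error into [-180, 180] before
# the absolute value, so 350 and 10 degrees are 20 degrees apart, not 340.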
def skip_one_classers(X,y,n):
X['4p'] = np.array([X['4p'][i] for i,name in enumerate(n['4p']) if name in n['4x']])
y['4p'] = np.array([y['4p'][i] for i,name in enumerate(n['4p']) if name in n['4x']])
n['4p'] = [name for name in n['4p'] if name in n['4x']]
X['4x'] = np.array([X['4x'][i] for i,name in enumerate(n['4x']) if name in n['4p']])
y['4x'] = np.array([y['4x'][i] for i,name in enumerate(n['4x']) if name in n['4p']])
n['4x'] = [name for name in n['4x'] if name in n['4p']]
return X, y, n
def prepare_data(datadir, netlib):
classes4x = ['front','right','back','left']
classnums4x = {c: i for i, c in enumerate(classes4x)}
classes4p = ['frontright','backright','backleft','frontleft']
classnums4p = {c: i for i, c in enumerate(classes4p)}
classes8 = ['frontright','rightfront','rightback','backright','backleft','leftback','leftfront','frontleft']
classnums8 = {c: i for i, c in enumerate(classes8)}
centre8 = {
'frontright': 22.5,
'rightfront': 67.5,
'rightback': 112.5,
'backright': 157.5,
'backleft': 202.5,
'leftback': 247.5,
'leftfront': 292.5,
'frontleft': 337.5,
}
Xtr, Xte = {}, {}
ytr, yte = {}, {}
ntr, nte = {}, {}
for name, ydict in {'4x': classnums4x, '4p': classnums4p}.items():
Xtr[name], Xte[name], ytr[name], yte[name], ntr[name], nte[name] = load(pjoin(datadir, name),
testname='lucas', skip=['.', 'dog', 'dog2', 'doggy'], ydict=ydict, netlib=netlib
)
Xtr,ytr,ntr = skip_one_classers(Xtr,ytr,ntr)
Xte,yte,nte = skip_one_classers(Xte,yte,nte)
for name in Xtr:
print(name)
print("Trainset: X({}), y({})".format(Xtr[name].shape, ytr[name].shape))
print("Testset: X({}), y({})".format(Xte[name].shape, yte[name].shape))
print("Labels: {}".format(set(ytr[name])))
# Merge 4x and 4p into 8
Xtr['8'], ytr['8'], ntr['8'] = merge4to8(Xtr, ytr, ntr)
Xte['8'], yte['8'], nte['8'] = merge4to8(Xte, yte, nte)
# Do flip-augmentation beforehand.
flips = [
(classnums8['frontright'], classnums8['frontleft']),
(classnums8['rightfront'], classnums8['leftfront']),
(classnums8['rightback'], classnums8['leftback']),
(classnums8['backright'], classnums8['backleft']),
(classnums8['backleft'], classnums8['backright']),
(classnums8['leftback'], classnums8['rightback']),
(classnums8['leftfront'], classnums8['rightfront']),
(classnums8['frontleft'], classnums8['frontright']),
]
Xtr['8'], ytr['8'], ntr['8'] = flipall(Xtr['8'], ytr['8'], ntr['8'], flips=flips)
Xte['8'], yte['8'], nte['8'] = flipall(Xte['8'], yte['8'], nte['8'], flips=flips)
# Convert class-IDs into biternions.
ytr = np.array([deg2bit(centre8[classes8[y]]) for y in ytr['8']])
yte = np.array([deg2bit(centre8[classes8[y]]) for y in yte['8']])
return Xtr['8'], ytr, Xte['8'], yte, nte['8']
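# Background: a biternion encodes an angle t as the 2D unit vector
# (cos t, sin t); deg2bit/bit2deg (imported from common) convert between the
# two, which avoids the 0/360 wrap-around discontinuity when regressing angles.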
if __name__ == '__main__':
try:
# Add the "models" directory to the path!
from rospkg import RosPack
modeldir = pjoin(RosPack().get_path('biternion'), 'models')
sys.path.append(pjoin(RosPack().get_path('biternion'), 'scripts'))
except ImportError:
modeldir = os.path.dirname(os.path.abspath(os.path.join(__file__, '../models')))
parser = argparse.ArgumentParser(description='BiternionNet training')
parser.add_argument("-c", "--criterion",
type=str, default='cosine',
help='Training criterion: `cosine` or `von-mises`',
)
parser.add_argument("-e", "--epochs",
type=int, default=3,
help='Number of epochs to train.'
)
parser.add_argument("-d", "--datadir",
type=str, default=".",
help="Location of training data. Needs `4x` and `4p` subfolders."
)
parser.add_argument("-o", "--output",
type=argparse.FileType('wb'), default="biternion-net.npz",  # binary mode: np.save writes bytes
help="File to save the learned model as."
)
parser.add_argument("-m", "--modeldir",
type=str, default=modeldir,
help="Search-path for network description files."
)
parser.add_argument("-n", "--net",
type=str, default="head_50_50",
help="Name of the python file containing the net definition (without .py, in the `net` subfolder.)"
)
args = parser.parse_args()
print(args.criterion + " criterion will be used")
if args.criterion == 'cosine':
crit = BiternionCriterion()
elif args.criterion == 'von-mises':
crit = BiternionCriterion(kappa=1)
else:
print("ERROR: You specified wrong criterion. Sorry =(")
sys.exit(1)
for d in args.modeldir.split(':'):
sys.path.append(d)
netlib = import_module(args.net)
printnow("Loading data from {}\n", args.datadir)
Xtr, ytr, Xte, yte, nte = prepare_data(args.datadir, netlib)
ytr = ytr.astype(df.floatX)
yte = yte.astype(df.floatX)
printnow("Got {:.2f}k training images after flipping\n", len(Xtr)/1000.0)
aug = netlib.mkaug(Xtr, ytr)
net = netlib.mknet()
printnow('Network has {:.3f}M params in {} layers\n', df.utils.count_params(net)/1000.0/1000.0, len(net.modules))
print(net[:21].forward(aug.augbatch_train(Xtr[:100])[0]).shape)
costs = dotrain(net, crit, aug, Xtr, ytr, nepochs=args.epochs)
print("Costs: {}".format(' ; '.join(map(str, costs))))
dostats(net, aug, Xtr, batchsize=64)
# Save the network.
printnow("Saving the learned network to {}\n", args.output)
np.save(args.output, net.__getstate__())
# Prediction, TODO: Move to ROS node.
s = np.argsort(nte)
Xte,yte = Xte[s],yte[s]
printnow("(TEMP) Doing predictions.\n", args.output)
y_pred = dopred_bit(net, aug, Xte, batchsize=64)
# Ensemble the flips!
#res = maad_from_deg(bit2deg(yte), bit2deg(yte))
res = maad_from_deg(bit2deg(y_pred), bit2deg(yte))
printnow("MAE for test images = {:.2f}\n", res.mean())
#y_pred2 = ensemble_biternions([yte[::2], flipbiternions(yte[1::2])])
y_pred2 = ensemble_biternions([y_pred[::2], flipbiternions(y_pred[1::2])])
res = maad_from_deg(bit2deg(y_pred2), bit2deg(yte[::2]))
printnow("MAE for flipped augmented images = {:.2f}\n", res.mean())
|
|
from django.utils.translation import ugettext_lazy as _
PERMISSIONS = (
(
'access_community',
_('Access Community'),
()
),
(
'viewupcoming_community',
_('View Upcoming Meeting'),
('access_community',)
),
(
'viewupcoming_draft',
_('View Upcoming Meeting Before Published'),
('viewupcoming_community',)
),
(
'editagenda_community',
_('Edit Upcoming Agenda'),
()
),
(
'editparticipants_community',
_('Manage Upcoming Meeting Participants'),
()
),
(
'editsummary_community',
_('Edit Summary'),
()
),
(
'editupcoming_community',
_('Edit Upcoming'),
()
),
(
'invite_member',
_('Invite Member'),
()
),
(
'acceptclosed_proposal',
_('Accept Closed Proposal'),
()
),
(
'acceptopen_proposal',
_('Accept Open Proposal'),
()
),
(
'add_issue',
_('Add Issue'),
()
),
(
'add_issuecomment',
_('Add Issue Comment'),
()
),
(
'add_proposal',
_('Add Proposal'),
()
),
(
'chairman_vote',
_('Chairman Vote'),
()
),
(
'edit_referendum',
_('Edit Referendum'),
()
),
(
'editclosed_issue',
_('Edit Closed Issue'),
()
),
(
'editclosed_issuecomment',
_('Edit Closed Issue Comment'),
()
),
(
'editclosed_proposal',
_('Edit Closed Proposal'),
()
),
(
'editopen_issue',
_('Edit Open Issue'),
()
),
(
'editopen_issuecomment',
_('Edit Open Issue Comment'),
()
),
(
'editopen_proposal',
_('Edit Open Proposal'),
()
),
(
'edittask_proposal',
_('Edit Task Proposal'),
()
),
(
'move_to_referendum',
_('Move To Referendum'),
()
),
(
'proposal_board_vote',
_('Proposal Board Vote'),
()
),
(
'proposal_board_vote_self',
_('Proposal Board Vote Self'),
()
),
(
'view_proposal_in_discussion',
_('View Proposal In Discussion'),
()
),
(
'view_referendum_results',
_('View Referendum Results'),
()
),
(
'view_update_status',
_('View Update Status'),
()
),
(
'view_straw_vote_result',
_('View straw vote result'),
()
),
(
'viewclosed_issue',
_('View Closed Issue'),
()
),
(
'viewclosed_proposal',
_('View Closed Proposal'),
()
),
(
'viewopen_issue',
_('View Open Issue'),
()
),
(
'viewopen_proposal',
_('View Open Proposal'),
()
),
(
'vote',
_('Vote'),
()
),
(
'vote_ranking',
_('Vote Ranking'),
()
),
(
'add_meeting',
_('Add Meeting'),
()
),
(
'view_meeting',
_('View Meeting'),
()
),
(
'show_member_profile',
_('Show Member Profile'),
()
),
(
'view_confidential',
_('Can view confidential Issue/Proposal'),
()
)
)
CHOICES = [x[:2] for x in PERMISSIONS]
CHOICES_DICT = dict(CHOICES)
ORDER = dict([(x[0], i) for i, x in enumerate(PERMISSIONS)])
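# Example (hypothetical usage): CHOICES fits a Django `choices=` field option,
# CHOICES_DICT maps codename -> label, and ORDER restores declaration order:
#   sorted(['vote', 'access_community'], key=ORDER.get)
#   -> ['access_community', 'vote']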
|
|
#!/usr/bin/python
import random
# TODO:
# Q: What quantity of middle bandwidth do you need to kill guards?
# A: Intuitively, you need the disable rate % of bandwidth, but you
# might have some edge cases to exploit with min_circs.
PATH_BIAS_PCT = 70
# XXX: Min_circs only activates the "notice" level logs
PATH_BIAS_MIN_CIRCS = 20
# XXX: An int divisor was wrong here. Fix that in Tor. We might
# even want a weighted moving average, but that will be trickier
# to analyze.
PATH_BIAS_SCALE_FACTOR = 50
PATH_BIAS_SCALE_THRESHOLD = 250
# XXX: We should only emit warnings if we are above the scaling threshold...
PATH_BIAS_WARN_CIRCS = PATH_BIAS_SCALE_THRESHOLD*(PATH_BIAS_SCALE_FACTOR/100.0)
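# With the defaults above that is 250 * (50/100.0) = 125 circuits, i.e. the
# size a counting window shrinks to right after scaling.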
#############################################################
# FIXME: haxxx. Who cares, though?
def reset_globals():
global PATH_BIAS_PCT
global PATH_BIAS_MIN_CIRCS
global PATH_BIAS_SCALE_FACTOR
global PATH_BIAS_SCALE_THRESHOLD
global PATH_BIAS_WARN_CIRCS
PATH_BIAS_PCT = 70
PATH_BIAS_MIN_CIRCS = 20
PATH_BIAS_SCALE_FACTOR = 50
PATH_BIAS_SCALE_THRESHOLD = 250
PATH_BIAS_WARN_CIRCS = PATH_BIAS_SCALE_THRESHOLD*(PATH_BIAS_SCALE_FACTOR/100.0)
####################### Guard Types #########################
# Normal Guard experiences the average circuit failure rate
# of the network as a whole
class Guard:
def __init__(self, succeed_rate):
self.first_hops_total = 0
self.success_total = 0
self._first_hops = 0
self._success = 0
self.succeed_rate = succeed_rate
self.rejected_count = 0
def reset(self):
self._success = 0
self._first_hops = 0
def reject_if_bad(self):
if self.is_bad():
self.reset()
self.rejected_count += 1
def reject_rate(self):
return self.rejected_count/float(self.first_hops_total)
def _get_rate(self):
return self._success/float(self._first_hops)
def is_bad(self):
return self._first_hops >= PATH_BIAS_MIN_CIRCS and \
(self._get_rate() < (PATH_BIAS_PCT/100.0))
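# Worked example with the defaults: after 20 first hops, a guard with 13
# successes has rate 13/20 = 0.65 < 0.70 and is flagged bad, while one with
# 14 successes (exactly 0.70) survives, since the comparison is strict.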
def build_circuit(self):
self._inc_first_hop()
if random.random() < self.succeed_rate:
self._inc_success()
# Client may give up on us after this circuit
self.reject_if_bad()
def circ_fail_count(self):
return self._first_hops - self._success
def _inc_first_hop(self):
self._first_hops += 1
self.first_hops_total += 1
if self._first_hops > PATH_BIAS_SCALE_THRESHOLD:
self._first_hops *= PATH_BIAS_SCALE_FACTOR/100.0
self._success *= PATH_BIAS_SCALE_FACTOR/100.0
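# Scaling halves both counters once the window exceeds 250 first hops, e.g.
# 251 hops with 200 successes becomes 125.5 with 100: the observed success
# rate (~0.797) is preserved while older history is gradually forgotten.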
def _inc_success(self):
self._success += 1
self.success_total += 1
# EvilGuard collects statistics on how evil he is, but doesn't
# actually implement any evilness
class EvilGuard(Guard):
def __init__(self, succeed_rate, adversary_capacity):
Guard.__init__(self, succeed_rate)
self.adversary_capacity = adversary_capacity # c/n probability of malicious exit
self.capture_count = 0
def pwnt_per_client(self):
return self.capture_count/float(self.rejected_count+1)
def capture_rate(self):
return self.capture_count/float(self.first_hops_total)
def compromise_rate(self):
return self.capture_count/float(self.success_total)
# PassiveEvilGuard uses a non-destructive long-term timing-based
# tagging attack to fully correlate circuits end-to-end with 100%
# accuracy. PassiveEvilGuard does not kill any circuits.
class PassiveEvilGuard(EvilGuard):
def __init__(self, succeed_rate, adversary_capacity):
EvilGuard.__init__(self, succeed_rate, adversary_capacity)
def build_circuit(self):
self._inc_first_hop()
# The presence of a malicious exit is a prior probability governed by the
# client. Decide it now.
got_malicious_exit = False
if random.random() < self.adversary_capacity:
got_malicious_exit = True
if random.random() < self.succeed_rate:
if got_malicious_exit: # via timing-based tagging attack
self._inc_success()
self.capture_count += 1
else:
self._inc_success() # "Better luck next time :/"
# Client may give up on us after this circuit
self.reject_if_bad()
# UnrepentantEvilGuard uses a destructive tagging attack to
# fully correlate circuits end-to-end with 100%
# accuracy, as well as to kill uncorrelated circuits.
#
# UnrepentantEvilGuard doesn't care if there is a defense or
# not.
class UnrepentantEvilGuard(EvilGuard):
def __init__(self, succeed_rate, adversary_capacity):
EvilGuard.__init__(self, succeed_rate, adversary_capacity)
def build_circuit(self):
self._inc_first_hop()
# The presence of a malicious exit is a prior probability governed by the
# client. Decide it now.
got_malicious_exit = False
if random.random() < self.adversary_capacity:
got_malicious_exit = True
if random.random() < self.succeed_rate:
if got_malicious_exit: # via tagging attack
self._inc_success()
self.capture_count += 1
else:
pass # "We can't deanon it? Who cares then?"
# Client may give up on us after this circuit
self.reject_if_bad()
# OmniscientEvilGuard is the worst-case adversary against
# the path bias counters implemented in Tor 0.2.3.17.
#
# OmniscientEvilGuard knows client path counts, when they are about to
# think it's bad, and when they scale, and tries to use all of these
# to fail what it can to bias client paths without appearing bad to
# them.
#
# Further in favor of the adversary, we assume that their circuit
# failure rate is actually less than the network average by
# the fraction of the network that they control (because the rest
# of the network experiences this circuit failure as part of the
# average failure).
#
# Further still, OmniscientEvilGuard is *so* omniscient, it even knows
# when circuits will fail due to ambient noise, so it never gets
# killed by chance. (It is debatable how much this helps; a
# smart adversary could play the stats close enough to the line
# to approach this omniscience asymptotically).
#
# Note: These omniscience assumptions all favor the attacker,
# but they also simplify analysis to get worst-case bounds easily.
#
# XXX: Introducing some fuzz into our scaling count and/or rate might
# help remove this exact omniscience in practice?
class OmniscientEvilGuard(EvilGuard):
def __init__(self, succeed_rate, adversary_capacity):
EvilGuard.__init__(self, succeed_rate, adversary_capacity)
def look_ahead(self, n):
self.prev_first_hops = self._first_hops
self.prev_success = self._success
self.prev_first_hops_total = self.first_hops_total
self.prev_success_total = self.success_total
for i in xrange(n):
self._inc_first_hop()
def stop_looking(self):
self._first_hops = self.prev_first_hops
self._success = self.prev_success
self.first_hops_total = self.prev_first_hops_total
self.success_total = self.prev_success_total
# This guard should never get caught
def reject_if_bad(self):
assert not self.is_bad()
def build_circuit(self):
self._inc_first_hop()
# The presence of a malicious exit is a prior probability governed by the
# client. Decide it now.
got_malicious_exit = False
if random.random() < self.adversary_capacity:
got_malicious_exit = True
# In reality, OmniscientEvilGuard sees less failure because some
# of the failure in the network is due to other colluding nodes.
#if random.random() < self.succeed_rate + self.adversary_capacity:
#
# Note: We cut this out, because it favors the attacker to do so.
# It removes the risk of elimination by chance (which they could mitigate
# for an unknown but possibly small cost).
if True:
if got_malicious_exit: # via tagging attack
self.capture_count += 1
self._inc_success() # "We built a circuit! Yay!"
else:
# Look-ahead only needs to be non-zero to mitigate risk of random rejection
self.look_ahead(0)
if (self._get_rate() <= (PATH_BIAS_PCT/100.0)):
self.stop_looking()
self._inc_success() # "I better be good! don't want to get caught.."
else:
pass # Fail the circuit by doing nothing. It's not useful
# Client may give up on us after this circuit
self.reject_if_bad()
# ProbabalisticEvilGuard only fails untagged circuits pct_below_path_bias
# below the warning rate
class ProbabalisticEvilGuard(EvilGuard):
def __init__(self, succeed_rate, adversary_capacity, pct_below_path_bias):
EvilGuard.__init__(self, succeed_rate, adversary_capacity)
# FIXME: There may be an optimal point where pct_below_path_bias
# is the lowest possible value that the adversary expects to control?
# Doesn't seem to be worth probing, though
self.path_bias_rate = (PATH_BIAS_PCT - pct_below_path_bias)/100.0
assert self.path_bias_rate <= 1.0
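# Worked example: with the default PATH_BIAS_PCT = 70 and pct_below_path_bias
# = 5, path_bias_rate = (70 - 5)/100.0 = 0.65, i.e. the guard lets enough
# untagged circuits through to keep its observed success rate near 65%.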
def build_circuit(self):
self._inc_first_hop()
# The presence of a malicious exit is a prior probability governed by the
# client. Decide it now.
got_malicious_exit = False
if random.random() < self.adversary_capacity:
got_malicious_exit = True
# ProbabalisticGamingGuard sees less failure because some
# of the failure in the network is due to other colluding nodes.
if random.random() < self.succeed_rate + self.adversary_capacity:
if got_malicious_exit: # via tagging attack
self._inc_success()
self.capture_count += 1
elif not self.success_total or \
self.success_total/float(self.first_hops_total) <= self.path_bias_rate:
# "Uh oh, we're failing too much, better let some through"
self._inc_success()
else:
pass # Fail the circuit by doing nothing. It's not useful
# Client may give up on us after this circuit
self.reject_if_bad()
####################### Testing and Simulation #########################
def simulate_circs_until(g, circ_count, say_when):
for i in xrange(circ_count):
g.build_circuit()
if say_when(g):
return True
return say_when(g)
# Variables:
# success_rate
# PATH_BIAS_MIN_CIRCS = 20
# PATH_BIAS_PCT = 70
def startup_false_positive_test(trials, success_rate, min_circs, path_bias_pct):
# FIXME: Look it's just easier this way, ok? Get off my back already
global PATH_BIAS_MIN_CIRCS
global PATH_BIAS_PCT
PATH_BIAS_MIN_CIRCS = min_circs
PATH_BIAS_PCT = path_bias_pct
g = Guard(success_rate)
for i in xrange(1+trials/min_circs):
simulate_circs_until(g, PATH_BIAS_SCALE_THRESHOLD, lambda g: False)
g.reset()
#print g._get_rate()
return g.rejected_count
def reject_false_positive_test(trials, success_rate, scale_circs, path_bias_pct):
# FIXME: Look it's just easier this way, ok? Get off my back already
global PATH_BIAS_MIN_CIRCS
global PATH_BIAS_SCALE_THRESHOLD
global PATH_BIAS_PCT
PATH_BIAS_SCALE_THRESHOLD = scale_circs
PATH_BIAS_PCT = path_bias_pct
g = Guard(success_rate)
# Ignore startup. We don't reject then.
simulate_circs_until(g, PATH_BIAS_SCALE_THRESHOLD, lambda g: False)
g.rejected_count = 0
simulate_circs_until(g, trials, lambda g: False)
return g.rejected_count
def generic_rate_test(g, trials, success_rate, adversary_capacity, path_bias_pct, rate_fcn):
# FIXME: Look it's just easier this way, ok? Get off my back already
global PATH_BIAS_PCT
PATH_BIAS_PCT = path_bias_pct
simulate_circs_until(g, trials, lambda g: False)
if not isinstance(g, UnrepentantEvilGuard):
assert not g.is_bad()
return rate_fcn(g)
def dos_attack_test(success_rate, dos_success_rate, path_bias_pct, scale_thresh):
global PATH_BIAS_PCT
global PATH_BIAS_SCALE_THRESHOLD
PATH_BIAS_PCT = path_bias_pct
PATH_BIAS_SCALE_THRESHOLD = scale_thresh
g = Guard(success_rate)
simulate_circs_until(g, PATH_BIAS_SCALE_THRESHOLD, lambda g: False)
g.rejected_count = 0
g.succeed_rate = dos_success_rate
simulate_circs_until(g, 10000, lambda g: g.rejected_count > 0)
return g.first_hops_total - PATH_BIAS_SCALE_THRESHOLD
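# The value returned above is the number of circuits attempted after the
# warm-up phase before the client first rejects the guard (capped at 10000),
# i.e. roughly how long a success-rate drop can persist before detection.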
################ Multi-Dementianal Analysis #####################
# If brute force doesn't work, you're not using enough
def brute_force(cmptr, functor, ranges, increment):
testpoint = map(lambda p: p[0], ranges)
maxpoint = list(testpoint) # take a copy; testpoint is mutated in the search loop below
maxval = functor(*testpoint)
print "New extrema at "+str(maxpoint)+": "+str(maxval)
for dementia in xrange(len(ranges)):
if increment[dementia] > 0:
cmpr = lambda x, y: x<y
else:
cmpr = lambda x, y: x>y
value = ranges[dementia][0]
while cmpr(value, ranges[dementia][1]):
value += increment[dementia]
testpoint[dementia] = value
val = functor(*testpoint)
if cmptr(val, maxval):
maxval = val
maxpoint = list(testpoint) # copy the current best point so later mutation doesn't overwrite it
print "New extrema at "+str(maxpoint)+": "+str(maxval)
# FIXME: Haxx
reset_globals()
return maxpoint
def surface_plot(functor, startpoint, ranges, increment):
pass
def gradient_descent(functor, startpoint, ranges, increment):
# Warning, mentat: If brute force doesn't work, you're not using enough
# It might be wise to try to get a 3d color plot/heatmap/some other
# visualization before attempting this?
pass
def main():
#random.seed(23)
if True:
print "==================== P(Compromise|Guard) =========================="
print "\nPassiveEvilGuard compromise rate at [success_rate, adversary_capacity, path_bias_pct]:"
print "(As expected, P(CompromisedExit|PassiveEvilGuard) ~= c/n)"
print brute_force(lambda x,y: x>y,
lambda t, a,b,c:
generic_rate_test(PassiveEvilGuard(a,b), t, a,b,c,
lambda g:
g.compromise_rate()),
#generic_rate_test(trials, success_rate, adversary_capacity, path_bias_pct):
[(10000,10000), (0.75,0.75), (0.05,0.85), (70, 70)],
[0, 0, 0.2, 5])
print "\nUnrepentantEvilGuard compromise rate at [success_rate, adversary_capacity, path_bias_pct]:"
print "(As expected, P(CompromisedExit|UnrepentantEvilGuard) = 1.0)"
print brute_force(lambda x,y: x>y,
lambda t,a,b,c:
generic_rate_test(UnrepentantEvilGuard(a,b), t,a,b,c,
lambda g:
g.compromise_rate()),
#generic_rate_test(trials, success_rate, adversary_capacity, path_bias_pct):
[(10000,10000), (0.75,0.75), (0.05,0.85), (70, 70)],
[0, 0, 0.2, 5])
print "\nProbabalisticEvilGuard compromise rate at [success_rate, adversary_capacity, path_bias_pct]:"
print "P(CompromisedExit|ProbabalisticEvilGuard) <= (c/n)*(100/PATH_BIAS_PCT)"
print brute_force(lambda x,y: x>y,
lambda t,a,b,c:
generic_rate_test(ProbabalisticEvilGuard(a,b,5),
t,a,b,c,
lambda g:
g.compromise_rate()),
#generic_rate_test(trials, success_rate, adversary_capacity, path_bias_pct):
[(10000,10000), (0.75,0.75), (0.05,0.85), (70, 70)],
[0, 0, 0.2, 5])
print "\nOmniscientEvilGuard compromise rate at [success_rate, adversary_capacity, path_bias_pct]:"
print "P(CompromisedExit|OmniscientEvilGuard) <= (c/n)*(100/PATH_BIAS_PCT)"
print brute_force(lambda x,y: x>y,
lambda t,a,b,c:
generic_rate_test(OmniscientEvilGuard(a,b), t,a,b,c,
lambda g:
g.compromise_rate()),
#generic_rate_test(trials, success_rate, adversary_capacity, path_bias_pct):
[(10000,10000), (0.75,0.75), (0.05,0.85), (70, 70)],
[0, 0, 0.2, 5])
print "\nOmniscientEvilGuard compromise at [success_rate, adversary_capacity, path_bias_pct]:"
print "P(CompromisedExit|OmniscientEvilGuard) <= (c/n)*(100/PATH_BIAS_PCT)"
print brute_force(lambda x,y: x<y,
lambda t,a,b,c:
generic_rate_test(OmniscientEvilGuard(a,b), t,a,b,c,
lambda g:
g.compromise_rate()),
#generic_rate_test(trials, success_rate, adversary_capacity, path_bias_pct):
[(10000,10000), (0.75,0.75), (0.20,0.20), (20, 80)],
[0, 0, 0.05, 20])
if True:
print "\n\n==================== Circuits pwnt per client ========================="
print "\nUnrepentantEvilGuard compromised circs at [success_rate, adversary_capacity, path_bias_pct]:"
print "circs_per_client ~= success_rate*c/n*MIN_CIRCS for c/n < PATH_BIAS_PCT || c/n < success_rate"
print " ~= success_rate*circ_attempts*c/n for c/n > PATH_BIAS_PCT && c/n > success_rate"
print brute_force(lambda x,y: x>y,
lambda t,a,b,c:
generic_rate_test(UnrepentantEvilGuard(a,b), t,a,b,c,
lambda g:
g.pwnt_per_client()),
#generic_rate_test(trials, success_rate, adversary_capacity, path_bias_pct):
[(100000,100000), (0.75,0.75), (0.05,0.85), (50, 50)],
[0, 0, 0.2, 5])
print "\nPassiveEvilGuard compromised circs at [success_rate, adversary_capacity, path_bias_pct]:"
print "circs_per_client ~= success_rate * circ_attempts * c/n"
print brute_force(lambda x,y: x>y,
lambda t,a,b,c:
generic_rate_test(PassiveEvilGuard(a,b),
t,a,b,c,
lambda g:
g.pwnt_per_client()),
#generic_rate_test(trials, success_rate, adversary_capacity, path_bias_pct):
[(100000,100000), (0.75,0.75), (0.05,0.85), (50, 50)],
[0, 0, 0.2, 5])
print "\nProbabalisticEvilGuard compromised circs at [success_rate, adversary_capacity, path_bias_pct]:"
print "circs_per_client ~= success_rate * circ_attempts * c/n"
print brute_force(lambda x,y: x>y,
lambda t,a,b,c:
generic_rate_test(ProbabalisticEvilGuard(a,b,5),
t,a,b,c,
lambda g:
g.pwnt_per_client()),
#generic_rate_test(trials, success_rate, adversary_capacity, path_bias_pct):
[(100000,100000), (0.75,0.75), (0.05,0.85), (50, 50)],
[0, 0, 0.2, 5])
print "\nOmniscientEvilGuard compromised circs at [success_rate, adversary_capacity, path_bias_pct]:"
print "circs_per_client ~= circ_attempts * c/n"
print brute_force(lambda x,y: x>y,
lambda t,a,b,c:
generic_rate_test(OmniscientEvilGuard(a,b), t,a,b,c,
lambda g:
g.pwnt_per_client()),
#generic_rate_test(trials, success_rate, adversary_capacity, path_bias_pct):
[(100000,100000), (0.75,0.75), (0.05,0.85), (50, 50)],
[0, 0, 0.2, 5])
if True:
print "\n\n===================== False Positives ============================"
print "\nStartup false positive counts at [num_circs, success_rate, min_circs, path_bias_pct]:"
print "(Results are some function of success_rate - path_bias_pct vs min_circs)"
print brute_force(lambda x,y: x<y,
startup_false_positive_test,
#false_positive_test(num_circs, success_rate, min_circs, path_bias_pct):
[(1000000,1000000), (0.80, 0.80), (25,250), (70, 70)],
[0, -0.1, 25, 5])
print "\nStartup false positive counts at [num_circs, success_rate, min_circs, path_bias_pct]:"
print "(Results are some function of success_rate - path_bias_pct vs min_circs)"
print brute_force(lambda x,y: x<y,
startup_false_positive_test,
#false_positive_test(num_circs, success_rate, min_circs, path_bias_pct):
[(1000000,1000000), (0.45, 0.45), (25,250), (30, 30)],
[0, -0.1, 25, 5])
print "\nFalse positive counts at [num_circs, success_rate, scale_circs, path_bias_pct]:"
print "(Results are some function of success_rate - path_bias_pct vs scale_circs)"
print brute_force(lambda x,y: x<y,
reject_false_positive_test,
#false_positive_test(num_circs, success_rate, scale_circs, path_bias_pct):
[(1000000,1000000), (0.70, 0.70), (100,500), (70, 70)],
[0, -0.1, 50, 5])
print "\nFalse positive counts at [num_circs, success_rate, scale_circs, path_bias_pct]:"
print "(Results are some function of success_rate - path_bias_pct vs scale_circs)"
print brute_force(lambda x,y: x<y,
reject_false_positive_test,
#false_positive_test(num_circs, success_rate, scale_circs, path_bias_pct):
[(1000000,1000000), (0.75, 0.75), (100,500), (70, 70)],
[0, -0.1, 50, 5])
print "\nFalse positive counts at [num_circs, success_rate, scale_circs, path_bias_pct]:"
print "(Results are some function of success_rate - path_bias_pct vs scale_circs)"
print brute_force(lambda x,y: x<y,
reject_false_positive_test,
#false_positive_test(num_circs, success_rate, scale_circs, path_bias_pct):
[(1000000,1000000), (0.80, 0.80), (100,500), (70, 70)],
[0, -0.1, 50, 5])
print "\nFalse positive counts at [num_circs, success_rate, scale_circs, path_bias_pct]:"
print "(Results are some function of success_rate - path_bias_pct vs scale_circs)"
print brute_force(lambda x,y: x<y,
reject_false_positive_test,
#false_positive_test(num_circs, success_rate, scale_circs, path_bias_pct):
[(1000000,1000000), (0.55, 0.55), (100,500), (50, 50)],
[0, -0.1, 50, 5])
print "\nFalse positive counts at [num_circs, success_rate, scale_circs, path_bias_pct]:"
print "(Results are some function of success_rate - path_bias_pct vs scale_circs)"
print brute_force(lambda x,y: x<y,
reject_false_positive_test,
#false_positive_test(num_circs, success_rate, scale_circs, path_bias_pct):
[(1000000,1000000), (0.60, 0.60), (100,500), (50, 50)],
[0, -0.1, 50, 5])
print "\nFalse positive counts at [num_circs, success_rate, scale_circs, path_bias_pct]:"
print "(Results are some function of success_rate - path_bias_pct vs scale_circs)"
print brute_force(lambda x,y: x<y,
reject_false_positive_test,
#false_positive_test(num_circs, success_rate, scale_circs, path_bias_pct):
[(1000000,1000000), (0.45, 0.45), (100,500), (30, 30)],
[0, -0.1, 50, 5])
if True:
print "\n\n===================== DoS Attack Duration ========================"
print "\nDoS attack durations (in circs) at [success_rate, dos_success_rate, path_bias_pct, scale_thresh]:"
print brute_force(lambda x,y: x<y,
dos_attack_test,
#dos_attack_test(g, num_circs, success_rate, dos_success_rate, path_bias_pct):
#false_positive_test(num_circs, success_rate, scale_circs, path_bias_pct):
[(0.80, 0.80), (0.25,0.05), (30, 30), (300, 300)],
[-0.1, -0.05, 5, 100])
print "\nDoS attack durations (in circs) at [success_rate, dos_success_rate, path_bias_pct, scale_thresh]:"
print brute_force(lambda x,y: x>y,
dos_attack_test,
#dos_attack_test(g, num_circs, success_rate, dos_success_rate, path_bias_pct):
#false_positive_test(num_circs, success_rate, scale_circs, path_bias_pct):
[(0.80, 0.80), (0.25,0.25), (30, 30), (200, 1000)],
[-0.1, -0.1, 5, 100])
print "\nDoS attack durations (in circs) at [success_rate, dos_success_rate, path_bias_pct, scale_thresh]:"
print brute_force(lambda x,y: x>y,
dos_attack_test,
#dos_attack_test(g, num_circs, success_rate, dos_success_rate, path_bias_pct):
#false_positive_test(num_circs, success_rate, scale_circs, path_bias_pct):
[(0.80, 0.80), (0.05,0.05), (30, 30), (200, 1000)],
[-0.1, -0.1, 5, 100])
if __name__ == "__main__":
main() #sys.argv)
|
|
#! /usr/bin/env python
############################################################
# Program is part of PySAR v1.0 #
# Copyright(c) 2013, Heresh Fattahi #
# Author: Heresh Fattahi #
############################################################
import sys
import os
import getopt
import time
import datetime
import h5py
import numpy as np
import matplotlib.pyplot as plt
import getopt
def yyyymmdd2years(date):
d = datetime.datetime(*time.strptime(date,"%Y%m%d")[0:5])
yy = np.float(d.year) + np.float(d.month-1)/12 + np.float(d.day-1)/365
return yy
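# Example: yyyymmdd2years('20080201') = 2008 + (2-1)/12 + (1-1)/365 ~= 2008.083
# (months and days are converted to approximate year fractions).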
######################################
def Usage():
print '''
Estimating displacement velocity for each pixel.
It also generates the standard deviation of the velocity and the RMSE.
Usage:
timeseries2velocity.py -f timeSeriesFile.h5 -o OutputName.h5 -m <maximum date to include> -d '<list of dates to exclude>'
Example:
timeseries2velocity.py timeSeriesFile.h5
timeseries2velocity.py -f timeSeries.h5 -m 20080201
timeseries2velocity.py -f timeSeries_demCor.h5 -d '20040502 20060708 20090103'
timeseries2velocity.py -f timeSeries_demCor.h5 -o velocity_demCor.h5
'''
######################################
def main(argv):
# try:
# timeSeriesFile = argv[0]
# except:
# Usage() ; sys.exit(1)
if len(sys.argv)>2:
try:
opts, args = getopt.getopt(argv,"f:d:m:h:o:")
except getopt.GetoptError:
Usage() ; sys.exit(1)
for opt,arg in opts:
if opt == '-f':
timeSeriesFile = arg
elif opt == '-d':
datesNot2include = arg.split()   # space-separated date list, e.g. '20040502 20060708'
elif opt == '-m':
maxDate = arg
elif opt == '-o':
outName = arg
elif len(sys.argv)==2:
if argv[0]=='-h':
Usage(); sys.exit(1)
elif os.path.isfile(argv[0]):
timeSeriesFile = argv[0]
else:
Usage(); sys.exit(1)
#elif len(sys.argv)<2:
else:
Usage(); sys.exit(1)
##############################################################
print "Loading time series file: " + timeSeriesFile
h5timeseries = h5py.File(timeSeriesFile)
dateList1 = h5timeseries['timeseries'].keys()
##############################################################
print 'All dates in the time series:'
print dateList1
print '*******************'
try:
datesNot2include
except:
datesNot2include=[]
# maxDate='20100521'
try:
maxDate
maxDateyy=yyyymmdd2years(maxDate)
print maxDateyy
for date in dateList1:
yy=yyyymmdd2years(date)
if yy > maxDateyy:
print yy
datesNot2include.append(date)
except:
try:
datesNot2include
except:
datesNot2include=[]
try:
# datesNot2include = '20100903 20100730 20100625 20100521 20100416'
dateList=[]
for date in dateList1:
if date not in datesNot2include:
dateList.append(date)
except:
dateList=dateList1
print 'using all dates to calculate the velocity'
print '--------------------------------------------'
print 'dates used to estimate the velocity:'
print dateList
print '--------------------------------------------'
##############################################################
dateIndex={}
for ni in range(len(dateList)):
dateIndex[dateList[ni]]=ni
tbase=[]
d1 = datetime.datetime(*time.strptime(dateList[0],"%Y%m%d")[0:5])
for ni in range(len(dateList)):
d2 = datetime.datetime(*time.strptime(dateList[ni],"%Y%m%d")[0:5])
diff = d2-d1
tbase.append(diff.days)
dates=[]
for ni in range(len(dateList)):
d = datetime.datetime(*time.strptime(dateList[ni],"%Y%m%d")[0:5])
dates.append(d)
# print 'Index and dates from ' + timeSeriesFile
# for ni in range(len(dates)):
# print ni,dates[ni]
###########################################
print 'Calculating Velocity'
datevector=[]
for i in range(len(dates)):
datevector.append(np.float(dates[i].year) + np.float(dates[i].month-1)/12 + np.float(dates[i].day-1)/365)
B=np.ones([len(datevector),2])
B[:,0]=datevector
#B1 = np.linalg.pinv(B)
B1 = np.dot(np.linalg.inv(np.dot(B.T,B)),B.T)
B1 = np.array(B1,np.float32)
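# Least-squares sketch of what B1 implements: each pixel's displacement is
# modelled as d(t) ~= v*t + c, with t the acquisition time in decimal years.
# B has columns [t, 1], and B1 = (B^T B)^-1 B^T is the corresponding
# pseudoinverse, so x = B1 . Data holds the velocity v in row 0 and the
# intercept c in row 1.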
#########################################
dset = h5timeseries['timeseries'].get(h5timeseries['timeseries'].keys()[0])
# timeseries = np.zeros((len(h5timeseries['timeseries'].keys()),np.shape(dset)[0],np.shape(dset)[1]),np.float32)
# for date in h5timeseries['timeseries'].keys():
# timeseries[dateIndex[date]] = h5timeseries['timeseries'].get(date)
timeseries = np.zeros((len(dateList),np.shape(dset)[0],np.shape(dset)[1]),np.float32)
for date in dateList:
timeseries[dateIndex[date]] = h5timeseries['timeseries'].get(date)
lt,rows,cols=np.shape(timeseries)
numpixels=rows*cols
Data=np.zeros([lt,numpixels])
for i in range(lt):
Data[i,:]=np.reshape(timeseries[i],[1,numpixels])
x=np.dot(B1,Data)
velocity=np.reshape(x[0,:],[rows,cols])
# plt.imshow(velocity,vmin=-0.02, vmax=.02)
# plt.colorbar()
# plt.show()
#####################################################
print 'Calculating rmse'
Data_linear=np.dot(B,x)
rmse=np.reshape(np.sqrt((np.sum((Data_linear-Data)**2,0))/lt),[rows,cols])
# se=np.reshape((np.sum(np.abs(Data_linear-Data),0)/lt),[rows,cols])
# rmse=np.reshape((np.sum((Data_linear-Data)**2,0))/lt,[rows,cols])
######################################################
print 'Calculating the standard deviation of the estimated velocities'
residual=Data_linear-Data
s1=np.sqrt(np.sum(residual**2,0)/(lt-2))
s2=np.sqrt(np.sum((datevector-np.mean(datevector))**2))
se=np.reshape(s1/s2,[rows,cols])
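# Standard-error sketch: s1 = sqrt(sum(residual^2)/(n-2)) is the residual
# standard deviation of the fit and s2 = sqrt(sum((t - mean(t))^2)) is the
# spread of the acquisition times, so se = s1/s2 is the standard error of the
# estimated slope (the velocity) at each pixel.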
######################################################
# SSt=np.sum((Data-np.mean(Data,0))**2,0)
# SSres=np.sum(residual**2,0)
# SS_REG=SSt-SSres
# Rsquared=np.reshape(SS_REG/SSt,[rows,cols])
######################################################
# covariance of the velocities
######################################################
# h5file = projectDir+'/velocity_'+projectName+'.h5'
# print 'saving results to hdf5 file'
try:
outName
outName_rmse='rmse_'+outName
outName_se='std_'+outName
outName_Rsquared='R2_'+outName
except:
outName='velocity.h5'
outName_rmse='rmse_velocity.h5'
outName_se='std_velocity.h5'
outName_Rsquared='R2_velocity.h5'
# try:
# h5file = argv[1]
# print 'writing velocity to '+argv[1]
# except:
# h5file = 'velocity.h5'
# print 'writing to velocity.h5'
print '--------------------------------------'
print 'writing to '+outName
h5velocity = h5py.File(outName,'w')
group=h5velocity.create_group('velocity')
dset = group.create_dataset('velocity', data=velocity, compression='gzip')
group.attrs['date1'] = datevector[0]
group.attrs['date2'] = datevector[lt-1]
for key , value in h5timeseries['timeseries'].attrs.iteritems():
group.attrs[key]=value
h5velocity.close()
# h5timeseries.close()
print '--------------------------------------'
print 'writing to '+outName_rmse
h5file = outName_rmse
h5rmse = h5py.File(h5file,'w')
group=h5rmse.create_group('rmse')
dset = group.create_dataset('rmse', data=rmse, compression='gzip')
group.attrs['date1'] = datevector[0]
group.attrs['date2'] = datevector[lt-1]
for key , value in h5timeseries['timeseries'].attrs.iteritems():
group.attrs[key]=value
print '--------------------------------------'
print 'writing to '+outName_se
h5se = h5py.File(outName_se,'w')
group=h5se.create_group('rmse')
dset = group.create_dataset('rmse', data=se, compression='gzip')
group.attrs['date1'] = datevector[0]
group.attrs['date2'] = datevector[lt-1]
for key , value in h5timeseries['timeseries'].attrs.iteritems():
group.attrs[key]=value
print '--------------------------------------'
# print 'writing to '+outName_Rsquared
# h5rsquared = h5py.File(outName_Rsquared,'w')
# group=h5rsquared.create_group('rmse')
# dset = group.create_dataset('rmse', data=Rsquared, compression='gzip')
# group.attrs['date1'] = datevector[0]
# group.attrs['date2'] = datevector[lt-1]
# for key , value in h5timeseries['timeseries'].attrs.iteritems():
# group.attrs[key]=value
# h5rsquared.close()
h5se.close()
h5rmse.close()
h5timeseries.close()
if __name__ == '__main__':
main(sys.argv[1:])
|
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import os
import socket
from unittest import TestCase
from mock.mock import patch
class TestHDP23StackAdvisor(TestCase):
def setUp(self):
import imp
self.maxDiff = None
self.testDirectory = os.path.dirname(os.path.abspath(__file__))
stackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/stack_advisor.py')
hdp206StackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py')
hdp21StackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/HDP/2.1/services/stack_advisor.py')
hdp22StackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/HDP/2.2/services/stack_advisor.py')
hdp23StackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/HDP/2.3/services/stack_advisor.py')
hdp23StackAdvisorClassName = 'HDP23StackAdvisor'
with open(stackAdvisorPath, 'rb') as fp:
imp.load_module('stack_advisor', fp, stackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
with open(hdp206StackAdvisorPath, 'rb') as fp:
imp.load_module('stack_advisor_impl', fp, hdp206StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
with open(hdp21StackAdvisorPath, 'rb') as fp:
imp.load_module('stack_advisor_impl', fp, hdp21StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
with open(hdp22StackAdvisorPath, 'rb') as fp:
imp.load_module('stack_advisor_impl', fp, hdp22StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
with open(hdp23StackAdvisorPath, 'rb') as fp:
stack_advisor_impl = imp.load_module('stack_advisor_impl', fp, hdp23StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
clazz = getattr(stack_advisor_impl, hdp23StackAdvisorClassName)
self.stackAdvisor = clazz()
# substitute method in the instance
self.get_system_min_uid_real = self.stackAdvisor.get_system_min_uid
self.stackAdvisor.get_system_min_uid = self.get_system_min_uid_magic
def load_json(self, filename):
file = os.path.join(self.testDirectory, filename)
with open(file, 'rb') as f:
data = json.load(f)
return data
def prepareHosts(self, hostsNames):
hosts = { "items": [] }
for hostName in hostsNames:
nextHost = {"Hosts":{"host_name" : hostName}}
hosts["items"].append(nextHost)
return hosts
@patch('__builtin__.open')
@patch('os.path.exists')
def get_system_min_uid_magic(self, exists_mock, open_mock):
class MagicFile(object):
def read(self):
return """
#test line UID_MIN 200
UID_MIN 500
"""
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __enter__(self):
return self
exists_mock.return_value = True
open_mock.return_value = MagicFile()
return self.get_system_min_uid_real()
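# Note (assumption): MagicFile above stands in for the uid-limits file
# (presumably /etc/login.defs) that get_system_min_uid() reads; the mocked
# content is set up so the real implementation should return 500 while
# skipping the commented test line.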
def fqdn_mock_result(value=None):
return 'c6401.ambari.apache.org' if value is None else value
@patch('socket.getfqdn', side_effect=fqdn_mock_result)
def test_getComponentLayoutValidations_sparkts_no_hive(self, socket_mock):
""" Test SparkTS is picked when Hive is not installed """
hosts = self.load_json("sparkts-host.json")
services = self.load_json("services-sparkts.json")
componentsListList = [service["components"] for service in services["services"]]
componentsList = [item for sublist in componentsListList for item in sublist]
sparkTS = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "SPARK_THRIFTSERVER"]
hiveMetaStore = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "HIVE_METASTORE"]
self.assertEquals(len(sparkTS), 1)
self.assertEquals(len(hiveMetaStore), 0)
validations = self.stackAdvisor.getComponentLayoutValidations(services, hosts)
expected = {'component-name': 'SPARK_THRIFTSERVER', 'message': 'SPARK_THRIFTSERVER requires HIVE_METASTORE to be selected/deployed.', 'type': 'host-component', 'level': 'ERROR'}
self.assertEquals(validations[0], expected)
@patch('socket.getfqdn', side_effect=fqdn_mock_result)
def test_getComponentLayoutValidations_sparkts_with_hive(self, socket_mock):
""" Test SparkTS is picked when Hive is installed """
hosts = self.load_json("sparkts-host.json")
services = self.load_json("services-sparkts-hive.json")
componentsListList = [service["components"] for service in services["services"]]
componentsList = [item for sublist in componentsListList for item in sublist]
sparkTS = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "SPARK_THRIFTSERVER"]
hiveMetaStore = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "HIVE_METASTORE"]
self.assertEquals(len(sparkTS), 1)
self.assertEquals(len(hiveMetaStore), 1)
validations = self.stackAdvisor.getComponentLayoutValidations(services, hosts)
self.assertEquals(len(validations), 0)
def test_recommendHDFSConfigurations(self):
configurations = {
"hdfs-site": {
"properties": {
"dfs.namenode.inode.attributes.provider.class": "org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer",
}
},
"ranger-hdfs-plugin-properties": {
"properties": {
"ranger-hdfs-plugin-enabled": "No"
}
}
}
clusterData = {
"totalAvailableRam": 2048,
"hBaseInstalled": True,
"hbaseRam": 112,
"reservedRam": 128
}
hosts = {
"items": [
{
"Hosts": {
"disk_info": [{
"size": '8',
"mountpoint": "/"
}]
}
}]}
services = {
"services":
[
{
"StackServices": {
"service_name" : "HDFS",
"service_version" : "2.6.0.2.2"
},
"components": [
]
}
],
"Versions": {
"stack_version": "2.3"
},
"configurations": configurations,
"ambari-server-properties": {"ambari-server.user":"ambari_user"}
}
# Test with Ranger HDFS plugin disabled
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations['hdfs-site']['property_attributes']['dfs.namenode.inode.attributes.provider.class'], {'delete': 'true'}, "Test with Ranger HDFS plugin is disabled")
# Test with Ranger HDFS plugin is enabled
configurations['hdfs-site']['properties'] = {}
configurations['hdfs-site']['property_attributes'] = {}
services['configurations']['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled'] = 'Yes'
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations['hdfs-site']['properties']['dfs.namenode.inode.attributes.provider.class'], 'org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer', "Test with Ranger HDFS plugin is enabled")
def test_recommendYARNConfigurations(self):
configurations = {}
servicesList = ["YARN"]
components = []
hosts = {
"items" : [
{
"Hosts" : {
"cpu_count" : 6,
"total_mem" : 50331648,
"disk_info" : [
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"}
],
"public_host_name" : "c6401.ambari.apache.org",
"host_name" : "c6401.ambari.apache.org"
}
}
]
}
services = {
"context" : {
"call_type" : "recommendConfigurations"
},
"services" : [ {
"StackServices":{
"service_name": "YARN",
},
"Versions": {
"stack_version": "2.3"
},
"components": [
{
"StackServiceComponents": {
"component_name": "NODEMANAGER",
"hostnames": ["c6401.ambari.apache.org"]
}
}
]
}
],
"configurations": {
"yarn-site": {
"properties": {
"yarn.authorization-provider": "org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer"
}
},
"ranger-yarn-plugin-properties": {
"properties": {
"ranger-yarn-plugin-enabled": "No"
}
}
}
}
clusterData = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
# Test with Ranger YARN plugin disabled
self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations['yarn-site']['property_attributes']['yarn.authorization-provider'], {'delete': 'true'}, "Test with Ranger YARN plugin is disabled")
# Test with Ranger YARN plugin is enabled
configurations['yarn-site']['properties'] = {}
configurations['yarn-site']['property_attributes'] = {}
services['configurations']['ranger-yarn-plugin-properties']['properties']['ranger-yarn-plugin-enabled'] = 'Yes'
self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations['yarn-site']['properties']['yarn.authorization-provider'], 'org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer', "Test with Ranger YARN plugin enabled")
def test_recommendKAFKAConfigurations(self):
configurations = {}
clusterData = {
"totalAvailableRam": 2048,
"hBaseInstalled": True,
"hbaseRam": 112,
"reservedRam": 128
}
services = {
"services":
[
{
"StackServices": {
"service_name" : "KAFKA",
"service_version" : "2.6.0.2.2"
}
},
{
"StackServices": {
"service_name": "RANGER",
"service_version": "0.5.0.2.3"
}
},
{
"StackServices": {
"service_name": "AMBARI_METRICS"
},
"components": [{
"StackServiceComponents": {
"component_name": "METRICS_COLLECTOR",
"hostnames": ["host1"]
}
}, {
"StackServiceComponents": {
"component_name": "METRICS_MONITOR",
"hostnames": ["host1"]
}
}]
},
{
"StackServices": {
"service_name": "ZOOKEEPER"
},
"components": [{
"StackServiceComponents": {
"component_name": "ZOOKEEPER_SERVER",
"hostnames": ["host1"]
}
}]
}
],
"Versions": {
"stack_version": "2.3"
},
"configurations": {
"core-site": {
"properties": {}
},
"cluster-env": {
"properties": {
"security_enabled" : "true"
},
"property_attributes": {}
},
"kafka-broker": {
"properties": {
"authorizer.class.name" : "kafka.security.auth.SimpleAclAuthorizer"
},
"property_attributes": {}
},
"ranger-kafka-plugin-properties": {
"properties": {
"ranger-kafka-plugin-enabled": "No",
"zookeeper.connect": ""
}
},
"kafka-log4j": {
"properties": {
"content": "kafka.logs.dir=logs"
}
},
"zoo.cfg" : {
"properties": {
"clientPort": "2181"
}
}
}
}
# Test authorizer.class.name with Ranger Kafka plugin disabled in non-kerberos environment
services['configurations']['cluster-env']['properties']['security_enabled'] = "false"
self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations['kafka-broker']['property_attributes']['authorizer.class.name'], {'delete': 'true'}, "Test authorizer.class.name with Ranger Kafka plugin is disabled in non-kerberos environment")
# Test authorizer.class.name with Ranger Kafka plugin disabled in kerberos environment
services['configurations']['cluster-env']['properties']['security_enabled'] = "true"
configurations['kafka-broker']['properties'] = {}
configurations['kafka-broker']['property_attributes'] = {}
services['configurations']['kafka-broker']['properties']['security.inter.broker.protocol'] = 'PLAINTEXTSASL'
services['configurations']['kafka-broker']['properties']['authorizer.class.name'] = 'org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer'
self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations['kafka-broker']['properties']['authorizer.class.name'], 'kafka.security.auth.SimpleAclAuthorizer' , "Test authorizer.class.name with Ranger Kafka plugin disabled in kerberos environment")
# Test authorizer.class.name with Ranger Kafka plugin enabled in non-kerberos environment
services['configurations']['cluster-env']['properties']['security_enabled'] = "false"
configurations['kafka-broker']['properties'] = {}
configurations['kafka-broker']['property_attributes'] = {}
del services['configurations']['kafka-broker']['properties']['security.inter.broker.protocol']
services['configurations']['kafka-broker']['properties']['authorizer.class.name'] = 'kafka.security.auth.SimpleAclAuthorizer'
services['configurations']['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'] = 'Yes'
self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations['kafka-broker']['properties']['authorizer.class.name'], 'org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer', "Test authorizer.class.name with Ranger Kafka plugin enabled in non-kerberos environment")
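# Test authorizer.class.name with Ranger Kafka plugin enabled while
# security.inter.broker.protocol is SASL (security_enabled left "false")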
services['configurations']['cluster-env']['properties']['security_enabled'] = "false"
configurations['kafka-broker']['properties'] = {}
configurations['kafka-broker']['property_attributes'] = {}
services['configurations']['kafka-broker']['properties']['security.inter.broker.protocol'] = 'PLAINTEXTSASL'
services['configurations']['kafka-broker']['properties']['authorizer.class.name'] = 'kafka.security.auth.SimpleAclAuthorizer'
services['configurations']['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'] = 'Yes'
self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations['kafka-broker']['properties']['authorizer.class.name'], 'org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer', "Test authorizer.class.name with Ranger Kafka plugin enabled in kerberos environment")
self.assertEquals(configurations['ranger-kafka-plugin-properties']['properties']['zookeeper.connect'], 'host1:2181')
# Test kafka-log4j content when Ranger plugin for Kafka is enabled
self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
log4jContent = services['configurations']['kafka-log4j']['properties']['content']
newRangerLog4content = "\nlog4j.appender.rangerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.rangerAppender.DatePattern='.'yyyy-MM-dd-HH\n" \
"log4j.appender.rangerAppender.File=${kafka.logs.dir}/ranger_kafka.log\nlog4j.appender.rangerAppender.layout" \
"=org.apache.log4j.PatternLayout\nlog4j.appender.rangerAppender.layout.ConversionPattern=%d{ISO8601} %p [%t] %C{6} (%F:%L) - %m%n\n" \
"log4j.logger.org.apache.ranger=INFO, rangerAppender"
expectedLog4jContent = log4jContent + newRangerLog4content
self.assertEquals(configurations['kafka-log4j']['properties']['content'], expectedLog4jContent, "Test kafka-log4j content when Ranger plugin for Kafka is enabled")
# Test kafka.metrics.reporters when AMBARI_METRICS is present in services
self.stackAdvisor.recommendKAFKAConfigurations(configurations, clusterData, services, None)
self.assertEqual(configurations['kafka-broker']['properties']['kafka.metrics.reporters'],
'org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter')
def test_recommendHBASEConfigurations(self):
configurations = {}
clusterData = {
"totalAvailableRam": 2048,
"hBaseInstalled": True,
"hbaseRam": 112,
"reservedRam": 128
}
expected = {
"hbase-site": {
"properties": {
"hbase.bucketcache.size": "92160",
"hbase.bucketcache.percentage.in.combinedcache": "1.0000",
"hbase.regionserver.global.memstore.size": "0.4",
"hfile.block.cache.size": "0.4",
"hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint",
"hbase.coprocessor.master.classes": "",
"hbase.coprocessor.regionserver.classes": "",
"hbase.region.server.rpc.scheduler.factory.class": "org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory",
'hbase.regionserver.wal.codec': 'org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec',
"hbase.bucketcache.ioengine": "offheap",
"phoenix.functions.allowUserDefinedFunctions": "true"
},
"property_attributes": {
"hbase.coprocessor.regionserver.classes": {
"delete": "true"
},
"hbase.bucketcache.percentage.in.combinedcache": {
"delete": "true"
}
}
},
"hbase-env": {
"properties": {
"hbase_master_heapsize": "1024",
"hbase_max_direct_memory_size": "94208",
"hbase_regionserver_heapsize": "20480"
}
}
}
services = {
"services":
[{"StackServices":
{"service_name" : "HDFS",
"service_version" : "2.6.0.2.2"
},
"components":[
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/DATANODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1+",
"component_category":"SLAVE",
"component_name":"DATANODE",
"custom_commands":[
],
"display_name":"DataNode",
"is_client":"false",
"is_master":"false",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1"
]
},
"dependencies":[
]
},
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/JOURNALNODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"0+",
"component_category":"SLAVE",
"component_name":"JOURNALNODE",
"custom_commands":[
],
"display_name":"JournalNode",
"is_client":"false",
"is_master":"false",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1"
]
},
"dependencies":[
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/JOURNALNODE/dependencies/HDFS_CLIENT",
"Dependencies":{
"component_name":"HDFS_CLIENT",
"dependent_component_name":"JOURNALNODE",
"dependent_service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2"
}
}
]
},
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/NAMENODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1-2",
"component_category":"MASTER",
"component_name":"NAMENODE",
"custom_commands":[
"DECOMMISSION",
"REBALANCEHDFS"
],
"display_name":"NameNode",
"is_client":"false",
"is_master":"true",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host2"
]
},
"dependencies":[
]
},
],
}],
"Versions": {
"stack_version": "2.3"
},
"configurations": {
"yarn-site": {
"properties": {
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "2048"
}
},
"hbase-env": {
"properties": {
"phoenix_sql_enabled": "true"
}
},
"hbase-site": {
"properties": {
"hbase.coprocessor.regionserver.classes": ""
}
}
}
}
# Test
self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations, expected)
# Test
clusterData['hbaseRam'] = '4'
expected["hbase-site"]["property_attributes"]["hbase.bucketcache.size"] = {"delete": "true"}
expected["hbase-site"]["property_attributes"]["hbase.bucketcache.ioengine"] = {"delete": "true"}
expected["hbase-site"]["property_attributes"]["hbase.bucketcache.percentage.in.combinedcache"] = {"delete": "true"}
expected["hbase-env"]["property_attributes"] = {"hbase_max_direct_memory_size" : {"delete": "true"}}
expected["hbase-env"]["properties"]["hbase_master_heapsize"] = "1024"
expected["hbase-env"]["properties"]["hbase_regionserver_heapsize"] = "4096"
self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations, expected)
# Test - default recommendations should have certain configs deleted. HAS TO BE LAST TEST.
services["configurations"] = {"hbase-site": {"properties": {"phoenix.functions.allowUserDefinedFunctions": '', "hbase.rpc.controllerfactory.class": '', "hbase.region.server.rpc.scheduler.factory.class": ''}}}
configurations = {}
self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations['hbase-site']['property_attributes']['phoenix.functions.allowUserDefinedFunctions'], {'delete': 'true'})
self.assertEquals(configurations['hbase-site']['property_attributes']['hbase.rpc.controllerfactory.class'], {'delete': 'true'})
self.assertEquals(configurations['hbase-site']['property_attributes']['hbase.region.server.rpc.scheduler.factory.class'], {'delete': 'true'})
self.assertEquals(configurations['hbase-site']['properties']['hbase.regionserver.wal.codec'], "org.apache.hadoop.hbase.regionserver.wal.WALCellCodec")
def test_recommendHiveConfigurations(self):
self.maxDiff = None
configurations = {
"yarn-site": {
"properties": {
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "8192",
},
}
}
clusterData = {
"cpu": 4,
"mapMemory": 3000,
"amMemory": 2000,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 256
}
expected = {
'yarn-site': {
'properties': {
'yarn.scheduler.minimum-allocation-mb': '256',
'yarn.scheduler.maximum-allocation-mb': '8192'
}
},
'hive-env': {
'properties': {
'hive_exec_orc_storage_strategy': 'SPEED',
'hive_security_authorization': 'None',
'hive_timeline_logging_enabled': 'true',
'hive_txn_acid': 'off',
'hive.atlas.hook': 'false'
}
},
'hive-site': {
'properties': {
'hive.server2.enable.doAs': 'true',
'hive.server2.tez.default.queues': "queue1,queue2",
'hive.server2.tez.initialize.default.sessions': 'false',
'hive.server2.tez.sessions.per.default.queue': '1',
'hive.auto.convert.join.noconditionaltask.size': '214748364',
'hive.compactor.initiator.on': 'false',
'hive.compactor.worker.threads': '0',
'hive.compute.query.using.stats': 'true',
'hive.exec.dynamic.partition.mode': 'strict',
'hive.exec.failure.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
'hive.exec.orc.compression.strategy': 'SPEED',
'hive.exec.orc.default.compress': 'ZLIB',
'hive.exec.orc.default.stripe.size': '67108864',
'hive.exec.orc.encoding.strategy': 'SPEED',
'hive.exec.post.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
'hive.exec.pre.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
'hive.exec.reducers.bytes.per.reducer': '67108864',
'hive.execution.engine': 'mr',
'hive.optimize.index.filter': 'true',
'hive.optimize.sort.dynamic.partition': 'false',
'hive.prewarm.enabled': 'false',
'hive.prewarm.numcontainers': '3',
'hive.security.authorization.enabled': 'false',
'hive.server2.use.SSL': 'false',
'hive.stats.fetch.column.stats': 'true',
'hive.stats.fetch.partition.stats': 'true',
'hive.support.concurrency': 'false',
'hive.tez.auto.reducer.parallelism': 'true',
'hive.tez.container.size': '768',
'hive.tez.dynamic.partition.pruning': 'true',
'hive.tez.java.opts': '-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps',
'hive.txn.manager': 'org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager',
'hive.vectorized.execution.enabled': 'true',
'hive.vectorized.execution.reduce.enabled': 'false',
'hive.security.metastore.authorization.manager': 'org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider',
'hive.security.authorization.manager': 'org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory'
},
'property_attributes': {
'hive.auto.convert.join.noconditionaltask.size': {'maximum': '644245094'},
'hive.server2.authentication.pam.services': {'delete': 'true'},
'hive.server2.custom.authentication.class': {'delete': 'true'},
'hive.server2.authentication.kerberos.principal': {'delete': 'true'},
'hive.server2.authentication.kerberos.keytab': {'delete': 'true'},
'hive.server2.authentication.ldap.url': {'delete': 'true'},
'hive.server2.tez.default.queues': {
'entries': [{'value': 'queue1', 'label': 'queue1 queue'}, {'value': 'queue2', 'label': 'queue2 queue'}]
},
'atlas.cluster.name': {'delete': 'true'},
'atlas.rest.address': {'delete': 'true'},
'datanucleus.rdbms.datastoreAdapterClassName': {'delete': 'true'},
'hive.tez.container.size': {'maximum': '8192', 'minimum': '256'}
}
},
'hiveserver2-site': {
'properties': {
},
'property_attributes': {
'hive.security.authorization.manager': {'delete': 'true'},
'hive.security.authenticator.manager': {'delete': 'true'}
}
},
'webhcat-site': {
'properties': {
'templeton.hadoop.queue.name': 'queue2'
}
}
}
services = {
"Versions": {
"parent_stack_version": "2.2",
"stack_name": "HDP",
"stack_version": "2.3",
"stack_hierarchy": {
"stack_name": "HDP",
"stack_versions": ["2.2", "2.1", "2.0.6"]
}
},
"services": [
{
"href": "/api/v1/stacks/HDP/versions/2.2/services/YARN",
"StackServices": {
"service_name": "YARN",
"service_version": "2.6.0.2.2",
"stack_name": "HDP",
"stack_version": "2.2"
},
"components": [
{
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "APP_TIMELINE_SERVER",
"display_name": "App Timeline Server",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1+",
"component_category": "SLAVE",
"component_name": "NODEMANAGER",
"display_name": "NodeManager",
"is_client": "false",
"is_master": "false",
"hostnames": [
"c6403.ambari.apache.org"
]
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1-2",
"component_category": "MASTER",
"component_name": "RESOURCEMANAGER",
"display_name": "ResourceManager",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1+",
"component_category": "CLIENT",
"component_name": "YARN_CLIENT",
"display_name": "YARN Client",
"is_client": "true",
"is_master": "false",
"hostnames": []
},
"dependencies": []
}
]
},
],
"configurations": {
"capacity-scheduler": {
"properties": {
"capacity-scheduler" :"yarn.scheduler.capacity.root.queues=queue1,queue2"
}
},
"hive-env": {
"properties": {
"hive.atlas.hook": "false"
}
},
"hive-site": {
"properties": {
"hive.server2.authentication": "none",
"hive.server2.authentication.ldap.url": "",
"hive.server2.authentication.ldap.baseDN": "",
"hive.server2.authentication.kerberos.keytab": "",
"hive.server2.authentication.kerberos.principal": "",
"hive.server2.authentication.pam.services": "",
"hive.server2.custom.authentication.class": "",
"hive.cbo.enable": "true"
}
},
"hiveserver2-site": {
"properties": {
"hive.security.authorization.manager": "",
"hive.security.authenticator.manager": ""
}
}
},
"changed-configurations": [ ]
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/c6401.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6401.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6401.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
},
{
"href" : "/api/v1/hosts/c6402.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6402.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6402.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
},
{
"href" : "/api/v1/hosts/c6403.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6403.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6403.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
}
]
}
self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
# Test JDK1.7
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.7.3_23'}
self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
# Test JDK1.8
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.8_44'}
expected['hive-site']['properties']['hive.tez.java.opts'] = "-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps"
self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
# Test JDK1.9
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.9.2_44'}
expected['hive-site']['properties']['hive.tez.java.opts'] = "-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps"
self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
def test_recommendHiveConfigurations_with_atlas(self):
self.maxDiff = None
configurations = {
"yarn-site": {
"properties": {
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "8192",
},
}
}
clusterData = {
"cpu": 4,
"mapMemory": 3000,
"amMemory": 2000,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 256
}
expected = {
'yarn-site': {
'properties': {
'yarn.scheduler.minimum-allocation-mb': '256',
'yarn.scheduler.maximum-allocation-mb': '8192'
}
},
'hive-env': {
'properties': {
'hive_exec_orc_storage_strategy': 'SPEED',
'hive_security_authorization': 'None',
'hive_timeline_logging_enabled': 'true',
'hive_txn_acid': 'off',
'hive.atlas.hook': 'true'
}
},
'hive-site': {
'properties': {
'hive.server2.enable.doAs': 'true',
'hive.server2.tez.default.queues': "queue1,queue2",
'hive.server2.tez.initialize.default.sessions': 'false',
'hive.server2.tez.sessions.per.default.queue': '1',
'hive.auto.convert.join.noconditionaltask.size': '214748364',
'hive.compactor.initiator.on': 'false',
'hive.compactor.worker.threads': '0',
'hive.compute.query.using.stats': 'true',
'hive.exec.dynamic.partition.mode': 'strict',
'hive.exec.failure.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
'hive.exec.orc.compression.strategy': 'SPEED',
'hive.exec.orc.default.compress': 'ZLIB',
'hive.exec.orc.default.stripe.size': '67108864',
'hive.exec.orc.encoding.strategy': 'SPEED',
'hive.exec.post.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook,org.apache.atlas.hive.hook.HiveHook',
'hive.exec.pre.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
'hive.exec.reducers.bytes.per.reducer': '67108864',
'hive.execution.engine': 'mr',
'hive.optimize.index.filter': 'true',
'hive.optimize.sort.dynamic.partition': 'false',
'hive.prewarm.enabled': 'false',
'hive.prewarm.numcontainers': '3',
'hive.security.authorization.enabled': 'false',
'hive.server2.use.SSL': 'false',
'hive.stats.fetch.column.stats': 'true',
'hive.stats.fetch.partition.stats': 'true',
'hive.support.concurrency': 'false',
'hive.tez.auto.reducer.parallelism': 'true',
'hive.tez.container.size': '768',
'hive.tez.dynamic.partition.pruning': 'true',
'hive.tez.java.opts': '-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps',
'hive.txn.manager': 'org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager',
'hive.vectorized.execution.enabled': 'true',
'hive.vectorized.execution.reduce.enabled': 'false',
'hive.security.metastore.authorization.manager': 'org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider',
'hive.security.authorization.manager': 'org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory'
},
'property_attributes': {
'hive.auto.convert.join.noconditionaltask.size': {'maximum': '644245094'},
'hive.tez.container.size': {'maximum': '8192', 'minimum': '256'},
'hive.server2.authentication.pam.services': {'delete': 'true'},
'hive.server2.custom.authentication.class': {'delete': 'true'},
'hive.server2.authentication.kerberos.principal': {'delete': 'true'},
'hive.server2.authentication.kerberos.keytab': {'delete': 'true'},
'hive.server2.authentication.ldap.url': {'delete': 'true'},
'hive.server2.tez.default.queues': {
'entries': [{'value': 'queue1', 'label': 'queue1 queue'}, {'value': 'queue2', 'label': 'queue2 queue'}]
},
'atlas.cluster.name': {'delete': 'true'},
'atlas.rest.address': {'delete': 'true'},
'datanucleus.rdbms.datastoreAdapterClassName': {'delete': 'true'}
}
},
'hiveserver2-site': {
'properties': {
},
'property_attributes': {
'hive.security.authorization.manager': {'delete': 'true'},
'hive.security.authenticator.manager': {'delete': 'true'}
}
},
'webhcat-site': {
'properties': {
'templeton.hadoop.queue.name': 'queue2'
}
}
}
services = {
"Versions": {
"parent_stack_version": "2.2",
"stack_name": "HDP",
"stack_version": "2.3",
"stack_hierarchy": {
"stack_name": "HDP",
"stack_versions": ["2.2", "2.1", "2.0.6"]
}
},
"services": [
{
"href": "/api/v1/stacks/HDP/versions/2.2/services/YARN",
"StackServices": {
"service_name": "YARN",
"service_version": "2.6.0.2.2",
"stack_name": "HDP",
"stack_version": "2.2"
},
"components": [
{
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "APP_TIMELINE_SERVER",
"display_name": "App Timeline Server",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1+",
"component_category": "SLAVE",
"component_name": "NODEMANAGER",
"display_name": "NodeManager",
"is_client": "false",
"is_master": "false",
"hostnames": [
"c6403.ambari.apache.org"
]
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1-2",
"component_category": "MASTER",
"component_name": "RESOURCEMANAGER",
"display_name": "ResourceManager",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1+",
"component_category": "CLIENT",
"component_name": "YARN_CLIENT",
"display_name": "YARN Client",
"is_client": "true",
"is_master": "false",
"hostnames": []
},
"dependencies": []
}
]
},
{
"href": "/api/v1/stacks/HDP/versions/2.2/services/ATLAS",
"StackServices": {
"service_name": "ATLAS",
"service_version": "2.6.0.2.2",
"stack_name": "HDP",
"stack_version": "2.3"
},
"components": [
{
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "ATLAS_SERVER",
"display_name": "Atlas Server",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
}
]
}
],
"configurations": {
"capacity-scheduler": {
"properties": {
"capacity-scheduler" :"yarn.scheduler.capacity.root.queues=queue1,queue2"
}
},
"hive-env": {
"properties": {
"hive.atlas.hook": "false"
}
},
"hive-site": {
"properties": {
"hive.server2.authentication": "none",
"hive.server2.authentication.ldap.url": "",
"hive.server2.authentication.ldap.baseDN": "",
"hive.server2.authentication.kerberos.keytab": "",
"hive.server2.authentication.kerberos.principal": "",
"hive.server2.authentication.pam.services": "",
"hive.server2.custom.authentication.class": "",
"hive.cbo.enable": "true"
}
},
"hiveserver2-site": {
"properties": {
"hive.security.authorization.manager": "",
"hive.security.authenticator.manager": ""
}
}
},
"changed-configurations": [ ]
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/c6401.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6401.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6401.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
},
{
"href" : "/api/v1/hosts/c6402.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6402.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6402.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
},
{
"href" : "/api/v1/hosts/c6403.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6403.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6403.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
}
]
}
self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
@patch('os.path.exists')
@patch('os.path.isdir')
@patch('os.listdir')
def test_recommendTezConfigurations(self, os_listdir_mock, os_isdir_mock, os_exists_mock):
os_exists_mock.return_value = True
os_isdir_mock.return_value = True
os_listdir_mock.return_value = ['TEZ{0.7.0.2.3.0.0-2155}']
self.maxDiff = None
configurations = {
"yarn-site": {
"properties": {
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "8192",
},
},
"capacity-scheduler": {
"properties": {
"yarn.scheduler.capacity.root.queues": "queue1,queue2"
}
}
}
clusterData = {
"cpu": 4,
"mapMemory": 3000,
"amMemory": 2000,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 256
}
expected = {
"capacity-scheduler": {
"properties": {
"yarn.scheduler.capacity.root.queues": "queue1,queue2"
}
},
"tez-site": {
"properties": {
"tez.task.resource.memory.mb": "768",
"tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
"tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
"tez.runtime.io.sort.mb": "202",
"tez.session.am.dag.submit.timeout.secs": "600",
"tez.runtime.unordered.output.buffer.size-mb": "57",
"tez.am.resource.memory.mb": "4000",
"tez.queue.name": "queue2",
}
},
"yarn-site": {
"properties": {
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "8192"
}
}
}
services = {
"Versions": {
"parent_stack_version": "2.2",
"stack_name": "HDP",
"stack_version": "2.3",
"stack_hierarchy": {
"stack_name": "HDP",
"stack_versions": ["2.2", "2.1", "2.0.6"]
}
},
"services": [
{
"href": "/api/v1/stacks/HDP/versions/2.2/services/YARN",
"StackServices": {
"service_name": "YARN",
"service_version": "2.6.0.2.2",
"stack_name": "HDP",
"stack_version": "2.2"
},
"components": [
{
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "APP_TIMELINE_SERVER",
"display_name": "App Timeline Server",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1+",
"component_category": "SLAVE",
"component_name": "NODEMANAGER",
"display_name": "NodeManager",
"is_client": "false",
"is_master": "false",
"hostnames": [
"c6403.ambari.apache.org"
]
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1-2",
"component_category": "MASTER",
"component_name": "RESOURCEMANAGER",
"display_name": "ResourceManager",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1+",
"component_category": "CLIENT",
"component_name": "YARN_CLIENT",
"display_name": "YARN Client",
"is_client": "true",
"is_master": "false",
"hostnames": []
},
"dependencies": []
}
]
},
],
"configurations": configurations,
"changed-configurations": [ ],
"ambari-server-properties": {}
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/c6401.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6401.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6401.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
},
{
"href" : "/api/v1/hosts/c6402.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6402.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6402.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
},
{
"href" : "/api/v1/hosts/c6403.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6403.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6403.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
}
]
}
self.stackAdvisor.recommendTezConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
server_host = socket.getfqdn()
for host in hosts["items"]:
if server_host == host["Hosts"]["host_name"]:
server_host = host["Hosts"]["public_host_name"]
tez_ui_url = "http://" + server_host + ":8080/#/main/view/TEZ/tez_cluster_instance"
# Test JDK1.7
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.7.3_23'}
expected['tez-site']['properties']['tez.tez-ui.history-url.base'] = tez_ui_url
self.stackAdvisor.recommendTezConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
# Test JDK1.8
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.8_44'}
expected['tez-site']['properties']['tez.am.launch.cmd-opts'] = "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB"
expected['tez-site']['properties']['tez.task.launch.cmd-opts'] = "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB"
expected['tez-site']['properties']['tez.tez-ui.history-url.base'] = tez_ui_url
self.stackAdvisor.recommendTezConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
# Test JDK1.9
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.9.2_44'}
expected['tez-site']['properties']['tez.am.launch.cmd-opts'] = "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB"
expected['tez-site']['properties']['tez.task.launch.cmd-opts'] = "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB"
expected['tez-site']['properties']['tez.tez-ui.history-url.base'] = tez_ui_url
self.stackAdvisor.recommendTezConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
def test_validateHiveConfigurations(self):
properties = {"hive_security_authorization": "None",
"hive.exec.orc.default.stripe.size": "8388608",
'hive.tez.container.size': '2048',
'hive.tez.java.opts': '-server -Xmx546m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps',
'hive.auto.convert.join.noconditionaltask.size': '1100000000'}
recommendedDefaults = {'hive.tez.container.size': '1024',
'hive.tez.java.opts': '-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps',
'hive.auto.convert.join.noconditionaltask.size': '1000000000'}
configurations = {
"hive-site": {
"properties": {"hive.security.authorization.enabled": "true", 'hive.tez.java.opts': '-server -Djava.net.preferIPv4Stack=true'}
},
"hive-env": {
"properties": {"hive_security_authorization": "None"}
}
}
services = {
"services": []
}
# Test for 'ranger-hive-plugin-properties' not being in configs
res_expected = []
res = self.stackAdvisor.validateHiveConfigurations(properties, recommendedDefaults, configurations, services, {})
self.assertEquals(res, res_expected)
# This test intentionally calls all validate methods with
# incorrect parameters (empty configs)
def test_noRiskyDictLookups(self):
properties = {}
recommendedDefaults = {}
configurations = {"core-site": {"properties": {}}}
services = {
"services": [],
"Versions": {
"stack_name": "HDP",
"stack_version": "2.3"
},
"configurations": configurations
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/c6401.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"disk_info" : [
{
"available" : "4564632",
"used" : "5230344",
"percent" : "54%",
"size" : "10319160",
"type" : "ext4",
"mountpoint" : "/"
},
{
"available" : "1832436",
"used" : "0",
"percent" : "0%",
"size" : "1832436",
"type" : "tmpfs",
"mountpoint" : "/dev/shm"
}
],
"host_name" : "c6401.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6401.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
}
]
}
def return_c6401_hostname(services, service_name, component_name):
return ["c6401.ambari.apache.org"]
self.stackAdvisor.getComponentHostNames = return_c6401_hostname
validators = self.stackAdvisor.getServiceConfigurationValidators()
# Setting up empty configs and services info
for serviceName, validator in validators.items():
services["services"].extend([{"StackServices": {"service_name": serviceName},
"components": []}])
for siteName in validator.keys():
configurations[siteName] = {"properties": {}}
# Emulate enabled RANGER
services["services"].extend([{"StackServices": {"service_name": "RANGER"},
"components": []}])
configurations["ranger-hbase-plugin-properties"] = {
"ranger-hbase-plugin-enabled": "Yes"
}
exceptionThrown = False
try:
recommendations = self.stackAdvisor.recommendConfigurations(services, hosts)
except Exception:
exceptionThrown = True
# recommendConfigurations must cope with the empty configs without raising
self.assertFalse(exceptionThrown)
def test_recommendRangerConfigurations(self):
clusterData = {}
# Recommend for a non-existing DB_FLAVOR with http enabled, HDP-2.3
services = {
"Versions": {
"parent_stack_version": "2.2",
"stack_name": "HDP",
"stack_version": "2.3",
"stack_hierarchy": {
"stack_name": "HDP",
"stack_versions": ["2.2", "2.1", "2.0.6"]
}
},
"services": [
{
"StackServices": {
"service_name": "RANGER",
"service_version": "0.5.0.2.3"
},
"components": [
{
"StackServiceComponents": {
"component_name": "RANGER_ADMIN",
"hostnames": ["host1"]
}
}
]
},
{
"href": "/api/v1/stacks/HDP/versions/2.3/services/KNOX",
"StackServices": {
"service_name": "KNOX",
"service_version": "0.9.0.2.3",
"stack_name": "HDP",
"stack_version": "2.3"
},
"components": [
{
"href": "/api/v1/stacks/HDP/versions/2.3/services/KNOX/components/KNOX_GATEWAY",
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1+",
"component_category": "MASTER",
"component_name": "KNOX_GATEWAY",
"display_name": "Knox Gateway",
"is_client": "false",
"is_master": "true",
"hostnames": ["c6401.ambari.apache.org"]
},
"dependencies": []
}
]
}
],
"configurations": {
"admin-properties": {
"properties": {
"DB_FLAVOR": "NOT_EXISTING",
}
},
"ranger-admin-site": {
"properties": {
"ranger.service.http.port": "7777",
"ranger.service.http.enabled": "true",
"ranger.sso.providerurl": "",
}
}
},
"ambari-server-properties": {
"ambari.ldap.isConfigured" : "true",
"authentication.ldap.bindAnonymously" : "false",
"authentication.ldap.baseDn" : "dc=apache,dc=org",
"authentication.ldap.groupNamingAttr" : "cn",
"authentication.ldap.primaryUrl" : "c6403.ambari.apache.org:389",
"authentication.ldap.userObjectClass" : "posixAccount",
"authentication.ldap.secondaryUrl" : "c6403.ambari.apache.org:389",
"authentication.ldap.usernameAttribute" : "uid",
"authentication.ldap.dnAttribute" : "dn",
"authentication.ldap.useSSL" : "false",
"authentication.ldap.managerPassword" : "/etc/ambari-server/conf/ldap-password.dat",
"authentication.ldap.groupMembershipAttr" : "memberUid",
"authentication.ldap.groupObjectClass" : "posixGroup",
"authentication.ldap.managerDn" : "uid=hdfs,ou=people,ou=dev,dc=apache,dc=org"
}
}
expected = {
'admin-properties': {
'properties': {
'policymgr_external_url': 'http://host1:7777'
}
},
'ranger-ugsync-site': {
'properties': {
'ranger.usersync.group.objectclass': 'posixGroup',
'ranger.usersync.group.nameattribute': 'cn',
'ranger.usersync.group.memberattributename': 'memberUid',
'ranger.usersync.ldap.binddn': 'uid=hdfs,ou=people,ou=dev,dc=apache,dc=org',
'ranger.usersync.ldap.user.nameattribute': 'uid',
'ranger.usersync.ldap.user.objectclass': 'posixAccount',
'ranger.usersync.ldap.url': 'ldap://c6403.ambari.apache.org:389',
'ranger.usersync.ldap.searchBase': 'dc=apache,dc=org'
}
},
'ranger-admin-site': {
'properties': {
"ranger.audit.solr.zookeepers": "NONE",
"ranger.audit.source.type": "solr",
"ranger.sso.providerurl": "https://c6401.ambari.apache.org:8443/gateway/knoxsso/api/v1/websso"
}
},
'ranger-env': {
'properties': {
'ranger-storm-plugin-enabled': 'No',
}
},
'ranger-knox-security': {'properties': {}}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected)
# Recommend ranger.audit.solr.zookeepers when solrCloud is disabled
services['configurations']['ranger-env'] = {
"properties": {
"is_solrCloud_enabled": "false"
}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations['ranger-admin-site']['properties']['ranger.audit.solr.zookeepers'], 'NONE')
def test_recommendRangerKMSConfigurations(self):
clusterData = {}
services = {
"ambari-server-properties": {
"ambari-server.user": "root"
},
"Versions": {
"stack_version" : "2.3",
},
"services": [
{
"StackServices": {
"service_name": "RANGER_KMS",
"service_version": "0.5.0.2.3"
},
"components": [
{
"StackServiceComponents": {
"component_name": "RANGER_KMS_SERVER",
"hostnames": ["host1"]
}
}
]
}
],
"configurations": {
"kms-env": {
"properties": {
"kms_user": "kmsname"
}
},
"core-site": {
"properties": {
"fs.defaultFS": "hdfs://host1:8020"
}
},
'ranger-kms-audit': {
'properties': {
}
},
'kms-properties': {
'properties': {
'DB_FLAVOR': 'ORACLE',
'db_host' : 'c6401.ambari.apache.org:1521:XE',
'db_name' : "XE"
}
},
'cluster-env': {
'properties': {
'security_enabled': 'false'
}
}
},
"forced-configurations": []
}
expected = {
'kms-properties': {
'properties': {}
},
'dbks-site': {
'properties': {
"ranger.ks.jpa.jdbc.driver" : "oracle.jdbc.driver.OracleDriver",
"ranger.ks.jpa.jdbc.url" : "jdbc:oracle:thin:@c6401.ambari.apache.org:1521:XE"
}
},
'core-site': {
'properties': {
}
},
'ranger-kms-audit': {
'properties': {
}
},
'kms-site': {
'properties': {
},
'property_attributes': {
'hadoop.kms.proxyuser.HTTP.hosts': {'delete': 'true'},
'hadoop.kms.proxyuser.HTTP.users': {'delete': 'true'},
'hadoop.kms.proxyuser.root.hosts': {'delete': 'true'},
'hadoop.kms.proxyuser.root.users': {'delete': 'true'}
}
}
}
# non kerberized cluster. There should be no proxyuser configs
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerKMSConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected)
# kerberized cluster
services['services'].append({
"StackServices": {
"service_name": "KERBEROS"
}
})
services['configurations']['cluster-env']['properties']['security_enabled'] = "true"
services['configurations']['cluster-env']['properties']['ambari_principal_name'] = "ambari-cl1@EXAMPLE.COM"
expected = {
'kms-properties': {
'properties': {}
},
'dbks-site': {
'properties': {
"ranger.ks.jpa.jdbc.driver" : "oracle.jdbc.driver.OracleDriver",
"ranger.ks.jpa.jdbc.url" : "jdbc:oracle:thin:@c6401.ambari.apache.org:1521:XE"
}
},
'core-site': {
'properties': {
'hadoop.proxyuser.kmsname.groups': '*'
}
},
'ranger-kms-audit': {
'properties': {
}
},
'kms-site': {
'properties': {
'hadoop.kms.proxyuser.HTTP.hosts': '*',
'hadoop.kms.proxyuser.HTTP.users': '*',
'hadoop.kms.proxyuser.ambari-cl1.hosts': '*',
'hadoop.kms.proxyuser.ambari-cl1.users': '*'
}
}
}
# on a kerberized cluster the proxyuser properties should be recommended
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerKMSConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected)
recommendedConfigurations = {}
services['changed-configurations'] = [
{
'type': 'kms-env',
'name': 'kms_user',
'old_value': 'kmsname'
}
]
services['configurations']['kms-env']['properties']['kms_user'] = 'kmsnew'
expected['core-site'] = {
'properties': {
'hadoop.proxyuser.kmsnew.groups': '*'
},
'property_attributes': {
'hadoop.proxyuser.kmsname.groups': {
'delete': 'true'
}
}
}
# kms_user was changed, old property should be removed
self.stackAdvisor.recommendRangerKMSConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected)
def test_recommendStormConfigurations(self):
self.maxDiff = None
configurations = {
"storm-site": {
"properties": {
"storm.topology.submission.notifier.plugin.class": "foo"
}
},
"ranger-storm-plugin-properties": {
"properties": {
"ranger-storm-plugin-enabled": "No"
}
}
}
clusterData = {
"cpu": 4,
"mapMemory": 3000,
"amMemory": 2000,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 256
}
expected = {
'storm-site': {
'properties': {
'storm.topology.submission.notifier.plugin.class': 'foo,org.apache.atlas.storm.hook.StormAtlasHook',
},
"property_attributes":{
'nimbus.authorizer': {'delete':'true'}
}
},
"ranger-storm-plugin-properties": {
"properties": {
"ranger-storm-plugin-enabled": "No"
}
},
"storm-env": {
"properties": {
"storm.atlas.hook": "true"
}
}
}
services = {
"services": [
{
"href": "/api/v1/stacks/HDP/versions/2.2/services/ATLAS",
"StackServices": {
"service_name": "ATLAS",
"service_version": "2.6.0.2.2",
"stack_name": "HDP",
"stack_version": "2.3"
},
"components": [
{
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "ATLAS_SERVER",
"display_name": "Atlas Server",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
}
]
},
],
"configurations": {
"storm-site": {
"properties": {
"storm.topology.submission.notifier.plugin.class": "foo"
},
"property-attributes":{}
},
"ranger-storm-plugin-properties": {
"properties": {
"ranger-storm-plugin-enabled": "No"
}
},
"storm-env": {
"properties": {
"storm.atlas.hook": "false"
}
}
},
"changed-configurations": [ ]
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/c6401.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6401.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6401.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
}
]
}
self.stackAdvisor.recommendStormConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.7.3_23'}
self.stackAdvisor.recommendStormConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
services["services"] = []
services["configurations"]["storm-site"]["properties"]["storm.topology.submission.notifier.plugin.class"] = "org.apache.atlas.storm.hook.StormAtlasHook"
self.stackAdvisor.recommendStormConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(True, "storm.topology.submission.notifier.plugin.class" in configurations["storm-site"]["property_attributes"])
def test_recommendSqoopConfigurations(self):
self.maxDiff = None
configurations = {
"sqoop-site": {
"properties": {
"sqoop.job.data.publish.class": "foo"
}
}
}
clusterData = {
"cpu": 4,
"mapMemory": 3000,
"amMemory": 2000,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 256
}
expected = {
'sqoop-site': {
'properties': {
'sqoop.job.data.publish.class': 'org.apache.atlas.sqoop.hook.SqoopHook',
}
},
'sqoop-env': {
'properties': {
'sqoop.atlas.hook': 'true'
}
}
}
services = {
"services": [
{
"href": "/api/v1/stacks/HDP/versions/2.2/services/ATLAS",
"StackServices": {
"service_name": "ATLAS",
"service_version": "2.6.0.2.2",
"stack_name": "HDP",
"stack_version": "2.3"
},
"components": [
{
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "ATLAS_SERVER",
"display_name": "Atlas Server",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
}
]
},
],
"configurations": {
"sqoop-site": {
"properties": {
"sqoop.job.data.publish.class": "foo"
}
},
"sqoop-env": {
"properties": {
"sqoop.atlas.hook": "false"
}
}
},
"changed-configurations": [ ]
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/c6401.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6401.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6401.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
}
]
}
self.stackAdvisor.recommendSqoopConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.7.3_23'}
self.stackAdvisor.recommendSqoopConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
def test_recommendLogsearchConfiguration(self):
configurations = {
"logsearch-properties": {
"properties": {
"logsearch.collection.service.logs.numshards" : "5",
"logsearch.collection.service.logs.replication.factor": "0",
"logsearch.collection.audit.logs.numshards" : "5",
"logsearch.collection.audit.logs.replication.factor": "0"
}
}
}
clusterData = {
"cpu": 4,
"mapMemory": 3000,
"amMemory": 2000,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 256
}
expected = {
'logfeeder-env': {'property_attributes': {'logfeeder_external_solr_kerberos_keytab': {'visible': 'false'},
'logfeeder_external_solr_kerberos_principal': {'visible': 'false'}}},
'logsearch-common-env': {'properties': {'logsearch_external_solr_kerberos_enabled': 'false'},
'property_attributes': {'logsearch_external_solr_kerberos_enabled': {'visible': 'false'}}},
'logsearch-env': {'property_attributes': {'logsearch_external_solr_kerberos_keytab': {'visible': 'false'},
'logsearch_external_solr_kerberos_principal': {'visible': 'false'}}},
'logsearch-properties': {
'properties': {
"logsearch.collection.service.logs.numshards" : "2",
"logsearch.collection.service.logs.replication.factor": "1",
"logsearch.collection.audit.logs.numshards" : "2",
"logsearch.collection.audit.logs.replication.factor": "1"
},
"property_attributes": {
"logsearch.collection.service.logs.numshards": {
"minimum": "1",
"maximum": "3"
},
"logsearch.collection.audit.logs.numshards": {
"minimum": "1",
"maximum": "3"
}
}
}
}
services = {
"services": [
{
"href": "/api/v1/stacks/HDP/versions/2.3/services/AMBARI_INFRA",
"StackServices": {
"service_name": "AMBARI_INFRA",
"service_version": "2.6.0.2.2",
"stack_name": "HDP",
"stack_version": "2.3"
},
"components": [
{
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "INFRA_SOLR",
"display_name": "Infra Solr Instance",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
}
]
},
],
"configurations": {
"logsearch-properties": {
"properties": {
"logsearch.collection.numshards" : "5",
"logsearch.collection.replication.factor": "0"
}
}
},
"changed-configurations": [ ]
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/c6401.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6401.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6401.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
}
]
}
def return_c6401_hostname(services, service_name, component_name):
return ["c6401.ambari.apache.org"]
self.stackAdvisor.getComponentHostNames = return_c6401_hostname
self.stackAdvisor.recommendLogsearchConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
def test_validateRangerConfigurationsEnv(self):
properties = {
"ranger-kafka-plugin-enabled": "Yes",
}
recommendedDefaults = {
"ranger-kafka-plugin-enabled": "No",
}
configurations = {
"cluster-env": {
"properties": {
"security_enabled": "false",
}
}
}
services = {
"services":
[
{
"StackServices": {
"service_name" : "RANGER"
}
}
],
"configurations": {
"cluster-env": {
"properties": {
"security_enabled" : "false"
},
"property_attributes": {}
}
}
}
# Test with ranger plugin enabled, validation fails
res_expected = [{'config-type': 'ranger-env', 'message': 'Ranger Kafka plugin should not be enabled in non-kerberos environment.', 'type': 'configuration', 'config-name': 'ranger-kafka-plugin-enabled', 'level': 'WARN'}]
res = self.stackAdvisor.validateRangerConfigurationsEnv(properties, recommendedDefaults, configurations, services, {})
self.assertEquals(res, res_expected)
# Test for security_enabled is true
services['configurations']['cluster-env']['properties']['security_enabled'] = "true"
configurations['cluster-env']['properties']['security_enabled'] = "true"
res_expected = []
res = self.stackAdvisor.validateRangerConfigurationsEnv(properties, recommendedDefaults, configurations, services, {})
self.assertEquals(res, res_expected)
|
|
# -*- coding: utf-8 -*-
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Definition of some `meta` gates.
Contains meta gates, i.e.,
* DaggeredGate (Represents the inverse of an arbitrary gate)
* ControlledGate (Represents a controlled version of an arbitrary gate)
* Tensor/All (Applies a single-qubit gate to all supplied qubits)
Example:
.. code-block:: python
Tensor(H) | (qubit1, qubit2) # apply H to qubit #1 and #2
As well as the meta functions
* get_inverse (Tries to access the get_inverse member function of a gate and upon failure returns a DaggeredGate)
* C (Creates an n-ary controlled version of an arbitrary gate)
"""
from ._basics import BasicGate, NotInvertible
class ControlQubitError(Exception):
"""Exception thrown when wrong number of control qubits are supplied."""
class DaggeredGate(BasicGate):
"""
Wrapper class that allows executing the inverse of a gate, even when it does not define one.
If there is a replacement available, then there is also one for the inverse, namely the replacement function run
in reverse, while inverting all gates. This class enables using this emulation automatically.
A DaggeredGate is returned automatically when employing the get_inverse function on a gate which does not provide
a get_inverse() member function.
Example:
.. code-block:: python
with Dagger(eng):
MySpecialGate | qubits
will create a DaggeredGate if MySpecialGate does not implement get_inverse. If there is a decomposition function
available, an auto-replacer engine can automatically replace the inverted gate by a call to the decomposition
function inside a "with Dagger"-statement.
"""
def __init__(self, gate):
"""
Initialize a DaggeredGate representing the inverse of the gate 'gate'.
Args:
gate: Any gate object of which to represent the inverse.
"""
super().__init__()
self._gate = gate
try:
# Hermitian conjugate is inverse matrix
self.matrix = gate.matrix.getH()
except AttributeError:
pass
def __str__(self):
r"""Return string representation (str(gate) + \"^\dagger\")."""
return str(self._gate) + r"^\dagger"
def tex_str(self):
"""Return the Latex string representation of a Daggered gate."""
if hasattr(self._gate, 'tex_str'):
return self._gate.tex_str() + r"${}^\dagger$"
return str(self._gate) + r"${}^\dagger$"
def get_inverse(self):
"""Return the inverse gate (the inverse of the inverse of a gate is the gate itself)."""
return self._gate
def __eq__(self, other):
"""Return True if self is equal to other, i.e., same type and representing the inverse of the same gate."""
return isinstance(other, self.__class__) and self._gate == other._gate
def __hash__(self):
"""Compute the hash of the object."""
return hash(str(self))
def get_inverse(gate):
"""
Return the inverse of a gate.
Tries to call gate.get_inverse and, upon failure, creates a DaggeredGate instead.
Args:
gate: Gate of which to get the inverse
Example:
.. code-block:: python
get_inverse(H) # returns a Hadamard gate (HGate object)
"""
try:
return gate.get_inverse()
except NotInvertible:
return DaggeredGate(gate)
def is_identity(gate):
"""
Return True if the gate is an identity gate.
Calls gate.is_identity().
Args:
gate: Gate which is checked for being an identity gate.
Example:
.. code-block:: python
is_identity(Rx(2*math.pi)) # returns True
is_identity(Rx(math.pi)) # returns False
"""
return gate.is_identity()
class ControlledGate(BasicGate):
"""
Controlled version of a gate.
Note:
Use the meta function :func:`C()` to create a controlled gate
A wrapper class which enables (multi-) controlled gates. It overloads the __or__-operator, using the first qubits
provided as control qubits. The n control-qubits need to be the first n qubits. They can be in separate quregs.
Example:
.. code-block:: python
ControlledGate(gate, 2) | (qb0, qb2, qb3) # qb0 & qb2 are controls
C(gate, 2) | (qb0, qb2, qb3) # This is much nicer.
C(gate, 2) | ([qb0,qb2], qb3) # Is equivalent
Note:
Use :func:`C` rather than ControlledGate, i.e.,
.. code-block:: python
C(X, 2) == Toffoli
"""
def __init__(self, gate, n=1):
"""
Initialize a ControlledGate object.
Args:
gate: Gate to wrap.
n (int): Number of control qubits.
"""
super().__init__()
if isinstance(gate, ControlledGate):
self._gate = gate._gate
self._n = gate._n + n
else:
self._gate = gate
self._n = n
def __str__(self):
"""Return a string representation of the object."""
return "C" * self._n + str(self._gate)
def get_inverse(self):
"""Return inverse of a controlled gate, which is the controlled inverse gate."""
return ControlledGate(get_inverse(self._gate), self._n)
def __or__(self, qubits):
"""
Apply the controlled gate to qubits, using the first n qubits as controls.
Note: The control qubits can be split across the first quregs. However, the n-th control qubit needs to be
the last qubit in a qureg. The following quregs belong to the gate.
Args:
qubits (tuple of lists of Qubit objects): qubits to which to apply
the gate.
"""
qubits = BasicGate.make_tuple_of_qureg(qubits)
ctrl = []
gate_quregs = []
adding_to_controls = True
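# Walk the supplied quregs in order: qubits fill the control list until
# self._n controls have been collected; every remaining qureg is passed
# on to the wrapped gate unchanged.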
for reg in qubits:
if adding_to_controls:
ctrl += reg
adding_to_controls = len(ctrl) < self._n
else:
gate_quregs.append(reg)
# Test that there were enough control quregs and that
# the last control qubit was the last qubit in a qureg.
if len(ctrl) != self._n:
raise ControlQubitError(
"Wrong number of control qubits. "
"First qureg(s) need to contain exactly "
"the required number of control quregs."
)
import projectq.meta # pylint: disable=import-outside-toplevel
with projectq.meta.Control(gate_quregs[0][0].engine, ctrl):
self._gate | tuple(gate_quregs)
def __eq__(self, other):
"""Compare two ControlledGate objects (return True if equal)."""
return isinstance(other, self.__class__) and self._gate == other._gate and self._n == other._n
def C(gate, n_qubits=1):
"""
Return n-controlled version of the provided gate.
Args:
gate: Gate to turn into its controlled version
n_qubits: Number of controls (default: 1)
Example:
.. code-block:: python
C(NOT) | (c, q) # equivalent to CNOT | (c, q)
"""
return ControlledGate(gate, n_qubits)
class Tensor(BasicGate):
"""
Wrapper class that allows applying a (single-qubit) gate to every qubit in a quantum register.
Allowed syntax is to supply either a qureg or a tuple which contains only one qureg.
Example:
.. code-block:: python
Tensor(H) | x # applies H to every qubit in the list of qubits x
Tensor(H) | (x,) # alternative to be consistent with other syntax
"""
def __init__(self, gate):
"""Initialize a Tensor object for the gate."""
super().__init__()
self._gate = gate
def __str__(self):
"""Return a string representation of the object."""
return "Tensor(" + str(self._gate) + ")"
def get_inverse(self):
"""Return the inverse of this tensored gate (which is the tensored inverse of the gate)."""
return Tensor(get_inverse(self._gate))
def __eq__(self, other):
"""Equal operator."""
return isinstance(other, Tensor) and self._gate == other._gate
def __or__(self, qubits):
"""Operator| overload which enables the syntax Gate | qubits."""
if isinstance(qubits, tuple):
if len(qubits) != 1:
raise ValueError('Tensor/All must be applied to a single quantum register!')
qubits = qubits[0]
if not isinstance(qubits, list):
raise ValueError('Tensor/All must be applied to a list of qubits!')
for qubit in qubits:
self._gate | qubit
#: Shortcut (instance of) :class:`projectq.ops.Tensor`
All = Tensor
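if __name__ == "__main__":
    # A minimal illustrative sketch (assumption: run standalone; _DemoGate is
    # a hypothetical gate with no get_inverse of its own) exercising the meta
    # gates defined above without a compiler engine.
    class _DemoGate(BasicGate):
        def __str__(self):
            return "Demo"

    demo = _DemoGate()
    print(C(demo, 2))                # "CCDemo": a doubly-controlled gate
    inv = get_inverse(demo)          # BasicGate raises NotInvertible, so
    print(inv)                       # a DaggeredGate is returned: Demo^\dagger
    print(get_inverse(inv) is demo)  # True: the inverse of the inverse
    print(Tensor(demo))              # "Tensor(Demo)"; All is an alias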
|
|
# -*- coding: utf-8 -*-
""" Sahana Optical Character Recognision Utility (s3ocr)
@author: Suryajith Chillara <suryajith1987[at]gmail.com>
@author: Shiv Deepak <idlecool[at]gmail.com>
@copyright: 2009-2011 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
#========================== import section ================================
__all__ = ["s3ocr_generate_pdf", "s3ocr_get_languages"]
# Generic stuff
import os
import sys
import uuid
from StringIO import StringIO
# Importing the xml stuff
from xml.sax.handler import ContentHandler
from xml.sax import make_parser
from xml.dom.minidom import Document
# Importing reportlab stuff
try:
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.pagesizes import A4
from reportlab.graphics.barcode import code128
except ImportError:
print >>sys.stderr, "S3 Debug: WARNING: S3OCR: reportlab has not been installed."
# Fonts
Courier = "Courier"
Helvetica = "Helvetica"
Helvetica_Bold = "Helvetica-Bold"
Helvetica_Bold_Oblique = "Helvetica-BoldOblique"
Helvetica_Oblique = "Helvetica-Oblique"
#==========================================================================
#=============== internal Class Definitions and functions =================
#==========================================================================
#======================== pdf layout from xform ===========================
class Form:
""" Form class to use reportlab to generate pdf """
def __init__(self, pdfname="ocrform.pdf", margintop=50, marginsides=50, **kw):
""" Form initialization """
self.pdfpath = kw.get("pdfpath", pdfname)
self.verbose = kw.get("verbose", 0)
self.font = kw.get("typeface", Courier)
self.fontsize = kw.get("fontsize", 13)
self.IObuffer = StringIO()
self.canvas = Canvas(self.IObuffer, pagesize = A4)
self.width, self.height = A4
self.x = marginsides
self.lastx = marginsides
self.marginsides = marginsides
self.margintop = margintop
self.y = self.height - margintop
self.lasty = self.height - margintop
self.num = 1
def barcode(self, uuid):
""" Generate barcode of uuid """
barcode = code128.Code128(str(uuid), barWidth=1, barHeight=20)
barcode.drawOn(self.canvas, self.lastx, self.lasty)
self.lasty = self.lasty - 20
self.y = self.lasty
def decorate(self):
""" Decorates the the form with the markers needed to align the form later """
c = self.canvas
c.rect(20, 20, 20, 20, fill=1)
c.rect(self.width - 40, 20, 20, 20, fill=1)
c.rect(20, self.height - 40, 20, 20, fill=1)
c.rect(self.width/2 - 10, 20, 20, 20, fill=1)
c.rect(20, self.height/2 - 10, 20, 20, fill=1)
c.rect(self.width - 40, self.height - 40, 20, 20, fill=1)
c.rect(self.width - 40, self.height/2 - 10, 20, 20, fill=1)
def print_text(self, lines, fontsize=12, gray=0, seek=0, continuetext=0, style="default"):
""" Give the lines to be printed as a list, set the font and grey level """
c = self.canvas
self.fontsize = fontsize
if style == "center":
self.x = self.width / 2
if seek > (self.width-(self.marginsides + self.fontsize)):
seek = 0
if seek != 0:
self.x = self.x + seek
if continuetext == 1:
self.x = self.lastx + seek
if seek == 0:
self.y = self.y + fontsize
for line in lines:
if style == "center":
self.x = self.x - (len(line)) * self.fontsize / 2
if style == "right":
self.x = self.width - (self.marginsides + len(line) * self.fontsize)
if (self.width - self.marginsides - self.lastx) < 200:
self.x = self.marginsides
if continuetext == 1:
self.y = self.y - 2 * fontsize
if (self.y - self.fontsize) < 50:
self.set_new_page()
t = c.beginText(self.x, self.y)
t.setFont(Helvetica, fontsize)
t.setFillGray(gray)
t.textOut(line)
c.drawText(t)
self.y = self.y - fontsize
self.lastx = t.getX()
self.lasty = self.y
self.x = self.marginsides
def draw_check_boxes(self, boxes=1, completeline=0, lines=0, seek=0, continuetext=0, fontsize=0, gray=0, style="", isdate=0):
""" Function to draw check boxes default no of boxes = 1 """
c = self.canvas
c.setLineWidth(0.90)
c.setStrokeGray(gray)
if style == "center":
self.x = self.width / 2
elif style == "right":
self.x = self.width - self.marginsides - self.fontsize
if seek > (self.width - (self.marginsides + self.fontsize)):
seek = 0
if (self.y - self.fontsize) < 40:
self.set_new_page()
if continuetext == 1:
self.y = self.y + self.fontsize
self.x = self.lastx
else:
self.x = self.marginsides
if seek != 0:
self.x = self.x + seek
if fontsize == 0:
fontsize = self.fontsize
else:
self.fontsize = fontsize
if completeline == 1:
boxes = int(self.width / self.fontsize)
for i in range(boxes):
c.rect(self.x, self.y, self.fontsize, self.fontsize)
self.x = self.x + self.fontsize
if self.x > (self.width - (self.marginsides + self.fontsize)):
break
self.lastx = self.x
self.x = self.marginsides
self.y = self.y - self.fontsize
if isdate:
t = c.beginText(self.x, self.y)
t.setFont(Helvetica, 13)
t.setFillGray(0)
t.textOut(" D D M M Y Y Y Y")
c.drawText(t)
self.y = self.y - fontsize
self.lastx = t.getX()
self.lasty = self.y
self.lastx = self.x
self.x = self.marginsides
self.y = self.y - 13
def draw_circle(self, boxes=1, completeline=0, lines=0, seek=0, continuetext=0, fontsize=0, gray=0, style=""):
""" Draw circles on the form """
c = self.canvas
c.setLineWidth(0.90)
c.setStrokeGray(gray)
if style == "center":
self.x = self.width / 2
elif style == "right":
self.x = self.width - self.marginsides - self.fontsize
if seek > (self.width - (self.marginsides + self.fontsize)):
seek = 0
if (self.y - self.fontsize) < 40:
self.set_new_page()
if continuetext == 1:
self.y = self.y + self.fontsize
self.x = self.lastx
else:
self.x = self.marginsides
if seek != 0:
self.x = self.x + seek
if fontsize == 0:
fontsize = self.fontsize
else:
self.fontsize = fontsize
if completeline == 1:
boxes = int(self.width / self.fontsize)
for i in range(boxes):
c.circle(self.x + self.fontsize/2, self.y+self.fontsize/2, self.fontsize/2, fill = 0)
self.x = self.x + self.fontsize
if self.x > (self.width - (self.marginsides + self.fontsize)):
break
self.lastx = self.x
self.x = self.marginsides
self.y = self.y - self.fontsize
def draw_line(self, gray=0):
""" Function to draw a straight line """
c = self.canvas
c.setStrokeGray(gray)
c.setLineWidth(0.40)
self.y = self.y - (self.fontsize)
c.line(self.x, self.y, self.width - self.x, self.y)
self.y = self.y - (self.fontsize)
def set_new_page(self):
"""
All changes are forgotten when a showPage() has been executed.
They have to be set again.
"""
self.num += 1
c = self.canvas
c.showPage()
self.decorate()
self.x = self.marginsides
self.lastx = self.marginsides
self.y = self.height - self.margintop
self.print_text([str("Page "+ str(self.num))], fontsize=8, style="right")
self.x = self.marginsides
self.lastx = self.x
self.y = self.y - 32
def set_title(self, title = "FORM"):
""" Sets the title of the pdf. """
self.canvas.setTitle(title)
def save(self):
""" Saves the form """
self.canvas.save()
pdf = self.IObuffer.getvalue()
self.IObuffer.close()
return pdf
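# Example usage of Form on its own (an illustrative sketch; requires
# reportlab and produces a one-page decorated form):
#
#   form = Form(pdfname="demo.pdf")
#   form.decorate()
#   form.print_text(["Sample title"], fontsize=18, style="center")
#   form.draw_check_boxes(boxes=1, completeline=1, gray=0.9, fontsize=16)
#   open("demo.pdf", "wb").write(form.save())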
#========== xml.sax.ContentHandler instance for layout parsing ============
class FormHandler(ContentHandler):
def __init__(self, form, uid, lang="eng"):
""" Form initialization and preparation """
self.form = form
self.input = 0
self.select = 0
self.label = 0
self.value = 0
self.read = 0
self.item = 0
self.model = 0
self.itext = 0
self.hint = 0
self.translation = 0
self.translang = ""
self.translist = []
self.text = 0
self.textid, self.texttype = ["", ""]
self.lang = lang
self.printtext = ""
self.title = ""
self.ref = ""
self.initial = 1
self.single = 0
self.multiple = 0
self.uuid = uid
#print self.uuid
self.form.decorate()
self.page = 1
self.xmlcreate()
self.name = ""
self.dict = {}
self.pdf = ""
self.xmls = {}
self.labelTrans = ""
self.customfields = {"location_id":4,\
"staff_id":2,\
"staff2_id":2,\
} # fields having custom sizes
def xmlcreate(self):
""" Creates the xml """
self.doc = Document()
self.xmltitle = "%s_%s_%s.xml" % (str(self.uuid), self.lang, str(self.page))
self.root = self.doc.createElement("guide")
self.doc.appendChild(self.root)
if self.initial == 0:
if self.single == 1:
element = "select1"
elif self.multiple == 1:
element = "select"
elif self.input == 1:
element = "input"
self.child1 = self.doc.createElement(element)
self.child1.setAttribute("ref", self.ref)
self.root.appendChild(self.child1)
if self.initial == 1:
self.initial = 0
def xmlsave(self):
""" Save the xml """
self.xmls[self.xmltitle] = self.doc.toprettyxml(indent = " ")
def startElement(self, name, attrs):
""" Parses the starting element and then check what to read """
self.element = name
self.title = ""
self.value_ch = ""
if not str(name).find(":") == -1:
name = name.split(":")[1]
if name == "input":
self.input = 1
self.ref = attrs.get("ref")
#if not str(self.ref).find("/") == -1:
# ref = str(self.ref).split("/")[-1]
# if ref in self.hiddenfields:
# self.protectedfield = 1
self.child1 = self.doc.createElement(name)
self.child1.setAttribute("ref", self.ref)
if self.ref in self.dict:
self.child1.setAttribute("type", self.dict[self.ref])
self.type = self.dict[self.ref]
else:
self.child1.setAttribute("type", "string")
self.type = "string"
self.root.appendChild(self.child1)
elif name == "label":
self.label = 1
self.labelref = attrs.get("ref")
if self.select != 1:
self.child2 = self.doc.createElement("location")
self.child1.appendChild(self.child2)
elif self.select == 1 and self.item == 1:
self.child2 = self.doc.createElement("location")
self.child1.appendChild(self.child2)
elif name == "select" or name == "select1":
self.select = 1
self.read = 1
self.ref = attrs.get("ref")
self.child1 = self.doc.createElement(name)
self.child1.setAttribute("ref", self.ref)
self.root.appendChild(self.child1)
if name == "select":
self.form.print_text(["", "", str(" Multiple select: "), ""], fontsize=10, gray=0)
self.multiple = 1
else:
self.form.print_text(["", "", str("Single select: "), ""], fontsize=10, gray=0)
self.single = 1
elif name == "item":
self.item = 1
elif name == "value":
self.value = 1
elif name == "bind":
self.dict[str(attrs.get("nodeset"))] = str(attrs.get("type"))
elif name == "itext":
self.itext = 1
elif name == "translation":
self.translation = 1
self.translang = attrs.get("lang")
elif name == "text":
self.text = 1
if attrs.get("id") == "title":
self.textid = "title"
self.texttype = "string"
else:
self.textid, self.texttype = attrs.get("id").split(":")
elif name == "model":
self.model = 1
elif name == "hint":
self.hint = 1
def characters(self, ch):
""" Deal with the data """
if self.item == 1 and self.value == 1 and self.select == 1:
self.value_ch += ch
elif self.itext == 1 and self.translation == 1 and self.text == 1:
self.value_ch += ch
else:
self.title += ch
def endElement(self, name):
""" It specifies the operations to do on closing the element """
if self.form.lasty < 100:
self.form.set_new_page()
self.xmlsave()
self.page += 1
self.xmlcreate()
if not str(name).find(":") == -1:
name = name.split(":")[1]
#if name == "title":
if name == "head":
if self.model == 0:
#self.form.barcode(self.uuid) # not needed till ocr is functional
for trtuple in self.translist:
if trtuple[0] == "title":
self.printtext = trtuple[2]
self.form.set_title(unicode(self.printtext))
#self.form.set_title(str(self.title))
self.form.print_text([unicode(self.printtext)], fontsize=18, style="center")
#self.form.print_text([str(self.title)], fontsize=18, style="center")
self.form.print_text([str("1. Fill the necessary fields in BLOCK CAPITAL letters."), str("2. Always use one box per letter and leave one box space to separate words."), str("3. Fill in the circles completely.")], fontsize=13, gray=0)
self.form.draw_line()
# self.form.print_text([str(self.uuid)], fontsize=10, gray=0)
elif name == "input":
self.input = 0
#self.protectedfield = 0
self.type = ""
elif name == "select" or name == "select1":
self.select = 0
self.multiple = 0
self.single = 0
self.read = 0
self.form.print_text([" ",])
elif name == "label":
if self.input == 1: #and self.protectedfield != 1:
for trtuple in self.translist:
if trtuple[0] == self.ref and trtuple[1] == "label":
self.printtext = trtuple[2]
self.form.print_text([" ", " " + unicode(self.printtext) + " ", " "])
self.child3 = self.doc.createTextNode("%s,%s" % (str(self.form.lastx), str(self.form.lasty)))
self.child2.appendChild(self.child3)
self.child2.setAttribute("font", str(16))
if self.ref == "age":
self.form.draw_check_boxes(boxes=2, completeline=0, continuetext=0, gray=0.9, fontsize=16, seek=10)
self.child2.setAttribute("boxes", str(2))
elif self.type == "date":
self.form.draw_check_boxes(boxes=8, completeline=0, continuetext=0, gray=0.9, fontsize=16, seek=10, isdate=1)
self.child2.setAttribute("boxes", str(8))
elif self.type == "int":
count = (self.form.width - 2 * self.form.marginsides) / 32
self.form.draw_check_boxes(boxes=1, completeline=1, continuetext=0, gray=0.9, fontsize=16, seek=10)
self.child2.setAttribute("boxes", str(count))
elif self.type == "text":
count = (self.form.width - 2 * self.form.marginsides) / 16
self.child2.setAttribute("boxes", str(int(count)))
self.child2.setAttribute("lines", "4")
for i in xrange(4):
self.form.draw_check_boxes(boxes=1, completeline=1, continuetext=0, gray=0.9, fontsize=16, seek=10)
else:
if not str(self.ref).find("/") == -1:
ref = str(self.ref).split("/")[-1]
if ref in self.customfields.keys():
numlines = self.customfields[ref]
else:
numlines = 1
count = (self.form.width - 2 * self.form.marginsides) / 16
self.child2.setAttribute("boxes", str(int(count)))
self.child2.setAttribute("lines", str(numlines))
for i in xrange(numlines):
self.form.draw_check_boxes(boxes=1, completeline=1, continuetext=0, gray=0.9, fontsize=16, seek=10)
elif self.item == 1 and self.select == 1:
labelid, labeltype = self.labelref.split("'")[1].split("&")[0].split(":")
for trtuple in self.translist:
if trtuple[0] == labelid and trtuple[1] == labeltype:
self.printtext = trtuple[2]
if self.printtext != "None" and self.printtext != "Unknown":
self.form.print_text([" %s" % self.printtext], continuetext = 1)
x = self.form.lastx
y = self.form.lasty
self.form.draw_circle(boxes=1, continuetext=1, gray=0.9, fontsize=12, seek=10)
self.labelTrans = "Trans"
else:
self.labelTrans = "NoTrans"
elif self.read == 1 and self.select == 1:
labelid, labeltype = self.labelref.split("'")[1].split("&")[0].split(":")
for trtuple in self.translist:
if trtuple[0] == labelid and trtuple[1] == labeltype:
self.printtext = trtuple[2]
self.form.print_text([" %s " % str(self.printtext), " ", " "])
self.read = 0
self.label = 0
self.labelref = ""
labelid, labeltype = ["", ""]
trtuple = ("", "", "")
self.printtext = ""
elif name == "value":
self.value = 0
if self.select == 1:
self.child3 = self.doc.createTextNode("%s,%s" % (str(self.form.lastx - 12), str(self.form.lasty)))
self.child2.appendChild(self.child3)
self.child2.setAttribute("value", str(self.value_ch))
self.child2.setAttribute("font", str(12))
self.child2.setAttribute("boxes", str(1))
if self.item == 1 and self.labelTrans == "NoTrans":
self.printtext = str(self.value_ch)
self.form.print_text([" " + self.printtext], continuetext = 1)
x = self.form.lastx
y = self.form.lasty
self.form.draw_circle(boxes=1, continuetext=1, gray=0.9, fontsize=12, seek=10)
self.labelTrans == ""
if self.itext == 1 and self.translation == 1 and self.text == 1 and self.translang == self.lang:
self.translist.append((self.textid, self.texttype, unicode(self.value_ch)))
self.value_ch = ""
elif name == "item":
self.item = 0
elif name == "itext":
self.itext = 0
elif name == "translation":
self.translation = 0
self.translang = ""
elif name == "text":
self.text = 0
self.textid, self.texttype = ["", ""]
elif name == "model":
self.model = 0
elif name == "hint":
for trtuple in self.translist:
if trtuple[0] == self.ref and trtuple[1] == "hint":
self.printtext = trtuple[2]
if self.printtext not in ["None", "Unkown"]:
self.form.print_text([" %s " % str(self.printtext), " ", " "], fontsize=10)
self.hint = 0
elif name == "html":
self.translist = [] # clearing the translation mapping
#print "End, saving with the filename "+str(self.form.pdfpath)
self.xmlsave()
self.pdf = self.form.save()
self.title = ""
def get_files(self):
""" Returns pdf text and layout xml text as dict """
return self.pdf, self.xmls
#== xml.sax.ContentHandler instance to find available languages in xform ==
class LangHandler(ContentHandler):
""" To retrieve list of available languages """
def __init__(self):
""" Form initialization and preparation"""
self.itext = 0
self.translation = 0
self.lang = []
def startElement(self, name, attrs):
""" Parses the starting element and then check what to read """
if not str(name).find(":") == -1:
name = name.split(":")[1]
if name == "translation":
if self.translation == 0 and self.itext == 1:
self.translation = 1
self.lang.append(str(attrs.get("lang")))
elif name == "itext":
self.itext = 1
def endElement(self, name):
""" It specifies the operations to do on closing the element """
if not str(name).find(":") == -1:
name = name.split(":")[1]
if name == "translation":
self.translation = 0
elif name == "itext":
self.itext = 0
def get_lang(self):
""" Return list of available languages in the xform """
return self.lang
def _open_anything(source):
""" Read anything link/file/string """
import urllib
try:
return urllib.urlopen(source)
except (IOError, OSError):
pass
try:
return open(source, "r")
except (IOError, OSError):
pass
return StringIO(str(source))
#==========================================================================
#================================= OCR API ================================
#==========================================================================
def s3ocr_generate_pdf(xform, pdflang):
""" Generates pdf/xml files out of xform with language support """
uid = uuid.uuid1()
pdfs = {}
xmls = {}
form = Form(pdfname = "%s.pdf" % str(uid))
formhandler = FormHandler(form, uid, pdflang)
saxparser = make_parser()
saxparser.setContentHandler(formhandler)
datasource = _open_anything(xform)
saxparser.parse(datasource)
pdf, xmls = formhandler.get_files()
pdfs["%s_%s/pdf" % (str(uid), str(pdflang))] = pdf
return pdfs, xmls
def s3ocr_get_languages(xform):
""" Shows the languages supported by given xform """
formhandler = LangHandler()
saxparser = make_parser()
saxparser.setContentHandler(formhandler)
datasource = _open_anything(xform)
saxparser.parse(datasource)
langlist = formhandler.get_lang()
return langlist
if __name__ == "__main__":
if len(sys.argv) == 1:
sys.exit("Usage: python xforms2pdf.py filename.xml language")
xform = sys.argv[1]
if len(sys.argv) < 3:
lang = "eng"
else:
lang = str(sys.argv[2])
avail_langs = s3ocr_get_languages(xform)
if lang not in avail_langs:
sys.exit("Required Language '"+\
lang+\
"' is not available, available languages are:\n" + str(avail_langs))
pdfs, xmls = s3ocr_generate_pdf(xform, "eng")
for i in pdfs.keys():
f = open(i, "w")
f.write(pdfs[i])
f.close()
for i in xmls.keys():
f = open(i, "w")
f.write(xmls[i])
f.close()
|
|
# Copyright (c) 2011, Shutterstock Images LLC.
# All rights reserved.
#
# This file is subject to the MIT License (see the LICENSE file).
import datetime
import json
import os
import txmongo
import validictory
from twisted.internet import defer
from twisted.python import log
from oplog import utils
PARSE_ERROR = -32700
INVALID_REQUEST = -32600
METHOD_NOT_FOUND = -32601
INVALID_PARAMS = -32602
INTERNAL_ERROR = -32603
SERVER_ERROR = -32099
ERRORS = {
PARSE_ERROR: 'Parse error', # Invalid JSON was received by the server.
INVALID_REQUEST: 'Invalid Request', # The JSON sent is not a valid Request object.
METHOD_NOT_FOUND: 'Method not found', # The method does not exist / is not available.
INVALID_PARAMS: 'Invalid params', # Invalid method parameter(s).
INTERNAL_ERROR: 'Internal error', # Internal JSON-RPC error.
SERVER_ERROR: 'Server error', # Reserved for implementation-defined server-errors.
}
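# Illustrative sketch (not produced by this module itself): a JSON-RPC 2.0
# error object built from these codes typically looks like
#   {"jsonrpc": "2.0", "id": 1,
#    "error": {"code": -32602, "message": "Invalid params"}}
# The handlers below only raise Error/ServerError; the surrounding server code
# is assumed to assemble the actual response envelope.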
class Error(Exception):
def __init__(self, code=INTERNAL_ERROR, message=None, http_code=400):
self.code = code
self.http_code = http_code
if not message:
            message = ERRORS.get(code, ERRORS[INTERNAL_ERROR])
super(Error, self).__init__(message)
class ServerError(Error):
def __init__(self, message, code=SERVER_ERROR, **kwargs):
super(ServerError, self).__init__(code=code, message=message, **kwargs)
class Handler(object):
_schema = {}
_schema_list = [os.path.join(os.path.dirname(os.path.realpath(__file__)), 'schema')]
def __init__(self, user, settings, db):
self.user = user
self.settings = settings
self.db = db
def err(self, message, error):
log.err('%s (%s): %s' % (message, type(error), error))
raise ServerError(message)
def load_schema(self, name):
for root in self._schema_list:
path = '%s.json' % os.path.join(root, name)
if os.path.isfile(path):
try:
with open(path) as f:
return json.loads(f.read())
except Exception, error:
log.err('Unable to parse schema: %s' % path)
log.err('No schema found for: %s' % name)
def validate(self, name, data):
        if name not in self._schema:
schema = self.load_schema(name)
if schema:
self._schema[name] = schema
else:
schema = self._schema.get(name)
# Only validate if we have a schema defined
if schema:
try:
validictory.validate(data, schema, required_by_default=False)
except ValueError, error:
log.err('Validation failed because: %s' % error)
raise Error(INVALID_PARAMS)
@defer.inlineCallbacks
def __call__(self, params):
result = yield self.run(**params)
try:
defer.returnValue({'result': result})
except TypeError, error:
log.err('Unable to encode result: %s (%s)' % (error, result))
raise Error(INTERNAL_ERROR)
@defer.inlineCallbacks
def run(self, **kwargs):
raise Error(METHOD_NOT_FOUND)
class EntryHandler(Handler):
def backup(self, entry):
return self.db.entry_history.insert({
'date': datetime.datetime.utcnow(),
'body': entry,
'type': self.name,
})
class EntryDel(EntryHandler):
name = 'entry.del'
@defer.inlineCallbacks
def run(self, **values):
self.validate(self.name, values)
try:
values = utils.mongify.encode(values)
# Get old value so we can add to history
entry = yield self.db.entry.find_one(values)
if not entry:
                raise Error(message='Entry with id "%s" not found' % values['_id'])
result = yield self.db.entry.remove(values, safe=True)
if not result.get('err'):
yield self.backup(entry)
defer.returnValue(result)
except Exception, error:
self.err('Failed to delete entry', error)
class EntryGet(EntryHandler):
name = 'entry.get'
def gen_sort(self, sort):
if not sort:
return
f = ()
for name, ordering in sort:
if ordering == -1:
f += txmongo.filter.DESCENDING(name)
elif ordering == 1:
f += txmongo.filter.ASCENDING(name)
return txmongo.filter.sort(*f) if f else None
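    # Example (sketch, mirroring the loop above): a 'sort' value of
    #   [('created', -1), ('name', 1)]
    # accumulates DESCENDING('created') followed by ASCENDING('name') and hands
    # them to txmongo.filter.sort(), i.e. newest-first by 'created' and then
    # ascending by 'name'; an empty list yields None and no sort is applied.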
@defer.inlineCallbacks
def run(self, **values):
self.validate(self.name, values)
find = values['find']
skip = values.get('skip', 0)
limit = values.get('limit', 20) # Default to 20 records
sort = values.get('sort', [])
fields = values.get('fields')
try:
sort = self.gen_sort(sort)
if '_id' in find:
find['_id'] = txmongo.ObjectId(find['_id'])
find = utils.mongify.encode(find)
results = yield self.db.entry.find(spec=find, skip=skip, limit=limit, filter=sort, fields=fields)
defer.returnValue(utils.mongify.decode(list(results)))
except Exception, error:
log.err('Get entry error: %s' % error)
raise ServerError('Failed to query entries')
class EntryPut(EntryHandler):
name = 'entry.put'
@defer.inlineCallbacks
def run(self, **values):
def validate(values):
# Validate entry.put schema
self.validate(self.name, values)
# Validate type schema
if '_type' in values and isinstance(values['_type'], basestring):
self.validate('entry.type.%s' % values['_type'], values)
try:
# Update if we have an _id, this operation is much less efficient
# than an insert because of the read, write and possible second
# write hack to get schema validation
if '_id' in values:
_id = txmongo.ObjectId(values.pop('_id'))
# Get old value so we can revert if schema fails or add to
# history collection
old_entry = yield self.db.entry.find_one({'_id': _id})
if not old_entry:
                    raise Error(message='Entry with id "%s" not found' % _id)
result = yield self.db.entry.update(
{'_id': _id, '_user': self.user},
# "clean" encode is relatively naive and probably adds
# little security, but attempts to disallow updates to base
# fields that start with an underscore
utils.mongify.encode(values, clean=True),
upsert=False,
safe=True,
)
# Get updated value so we can validate
new_entry = yield self.db.entry.find_one({'_id': _id})
try:
validate(utils.mongify.decode(new_entry))
except ValueError, error:
_id = old_entry.pop('_id')
yield self.db.entry.update({'_id': _id}, old_entry, upsert=False, safe=True)
raise error
else:
yield self.backup(old_entry)
else:
# Set default values
                if '_date' not in values:
values['_date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
# Enforce user
values['_user'] = self.user
validate(values)
result = yield self.db.entry.insert(utils.mongify.encode(values), safe=True)
defer.returnValue(utils.mongify.decode(result))
except Error, error:
raise error
except Exception, error:
self.err('Failed to put entry', error)
ROUTE = {
EntryDel.name: EntryDel,
EntryGet.name: EntryGet,
EntryPut.name: EntryPut,
}
def route(user, request, message):
return ROUTE.get(message['method'], Handler)(user, request.settings, request.mongo)(message['params'])
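# Illustrative sketch of the dispatch contract above: an incoming message such
# as {'method': 'entry.get', 'params': {'find': {'_id': '...'}, 'limit': 5}}
# is routed to EntryGet, while an unknown method falls back to the base
# Handler, whose run() raises METHOD_NOT_FOUND.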
|
|
# Copyright 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.tests import base
from oslo_vmware import exceptions as vmware_exceptions
from oslo_vmware import vim_util
from vmware_dvs.common import config
from vmware_dvs.common import exceptions
from vmware_dvs.utils import dvs_util
from vmware_dvs.utils import spec_builder
from vmware_dvs.common import constants as dvs_const
CONF = config.CONF
fake_network = {'id': '34e33a31-516a-439f-a186-96ac85155a8c',
'name': '_fake_network_',
'admin_state_up': True}
fake_segment = {'segmentation_id': '102'}
fake_port = {
'id': '_dummy_port_id_',
'dvs_port_key': '_dummy_port_key_',
'admin_state_up': True,
'device_id': '_dummy_server_id_',
'security_group_rules': [{'ethertype': 'IPv4',
'direction': 'ingress'}]
}
fake_security_group = {'description': u'Default security group',
'id': u'9961d207-c96c-4907-be9e-d979d5353885',
'name': u'default',
'security_group_rules': [
{'direction': u'ingress',
'ethertype': u'IPv4',
'id': u'0e78cacc-ef5c-45ac-8a11-f9ce9138dce5',
'port_range_max': None,
'port_range_min': None,
'protocol': None,
'remote_group_id': u'9961d207-c96c-4907-'
u'be9e-d979d5353885',
'remote_ip_prefix': None,
'security_group_id': u'9961d207-c96c-4907-be9e-'
u'd979d5353885',
'tenant_id': u'9d2c4b37b9474bcbbddacc5f03fb89c4'},
{'direction': u'ingress',
'ethertype': u'IPv6',
'id': u'35e8a8e2-8410-4fae-ad21-26dd3f403b92',
'port_range_max': None,
'port_range_min': None,
'protocol': None,
'remote_group_id': u'9961d207-c96c-4907'
u'-be9e-d979d5353885',
'remote_ip_prefix': None,
'security_group_id': u'9961d207-c96c-'
u'4907-be9e-d979d5353885',
'tenant_id': u'9d2c4b37b9474bcbbddacc5f03fb89c4'},
{'direction': u'egress',
'ethertype': u'IPv6',
'id': u'52a93b8c-25aa-4829-9a6b-0b7ec3f7f89c',
'port_range_max': None,
'port_range_min': None,
'protocol': None,
'remote_group_id': None,
'remote_ip_prefix': None,
'security_group_id': u'9961d207-c96c-4907-'
u'be9e-d979d5353885',
'tenant_id': u'9d2c4b37b9474bcbbddacc5f03fb89c4'},
{'direction': u'ingress',
'ethertype': u'IPv4',
'id': u'625b0755-30e0-4ff6-b3e4-d0f21c5c09e2',
'port_range_max': 22L,
'port_range_min': 22L,
'protocol': u'tcp',
'remote_group_id': None,
'remote_ip_prefix': u'0.0.0.0/0',
'security_group_id': u'9961d207-c96c-4907-'
u'be9e-d979d5353885',
'tenant_id': u'9d2c4b37b9474bcbbddacc5f03fb89c4'},
{'direction': u'egress',
'ethertype': u'IPv4',
'id': u'bd00ea5d-91ea-4a39-80ca-45ce73a3bc6f',
'port_range_max': None,
'port_range_min': None,
'protocol': None,
'remote_group_id': None,
'remote_ip_prefix': None,
'security_group_id': u'9961d207-c96c-4907-'
u'be9e-d979d5353885',
'tenant_id': u'9d2c4b37b9474bcbbddacc5f03fb89c4'},
{'direction': u'ingress',
'ethertype': u'IPv4',
'id': u'c7c11328-a8ae-42a3-b30e-9cd2ac1cbef5',
'port_range_max': None,
'port_range_min': None,
'protocol': u'icmp',
'remote_group_id': None,
'remote_ip_prefix': u'0.0.0.0/0',
'security_group_id': u'9961d207-c96c-4907-'
u'be9e-d979d5353885',
'tenant_id': u'9d2c4b37b9474bcbbddacc5f03fb89c4'}],
'tenant_id': u'9d2c4b37b9474bcbbddacc5f03fb89c4'}
class UtilBaseTestCase(base.BaseTestCase):
def _get_factory_mock(self, expected_names):
def create_side_effect(namespace):
if namespace in expected_names:
return mock.Mock(name=namespace)
else:
self.fail('Unexpected call. Namespace: %s' % namespace)
factory = mock.Mock()
factory.create.side_effect = create_side_effect
return factory
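    # Illustrative sketch: a factory built via
    #   self._get_factory_mock(('ns0:BoolPolicy',))
    # returns a fresh Mock from factory.create('ns0:BoolPolicy') but fails the
    # test for any other namespace, so the connection mocks below can pin down
    # exactly which vSphere spec objects the controller is allowed to create.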
class DVSControllerBaseTestCase(UtilBaseTestCase):
"""Base of all DVSController tests"""
def setUp(self):
super(DVSControllerBaseTestCase, self).setUp()
self.dvs_name = 'dvs_name'
self.vim = mock.Mock()
self.connection = self._get_connection_mock(self.dvs_name)
self.datacenter = 'datacenter1'
# self.use_patch('vmware_dvs.util.DVSController._get_datacenter',
# return_value=self.datacenter)
self.dvs = mock.Mock()
dvs_param = [self.dvs, self.datacenter]
self.use_patch('vmware_dvs.utils.dvs_util.DVSController._get_dvs',
return_value=dvs_param)
self.dvs = dvs_param[0]
self.datacenter = dvs_param[1]
self.controller = dvs_util.DVSController(self.dvs_name,
self.connection)
def use_patch(self, *args, **kwargs):
patch = mock.patch(*args, **kwargs)
self.addCleanup(patch.stop)
return patch.start()
def _get_connection_mock(self, dvs_name):
raise NotImplementedError
class DVSControllerTestCase(DVSControllerBaseTestCase):
"""Tests of DVSController that don't call API methods"""
def test_creation(self):
self.assertEqual(self.datacenter, self.controller._datacenter)
self.assertEqual(self.dvs, self.controller._dvs)
self.assertIs(self.connection, self.controller.connection)
def test__get_net_name(self):
expect = self.dvs_name + fake_network['id']
self.assertEqual(expect, self.controller._get_net_name(fake_network))
@mock.patch('vmware_dvs.utils.dvs_util.DVSController.get_port_info')
def test_release_port(self, get_port_info_mock):
dvs_port = mock.Mock(key=fake_port['dvs_port_key'])
get_port_info_mock.return_value = dvs_port
self.controller._blocked_ports.add(dvs_port.key)
self.connection.wait_for_task.return_value = mock.Mock(state="error")
self.controller.release_port(fake_port)
self.assertNotIn(dvs_port.key, self.controller._blocked_ports)
get_port_info_mock.assert_called_once_with(fake_port)
self.assertEqual(1, self.connection.invoke_api.call_count)
self.assertEqual(
mock.call(self.vim, 'ReconfigureDVPort_Task', self.dvs,
port=mock.ANY),
self.connection.invoke_api.call_args)
args, kwargs = self.connection.invoke_api.call_args
update_spec = kwargs['port'][0]
self.assertEqual(dvs_port.key, update_spec.key)
self.assertEqual('remove', update_spec.operation)
self.connection.wait_for_task.return_value = mock.Mock(state="success")
self.controller.release_port(fake_port)
@mock.patch('vmware_dvs.utils.dvs_util.DVSController.get_port_info',
side_effect=exceptions.PortNotFound())
def test_release_port_not_found(self, get_port_info_mock):
self.controller.release_port(fake_port)
get_port_info_mock.assert_called_once_with(fake_port)
self.connection.invoke_api.assert_not_called()
def test_get_port_info(self):
port_with_dvs_key = {'binding:vif_details': {'dvs_port_key': 0},
'id': 'port_with_dvs_key'}
port = {'id': 'fake_port_id'}
with mock.patch.object(self.controller,
'_get_port_info_by_portkey') as get_port_info_by_key_mock, \
mock.patch.object(self.controller,
'_get_port_info_by_name') as get_port_info_by_name_mock:
self.controller.get_port_info(port_with_dvs_key)
get_port_info_by_key_mock.assert_called_once_with(0)
get_port_info_by_name_mock.assert_not_called()
get_port_info_by_key_mock.reset_mock()
get_port_info_by_name_mock.reset_mock()
self.controller.get_port_info(port)
get_port_info_by_key_mock.assert_not_called()
get_port_info_by_name_mock.assert_called_once_with(port['id'])
def test_get_port_info_for_port_with_dvs_key(self):
port = {'binding:vif_details': {'dvs_port_key': 0},
'id': 'fake_port_id'}
self.connection.invoke_api = mock.Mock(return_value=[])
self.assertRaises(exceptions.PortNotFound,
self.controller.get_port_info,
port)
port_info = mock.Mock()
self.connection.invoke_api = mock.Mock(return_value=[port_info])
result = self.controller.get_port_info(port)
self.assertEqual(result, port_info)
def test_get_port_info_without_port_list(self):
port = {'id': 'fake_port_id'}
dummy_port_config = mock.Mock()
dummy_port_config.name = 'dummy_port_id'
dummy_port = mock.Mock(key='dummmy_port_key', config=dummy_port_config)
with mock.patch.object(self.controller, 'get_ports') as get_ports_mock:
get_ports_mock.return_value = [dummy_port]
self.assertRaises(exceptions.PortNotFound,
self.controller.get_port_info,
port)
port_config = mock.Mock()
port_config.name = 'fake_port_id'
fake_port_info = mock.Mock(key='fake_port_key', config=port_config)
get_ports_mock.return_value = [fake_port_info, dummy_port]
result = self.controller.get_port_info(port)
self.assertEqual(result, fake_port_info)
def _get_connection_mock(self, dvs_name):
return mock.Mock(vim=self.vim)
class VirtualE1000(object):
def __init__(self, port_key, switch_uuid):
self.backing = mock.Mock()
self.backing.port.portKey = port_key
self.backing.port.switchUuid = switch_uuid
class DVSControllerNetworkCreationTestCase(DVSControllerBaseTestCase):
def test_create_network(self):
try:
self.controller.create_network(fake_network, fake_segment)
except AssertionError:
raise
except Exception as e:
self.fail("Can't create network. Reason: %s" % e)
else:
self.assertEqual(1, self.connection.invoke_api.call_count)
self.assertEqual(1, self.connection.wait_for_task.call_count)
def test_create_network_which_is_blocked(self):
org_side_effect = self.connection.invoke_api.side_effect
def side_effect(module, method, *args, **kwargs):
if method == 'CreateDVPortgroup_Task':
blocked_spec = kwargs['spec'].defaultPortConfig.blocked
self.assertEqual('0', blocked_spec.inherited)
self.assertEqual('true', blocked_spec.value)
return kwargs['spec']
else:
return org_side_effect(module, method, *args, **kwargs)
self.connection.invoke_api.side_effect = side_effect
network = dict(fake_network)
network['admin_state_up'] = False
self.controller.create_network(network, fake_segment)
def test_create_network_raises_VMWareDVSException(self):
# first we count calls
self.controller.create_network(fake_network, fake_segment)
api_calls = self.connection.invoke_api.call_count
# then we throw VimException for every api call
for i in range(api_calls):
connection = self._get_connection_mock(self.dvs_name)
org_side_effect = self.connection.invoke_api.side_effect
def side_effect(*args, **kwargs):
if connection.invoke_api.call_count == i + 1:
msg = ('Failed test with args: %(args)s '
'and kwargs: %(kwargs)s' % {'args': args,
'kwargs': kwargs})
raise vmware_exceptions.VimException(msg)
return org_side_effect(*args, **kwargs)
connection.invoke_api.side_effect = side_effect
controller = dvs_util.DVSController(self.dvs_name, connection)
self.assertRaises(exceptions.VMWareDVSException,
controller.create_network, fake_network,
fake_segment)
def _get_connection_mock(self, dvs_name):
vim = self.vim
vim.client.factory = self._get_factory_mock((
'ns0:DVPortgroupConfigSpec',
'ns0:VMwareDVSPortSetting',
'ns0:VmwareDistributedVirtualSwitchVlanIdSpec',
'ns0:BoolPolicy',
'ns0:DVPortgroupConfig',
'ns0:DVPortgroupPolicy',
'ns0:DvsTrafficRule',
'ns0:DvsDropNetworkRuleAction',
'ns0:DvsIpNetworkRuleQualifier',
'ns0:DvsFilterPolicy',
'ns0:DvsTrafficRuleset',
'ns0:DvsTrafficFilterConfig'))
def invoke_api_side_effect(module, method, *args, **kwargs):
if module is vim_util:
if method == 'get_objects':
if args == (vim, 'Datacenter', 100, ['name']):
return mock.Mock(objects=[
mock.Mock(obj='datacenter1')
])
elif module == vim:
if method == 'CreateDVPortgroup_Task':
self.assertEqual((self.dvs,), args)
self.assert_create_specification(kwargs['spec'])
return kwargs['spec']
self.fail('Unexpected call. Module: %(module)s; '
'method: %(method)s; args: %(args)s, '
'kwargs: %(kwargs)s' % {'module': module,
'method': method,
'args': args,
'kwargs': kwargs})
invoke_api = mock.Mock(side_effect=invoke_api_side_effect)
connection = mock.Mock(invoke_api=invoke_api, vim=vim)
return connection
def assert_create_specification(self, spec):
self.assertEqual(
self.controller._get_net_name(fake_network), spec.name)
self.assertEqual('earlyBinding', spec.type)
self.assertEqual('Managed By Neutron', spec.description)
vlan_spec = spec.defaultPortConfig.vlan
self.assertEqual(fake_segment['segmentation_id'],
vlan_spec.vlanId)
self.assertEqual('0', vlan_spec.inherited)
blocked_spec = spec.defaultPortConfig.blocked
self.assertEqual('1', blocked_spec.inherited)
self.assertEqual('false', blocked_spec.value)
class DVSControllerNetworkUpdateTestCase(DVSControllerBaseTestCase):
def test_update_network(self):
try:
self.controller.update_network(fake_network)
except AssertionError:
raise
except Exception as e:
self.fail("Didn't update network. Reason: %s" % e)
else:
self.assertEqual(5, self.connection.invoke_api.call_count)
self.assertEqual(1, self.connection.wait_for_task.call_count)
def test_update_network_change_admin_state_to_down(self):
org_side_effect = self.connection.invoke_api.side_effect
def side_effect(module, method, *args, **kwargs):
if 'config' in args:
config = mock.Mock()
config.defaultPortConfig.blocked.value = False
return config
elif method == 'ReconfigureDVPortgroup_Task':
blocked_spec = kwargs['spec'].defaultPortConfig.blocked
self.assertEqual('0', blocked_spec.inherited)
self.assertEqual('true', blocked_spec.value)
return kwargs['spec']
else:
return org_side_effect(module, method, *args, **kwargs)
self.connection.invoke_api.side_effect = side_effect
network = dict(fake_network)
network['admin_state_up'] = False
self.controller.update_network(network)
def test_update_network_when_there_is_no_admin_state_transition(self):
org_side_effect = self.connection.invoke_api.side_effect
for state in (True, False):
def side_effect(module, method, *args, **kwargs):
if 'config' in args:
config = mock.Mock()
config.defaultPortConfig.blocked.value = state
return config
elif method == 'ReconfigureDVPortgroup_Task':
self.fail('Request is not required, because there is no '
'transition of admin state')
else:
return org_side_effect(module, method, *args, **kwargs)
self.connection.invoke_api.side_effect = side_effect
network = dict(fake_network)
network['admin_state_up'] = not state
self.controller.update_network(network)
def assert_update_specification(self, spec):
self.assertEqual('config_version', spec.configVersion)
blocked_spec = spec.defaultPortConfig.blocked
self.assertEqual('1', blocked_spec.inherited)
self.assertEqual('false', blocked_spec.value)
def _get_connection_mock(self, dvs_name):
vim = self.vim
vim.client.factory = self._get_factory_mock((
'ns0:BoolPolicy',
'ns0:VMwareDVSPortSetting',
'ns0:DVPortgroupConfigSpec',
'ns0:DVPortgroupPolicy'
))
wrong_pg = mock.Mock(_type='DistributedVirtualPortgroup',
name='wrong_pg')
pg_to_update = mock.Mock(_type='DistributedVirtualPortgroup',
name='pg_to_update')
not_pg = mock.Mock(_type='not_pg', name='not_pg')
objects = [wrong_pg, pg_to_update, not_pg]
def invoke_api_side_effect(module, method, *args, **kwargs):
if module is vim_util:
if method == 'get_objects':
if args == (vim, 'Datacenter', 100, ['name']):
return mock.Mock(objects=[
mock.Mock(obj='datacenter1')])
elif method == 'get_object_property':
if args == (vim, 'datacenter1', 'network'):
return mock.Mock(ManagedObjectReference=objects)
elif args == (vim, wrong_pg, 'name'):
return 'wrong_pg'
elif args == (vim, pg_to_update, 'name'):
return self.controller._get_net_name(fake_network)
elif args == (vim, not_pg, 'name'):
self.fail('Called with not pg')
elif args == (vim, pg_to_update, 'config'):
config = mock.Mock()
config.defaultPortConfig.blocked.value = True
config.configVersion = 'config_version'
return config
elif module == vim:
if method == 'ReconfigureDVPortgroup_Task':
self.assertEqual((pg_to_update, ), args)
self.assert_update_specification(kwargs['spec'])
return kwargs['spec']
self.fail('Unexpected call. Module: %(module)s; '
'method: %(method)s; args: %(args)s, '
'kwargs: %(kwargs)s' % {'module': module,
'method': method,
'args': args,
'kwargs': kwargs})
invoke_api = mock.Mock(side_effect=invoke_api_side_effect)
connection = mock.Mock(invoke_api=invoke_api, vim=vim)
return connection
class DVSControllerNetworkDeletionTestCase(DVSControllerBaseTestCase):
def test_delete_network(self):
try:
self.controller.delete_network(fake_network)
except AssertionError:
raise
except Exception as e:
self.fail("Didn't delete network. Reason: %s" % e)
else:
self.assertEqual(4, self.connection.invoke_api.call_count)
self.assertEqual(1, self.connection.wait_for_task.call_count)
def test_delete_network_tries_to_delete_non_existing_port_group(self):
org_side_effect = self.connection.invoke_api.side_effect
vim = self.vim
def side_effect(module, method, *args, **kwargs):
if args == (vim, 'datacenter1', 'network'):
return mock.Mock(ManagedObjectReference=[])
else:
return org_side_effect(module, method, *args, **kwargs)
self.connection.invoke_api.side_effect = side_effect
try:
self.controller.delete_network(fake_network)
except exceptions.PortGroupNotFound:
self.fail('Deletion of non existing network should pass silent')
def _get_connection_mock(self, dvs_name):
vim = self.vim
wrong_pg = mock.Mock(_type='DistributedVirtualPortgroup',
name='wrong_pg')
pg_to_delete = mock.Mock(_type='DistributedVirtualPortgroup',
name='pg_to_delete')
not_pg = mock.Mock(_type='not_pg', name='not_pg')
objects = [wrong_pg, pg_to_delete, not_pg]
def invoke_api_side_effect(module, method, *args, **kwargs):
if module is vim_util:
if method == 'get_objects':
if args == (vim, 'Datacenter', 100, ['name']):
return mock.Mock(objects=[
mock.Mock(obj='datacenter1')])
elif method == 'get_object_property':
if args == (vim, 'datacenter1', 'network'):
return mock.Mock(ManagedObjectReference=objects)
elif args == (vim, wrong_pg, 'name'):
return 'wrong_pg'
elif args == (vim, pg_to_delete, 'name'):
return self.controller._get_net_name(fake_network)
elif args == (vim, not_pg, 'name'):
self.fail('Called with not pg')
elif module == vim:
if method == 'Destroy_Task':
self.assertEqual((pg_to_delete, ), args)
return
self.fail('Unexpected call. Module: %(module)s; '
'method: %(method)s; args: %(args)s, '
'kwargs: %(kwargs)s' % {'module': module,
'method': method,
'args': args,
'kwargs': kwargs})
invoke_api = mock.Mock(side_effect=invoke_api_side_effect)
connection = mock.Mock(invoke_api=invoke_api, vim=vim)
return connection
class DVSControllerPortUpdateTestCase(DVSControllerBaseTestCase):
def test_switch_port_blocked_state(self):
port = fake_port.copy()
port['admin_state_up'] = False
dvs_port = mock.Mock()
dvs_port.config.setting.blocked.value = True
with mock.patch.object(self.controller, 'get_port_info',
return_value=dvs_port) as get_port_info_mock:
self.controller.switch_port_blocked_state(port)
get_port_info_mock.called_once_with(port)
self.assertEqual(1, self.connection.invoke_api.call_count)
self.assertEqual(
mock.call(
self.vim, 'ReconfigureDVPort_Task', self.dvs,
port=mock.ANY),
self.connection.invoke_api.call_args)
args, kwargs = self.connection.invoke_api.call_args
update_spec = kwargs['port'][0]
self.assertEqual(dvs_port.key, update_spec.key)
self.assertEqual('edit', update_spec.operation)
self.assertTrue(update_spec.settings.blocked)
self.assertEqual(1, self.connection.wait_for_task.call_count)
def test_switch_port_blocked_state_failed(self):
port = {'id': 'fake_port_id'}
with mock.patch.object(self.controller,
'get_port_info') as get_port_info_mock:
get_port_info_mock.side_effect = exceptions.PortNotFound(id='')
self.controller.switch_port_blocked_state(port)
self.connection.invoke_api.assert_not_called()
get_port_info_mock.side_effect = vmware_exceptions.VimException()
self.assertRaises(exceptions.VMWareDVSException,
self.controller.switch_port_blocked_state,
port)
def _get_connection_mock(self, dvs_name):
return mock.Mock(vim=self.vim)
class UpdateSecurityGroupRulesTestCase(DVSControllerBaseTestCase):
BOUND_PORTS = (1, 7, 15)
UNBOUND_PORT = 123
PORTGROUP_KEY = 345
def setUp(self):
super(UpdateSecurityGroupRulesTestCase, self).setUp()
self.spec = mock.Mock()
self.vim.client.factory.create.return_value = self.spec
# TODO(ekosareva): fix and move this test in test_sg_utils.py
# def test_update_port_rules(self):
# ports = [fake_port]
# port_info = {'config': {'configVersion': '_config_version_'},
# 'key': '_dvs_port_key_'}
# self.use_patch('vmware_dvs.util.DVSController'
# '._get_port_info_by_name', return_value=port_info)
# self.use_patch('vmware_dvs.util.DVSController.get_ports',
# return_value=ports)
# self.controller.update_port_rules(ports)
# self.assertTrue(self.connection.invoke_api.called)
# args, kwargs = self.connection.invoke_api.call_args
# self.assertEqual(self.vim, args[0])
# self.assertEqual('ReconfigureDVPort_Task', args[1])
# self.assertEqual(self.dvs, args[2])
# call_ports = kwargs['port']
# self.assertEqual(len(ports), len(call_ports))
# self.assertEqual('_config_version_', self.spec.configVersion)
# self.assertEqual('_dvs_port_key_', self.spec.key)
def test__get_ports_for_pg(self):
pg = mock.Mock()
self.use_patch('vmware_dvs.utils.dvs_util.DVSController'
'._get_pg_by_name', return_value=pg)
some_ports = self.BOUND_PORTS
with mock.patch.object(self.controller.connection, 'invoke_api',
return_value=[some_ports]) as m:
self.assertEqual(
some_ports,
self.controller._get_ports_for_pg('pg_name')
)
m.assert_called_once_with(mock.ANY, 'get_object_property', self.vim,
pg, 'portKeys')
def test__increase_ports_on_portgroup(self):
ports_number = 8
pg_info = mock.Mock(numPorts=ports_number,
configVersion='_config_version_')
self.use_patch('vmware_dvs.utils.dvs_util.DVSController'
'._get_config_by_ref', return_value=pg_info)
_build_pg_update_spec = self.use_patch(
'vmware_dvs.utils.dvs_util.DVSController'
'._build_pg_update_spec',
return_value='_update_spec_')
pg = mock.Mock()
with mock.patch.object(self.controller.connection, 'invoke_api'):
self.controller._increase_ports_on_portgroup(pg)
_build_pg_update_spec.assert_called_once_with(
'_config_version_',
ports_number=ports_number * 2)
def test__increase_ports_on_portgroup_when_pg_dont_have_ports(self):
ports_number = 0
pg_info = mock.Mock(numPorts=ports_number,
configVersion='_config_version_')
self.use_patch('vmware_dvs.utils.dvs_util.DVSController'
'._get_config_by_ref', return_value=pg_info)
_build_pg_update_spec = self.use_patch(
'vmware_dvs.utils.dvs_util.DVSController'
'._build_pg_update_spec',
return_value='_update_spec_')
pg = mock.Mock()
with mock.patch.object(self.controller.connection, 'invoke_api'):
self.controller._increase_ports_on_portgroup(pg)
_build_pg_update_spec.assert_called_once_with(
'_config_version_', ports_number=dvs_util.INIT_PG_PORTS_COUNT)
def _get_connection_mock(self, dvs_name):
return mock.Mock(vim=self.vim)
class SpecBuilderTestCase(base.BaseTestCase):
def setUp(self):
super(SpecBuilderTestCase, self).setUp()
self.spec = mock.Mock(name='spec')
self.factory = mock.Mock(name='factory')
self.factory.create.return_value = self.spec
self.builder = spec_builder.SpecBuilder(self.factory)
def test_port_criteria_with_port_key(self):
criteria = self.builder.port_criteria(port_key='_some_port_')
self.factory.create.assert_called_once_with(
'ns0:DistributedVirtualSwitchPortCriteria'
)
self.assertEqual(criteria.portKey, '_some_port_')
self.assertNotIn('portgroupKey', dir(criteria))
def test_port_criteria_with_port_group_key(self):
criteria = self.builder.port_criteria(port_group_key='_port_group_key')
self.factory.create.assert_called_once_with(
'ns0:DistributedVirtualSwitchPortCriteria'
)
self.assertEqual(criteria.portgroupKey, '_port_group_key')
self.assertEqual(criteria.inside, '1')
self.assertNotIn('portKey', dir(criteria))
class UtilTestCase(base.BaseTestCase):
"""TestCase for functions in util module"""
def setUp(self):
super(UtilTestCase, self).setUp()
self.oslo_connection_mock = mock.Mock()
patch = mock.patch('oslo_vmware.api.VMwareAPISession',
return_value=self.oslo_connection_mock)
self.session_mock = patch.start()
self.addCleanup(patch.stop)
def test_empty_map_if_config_network_maps_is_empty(self):
CONF.set_override('network_maps', [], 'ML2_VMWARE')
self.assertDictEqual(
{},
dvs_util.create_network_map_from_config(CONF.ML2_VMWARE))
@mock.patch('vmware_dvs.utils.dvs_util.DVSController._get_dvs',
return_value=(mock.Mock(), 'datacenter1'))
def test_creates_network_map_from_conf(self, *args):
network_map = ['physnet1:dvSwitch', 'physnet2:dvSwitch1']
CONF.set_override(
'network_maps', network_map, 'ML2_VMWARE')
actual = dvs_util.create_network_map_from_config(CONF.ML2_VMWARE)
self.assertEqual(len(network_map), len(actual))
for net, dvs_name in [i.split(':') for i in network_map]:
controller = actual[net]
self.assertEqual(self.oslo_connection_mock, controller.connection)
vmware_conf = config.CONF.ML2_VMWARE
self.session_mock.assert_called_once_with(
vmware_conf.vsphere_hostname,
vmware_conf.vsphere_login,
vmware_conf.vsphere_password,
vmware_conf.api_retry_count,
vmware_conf.task_poll_interval,
pool_size=vmware_conf.connections_pool_size)
def test_wrap_retry_w_login_unsuccessful(self):
func = mock.Mock()
def side_effect(*args, **kwargs):
exception = vmware_exceptions.VMwareDriverException()
exception.message = dvs_const.LOGIN_PROBLEM_TEXT
raise exception
func.side_effect = side_effect
def double(*args, **kwargs):
return func(*args, **kwargs)
self.assertRaises(
vmware_exceptions.VMwareDriverException,
dvs_util.wrap_retry(double))
self.assertEqual(3, func.call_count)
def test_wrap_retry_w_concurrent_modification(self):
func = mock.Mock()
func.side_effect = [
exceptions.VMWareDVSException(
message=dvs_const.CONCURRENT_MODIFICATION_TEXT,
type='TestException',
cause='Test cause'
),
exceptions.VMWareDVSException(
message='Some exception text',
type='TestException',
cause='Test cause'
)
]
def double(*args, **kwargs):
return func(*args, **kwargs)
self.assertRaises(
exceptions.VMWareDVSException, dvs_util.wrap_retry(double))
self.assertEqual(2, func.call_count)
|
|
import unittest
import numpy
import chainerx
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
class IgnoreNumpyFloatingPointError(object):
def __enter__(self):
self.old_settings = numpy.seterr(all='ignore')
def __exit__(self, *args):
numpy.seterr(**self.old_settings)
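# Usage sketch for IgnoreNumpyFloatingPointError above: wrapping a computation
#   with IgnoreNumpyFloatingPointError():
#       y = numpy.log(a)      # e.g. log(0) would normally warn/raise
# silences NumPy floating point errors for the duration of the block and
# restores the previous numpy.seterr settings on exit.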
class UnaryMathTestBase(object):
input = None
def setup(self):
in_dtype, = self.in_dtypes
in_kind = numpy.dtype(in_dtype).kind
        if in_kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
if in_dtype == 'float16':
self.check_forward_options.update({'rtol': 1e-3, 'atol': 1e-3})
self.check_backward_options.update({'rtol': 3e-3, 'atol': 3e-3})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 1e-2})
input = self.input
if (in_kind == 'u'
and isinstance(input, (int, float))
and input < 0):
raise unittest.SkipTest(
'Combination of uint dtype and negative input cannot be '
'tested')
def generate_inputs(self):
in_dtype, = self.in_dtypes
if isinstance(self.input, numpy.ndarray):
return self.input.astype(in_dtype),
if self.input == 'random':
return array_utils.uniform(self.shape, in_dtype),
if isinstance(self.input, (bool, int, float)):
return numpy.full(self.shape, self.input, dtype=in_dtype),
assert False
def forward_xp(self, inputs, xp):
a, = inputs
# This cast was introduced in order to avoid decreasing precision.
# ex.) numpy.sqrt(x) becomes a float16 array where x is an int8 array.
a = dtype_utils.cast_if_numpy_array(xp, a, self.out_dtype)
with IgnoreNumpyFloatingPointError():
y = self.func(xp, a)
y = dtype_utils.cast_if_numpy_array(xp, y, self.out_dtype)
return y,
class BinaryMathTestBase(object):
def setup(self):
in_dtype1, in_dtype2 = self.in_dtypes
kind1 = numpy.dtype(in_dtype1).kind
kind2 = numpy.dtype(in_dtype2).kind
if kind1 != 'f' or kind2 != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
if in_dtype1 == 'float16' or in_dtype2 == 'float16':
self.check_forward_options.update({'rtol': 1e-3, 'atol': 1e-3})
self.check_backward_options.update({'rtol': 1e-3, 'atol': 1e-3})
self.check_double_backward_options.update(
{'rtol': 1e-3, 'atol': 1e-3})
def generate_inputs(self):
in_dtype1, in_dtype2 = self.in_dtypes
in_shape1, in_shape2 = self.in_shapes
if self.input_lhs == 'random':
a = array_utils.uniform(in_shape1, in_dtype1)
elif isinstance(self.input_lhs, (bool, int, float)):
a = numpy.full(in_shape1, self.input_lhs, dtype=in_dtype1)
else:
assert False
if self.input_rhs == 'random':
b = array_utils.uniform(in_shape2, in_dtype2)
elif isinstance(self.input_rhs, (bool, int, float)):
b = numpy.full(in_shape2, self.input_rhs, dtype=in_dtype2)
else:
assert False
return a, b
def forward_xp(self, inputs, xp):
a, b = inputs
# This cast was introduced in order to avoid decreasing precision.
        # ex.) x / y becomes a float16 array where x and y are int8 arrays.
a = dtype_utils.cast_if_numpy_array(xp, a, self.out_dtype)
b = dtype_utils.cast_if_numpy_array(xp, b, self.out_dtype)
with IgnoreNumpyFloatingPointError():
y = self.func(xp, a, b)
y = dtype_utils.cast_if_numpy_array(xp, y, self.out_dtype)
return y,
class InplaceUnaryMathTestBase(UnaryMathTestBase):
skip_backward_test = True
skip_double_backward_test = True
def forward_xp(self, inputs, xp):
a, = inputs
if xp is chainerx:
a_ = a.as_grad_stopped().copy()
else:
a_ = a.copy()
with IgnoreNumpyFloatingPointError():
ret = self.func(xp, a_)
assert ret is None # func should not return anything
return a_,
class InplaceBinaryMathTestBase(BinaryMathTestBase):
skip_backward_test = True
skip_double_backward_test = True
def forward_xp(self, inputs, xp):
a, b = inputs
b = dtype_utils.cast_if_numpy_array(xp, b, a.dtype)
if xp is chainerx:
a_ = a.as_grad_stopped().copy()
b_ = b.as_grad_stopped()
else:
a_ = a.copy()
b_ = b
with IgnoreNumpyFloatingPointError():
ret = self.func(xp, a_, b_)
assert ret is None # func should not return anything
return a_,
def _convert_numpy_scalar(scalar, dtype):
# Implicit casting in NumPy's multiply depends on the 'casting' argument,
# which is not yet supported (ChainerX always casts).
# Therefore, we explicitly cast the scalar to the dtype of the ndarray
# before the multiplication for NumPy.
return numpy.dtype(dtype).type(scalar)
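# Example (sketch): _convert_numpy_scalar(3, 'uint8') returns numpy.uint8(3),
# so the NumPy reference computation matches ChainerX's always-cast behaviour
# described above instead of relying on NumPy's implicit casting rules.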
class MathScalarTestBase(UnaryMathTestBase):
def func(self, xp, a):
scalar = self.scalar_type(self.scalar_value)
return self.func_scalar(xp, a, scalar)
class InplaceMathScalarTestBase(InplaceUnaryMathTestBase):
def func(self, xp, a):
scalar = self.scalar_type(self.scalar_value)
if xp is numpy:
# This cast is to avoid TypeError in the following case
# a: uint8 0-dim numpy.ndarray
# scalar: int
in_dtype, = self.in_dtypes
scalar = _convert_numpy_scalar(scalar, in_dtype)
return self.func_scalar(xp, a, scalar)
def _permutate_shapes(shapes_list):
    # Permute the input shape pairs (include both orderings of each pair)
permutated_shapes_list = []
for in_shape1, in_shape2 in shapes_list:
permutated_shapes_list.append((in_shape1, in_shape2))
permutated_shapes_list.append((in_shape2, in_shape1))
return list(set(permutated_shapes_list))
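# Example (sketch): _permutate_shapes([((3, 1), (1, 4))]) contains both
# ((3, 1), (1, 4)) and ((1, 4), (3, 1)); the set() above drops the duplicate
# produced by symmetric pairs such as ((1,), (1,)).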
shapes_combination_inplace_binary = [
# Same shapes
((1,), (1,)),
((3, 4), (3, 4)),
# Broadcast
((10,), (1,)),
((3, 4), (3, 1)),
((3, 4), (1, 4)),
((3, 4), (4,)),
((3, 4), (1, 1)),
((3, 4), (1,)),
((2, 3, 4), (1, 1, 1)),
# 0-dim shape
((), ()),
((1,), ()),
((3,), ()),
((2, 3), ()),
# 0-size shape
((0,), (0,)),
((0,), (1,)),
((0,), ()),
((2, 0, 3), (2, 0, 3)),
# TODO(imanishi): Fix strides
# ((2, 0, 3), (0, 1)),
]
shapes_combination_binary = _permutate_shapes([
# Broadcast
((3, 1), (1, 4)),
((2, 1, 4), (3, 1)),
# 0-size shape
# TODO(imanishi): Fix strides
# ((0, 1), (0, 1, 0)),
]) + _permutate_shapes(shapes_combination_inplace_binary)
# An association list mapping each input dtype tuple to the dtype that
# ChainerX's real-valued functions should return.
in_out_float_dtypes_math_functions = [
# Float.
(('float16',), 'float16'),
(('float32',), 'float32'),
(('float64',), 'float64'),
]
in_out_dtypes_math_functions = in_out_float_dtypes_math_functions + [
# Signed int.
(('int8',), 'float32'),
(('int16',), 'float32'),
(('int32',), 'float32'),
(('int64',), 'float32'),
# Unsigned int.
(('uint8',), 'float32'),
# Bool.
(('bool_',), 'float32'),
]
in_out_dtypes_math_binary_functions = dtype_utils._permutate_dtype_mapping([
# integer mixed
(('int8', 'int16'), 'float32'),
(('int8', 'int32'), 'float32'),
(('int8', 'int64'), 'float32'),
(('int8', 'uint8'), 'float32'),
(('int16', 'int32'), 'float32'),
(('int16', 'int64'), 'float32'),
(('int16', 'uint8'), 'float32'),
(('int32', 'int64'), 'float32'),
(('int32', 'uint8'), 'float32'),
(('int64', 'uint8'), 'float32'),
# integer float mixed
(('int8', 'float16'), 'float16'),
(('int8', 'float32'), 'float32'),
(('int8', 'float64'), 'float64'),
(('int16', 'float16'), 'float16'),
(('int16', 'float32'), 'float32'),
(('int16', 'float64'), 'float64'),
(('int32', 'float16'), 'float16'),
(('int32', 'float32'), 'float32'),
(('int32', 'float64'), 'float64'),
(('int64', 'float16'), 'float16'),
(('int64', 'float32'), 'float32'),
(('int64', 'float64'), 'float64'),
(('uint8', 'float16'), 'float16'),
(('uint8', 'float32'), 'float32'),
(('uint8', 'float64'), 'float64'),
# float mixed
(('float16', 'float32'), 'float32'),
(('float16', 'float64'), 'float64'),
(('float32', 'float64'), 'float64'),
])
|
|
# -*- coding: utf-8 -*-
import sys
import curses
import logging
from argparse import ArgumentParser, SUPPRESS as SUPPRESS
from os import path, getenv, environ
from sys import platform, version_info
from contextlib import contextmanager
from platform import system
from .radio import PyRadio
from .config import PyRadioConfig, SUPPORTED_PLAYERS
from .install import PyRadioUpdate, PyRadioUpdateOnWindows, is_pyradio_user_installed, version_string_to_list, get_github_tag
PATTERN = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
PY3 = sys.version_info[0] == 3
@contextmanager
def pyradio_config_file():
cf = PyRadioConfig()
try:
yield cf
finally:
try:
ret, lfile = cf.remove_session_lock_file()
if cf.force_to_remove_lock_file:
if ret == 0:
print('Lock file removed: "{}"'.format(lfile))
elif ret == 1:
print('Failed to remove Lock file: "{}"'.format(lfile))
else:
print('Lock file not found: "{}"'.format(lfile))
except:
pass
def __configureLogger():
logger = logging.getLogger('pyradio')
logger.setLevel(logging.DEBUG)
# Handler
fh = logging.FileHandler(path.join(path.expanduser('~'), 'pyradio.log'))
fh.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter(PATTERN)
    # add formatter to fh
    fh.setFormatter(formatter)
    # add fh to logger
logger.addHandler(fh)
def shell():
version_too_old = False
if sys.version_info[0] == 2:
if sys.version_info < (2, 7):
version_too_old = True
elif sys.version_info.major == 3 and sys.version_info < (3, 5):
version_too_old = True
if version_too_old:
print('PyRadio requires python 2.7 or 3.5+...')
sys.exit(1)
requested_player = ''
parser = ArgumentParser(description='Curses based Internet radio player')
parser.add_argument('-s', '--stations', default='',
help='Use specified station CSV file.')
parser.add_argument('-p', '--play', nargs='?', default='False',
                        help='Start and play. '
                        'The value is the station number, or empty for random.')
parser.add_argument('-u', '--use-player', default='',
help='Use specified player. '
'A comma-separated list can be used to specify detection order. '
'Supported players: mpv, mplayer, vlc.')
parser.add_argument('-a', '--add', action='store_true',
help='Add station to list.')
parser.add_argument('-ls', '--list-playlists', action='store_true',
help='List of available playlists in config dir.')
parser.add_argument('-l', '--list', action='store_true',
help='List of available stations in a playlist.')
parser.add_argument('-t', '--theme', default='', help='Use specified theme.')
parser.add_argument('-tlp', '--toggle-load-last-playlist', action='store_true',
help='Toggle autoload last opened playlist.')
parser.add_argument('-scd', '--show-config-dir', action='store_true',
help='Print config directory [CONFIG DIR] location and exit.')
parser.add_argument('-ocd', '--open-config-dir', action='store_true',
help='Open config directory [CONFIG DIR] with default file manager.')
parser.add_argument('-ep', '--extra-player_parameters', default=None,
help="Provide extra player parameters as a string. The parameter is saved in the configuration file and is activated for the current session. The string\'s format is [player_name:parameters]. player_name can be 'mpv', 'mplayer' or 'vlc'. Alternative format to pass a profile: [player_name:profile:profile_name]. In this case, the profile_name must be a valid profile defined in the player\'s config file (not for VLC).")
parser.add_argument('-ap', '--active-player-param-id', default=0, help='Specify the extra player parameter set to be used with the default player. ACTIVE_PLAYER_PARAM_ID is 1-11 (refer to the output of the -lp option)')
parser.add_argument('-lp', '--list-player-parameters', default=None,
action='store_true',
help='List extra players parameters.')
parser.add_argument('-U', '--update', action='store_true',
help='Update PyRadio.')
if platform.startswith('linux'):
parser.add_argument('--user', action='store_true', default=False,
help='Install only for current user (linux only).')
parser.add_argument('-R', '--uninstall', action='store_true',
help='Uninstall PyRadio.')
parser.add_argument('--unlock', action='store_true',
help="Remove sessions' lock file.")
parser.add_argument('-d', '--debug', action='store_true',
help='Start pyradio in debug mode.')
parser.add_argument('-V', '--version', action='store_true',
help='Display version information.')
''' extra downloads
only use them after the developer says so,
for debug purposes only
--devel download official devel branch
--sng-master download developer release (master)
--sng-devel download developer devel branch
    --force-update give a version greater than the current one,
to check update notification functionality
'''
parser.add_argument('--sng-master', action='store_true', help=SUPPRESS)
parser.add_argument('--sng-devel', action='store_true', help=SUPPRESS)
parser.add_argument('--devel', action='store_true', help=SUPPRESS)
parser.add_argument('--force-update', default='', help=SUPPRESS)
args = parser.parse_args()
sys.stdout.flush()
config_already_read = False
with pyradio_config_file() as pyradio_config:
if args.version:
pyradio_config.get_pyradio_version()
print('PyRadio version: {}'.format(pyradio_config.current_pyradio_version))
print('Python version: {}'.format(sys.version.replace('\n', ' ').replace('\r', ' ')))
pyradio_config.read_config()
if pyradio_config.distro != 'None':
print('Distribution: {}'.format(pyradio_config.distro))
sys.exit()
if args.toggle_load_last_playlist:
if pyradio_config.locked:
print('Error: Another instance of PyRadio is already running!')
print(' Please close it and try again...')
sys.exit(1)
else:
read_config(pyradio_config)
pyradio_config.opts['open_last_playlist'][1] = not pyradio_config.opts['open_last_playlist'][1]
pyradio_config.opts['dirty_config'][1] = True
print('Setting auto load last playlist to: {}'.format(pyradio_config.opts['open_last_playlist'][1]))
                save_config(pyradio_config)
sys.exit(0)
package = 0
if args.uninstall or args.update:
if args.sng_master:
package = 1
elif args.sng_devel:
package = 2
elif args.devel:
package = 3
if not config_already_read:
read_config(pyradio_config)
config_already_read = True
if pyradio_config.distro != 'None' and \
not platform.startswith('win'):
no_update(args.uninstall)
if args.update:
if package == 0:
pyradio_config.get_pyradio_version()
last_tag = get_github_tag()
if last_tag:
print('Released version : {}'.format(last_tag))
print('Installed version : {}'.format(pyradio_config.current_pyradio_version))
if version_string_to_list(last_tag) <= version_string_to_list(pyradio_config.current_pyradio_version):
print('Latest version already installed. Nothing to do....')
sys.exit()
else:
print('Error reading online version.\nPlease make sure you are connected to the internet and try again.')
sys.exit(1)
python_version_to_use = 3 if PY3 else 2
try:
upd = PyRadioUpdate(
package=package,
python_version_to_use=python_version_to_use
)
if platform.startswith('linux'):
upd.user = args.user
upd.update_pyradio()
except RuntimeError:
upd = PyRadioUpdateOnWindows(
package=package,
python_version_to_use=python_version_to_use
)
upd.update_or_uninstall_on_windows(mode='update-open')
sys.exit()
if args.uninstall:
python_version_to_use = 3 if PY3 else 2
try:
upd = PyRadioUpdate(
package=package,
python_version_to_use=python_version_to_use
)
upd.remove_pyradio()
except RuntimeError:
upd = PyRadioUpdateOnWindows(
package=package,
python_version_to_use=python_version_to_use
)
upd.update_or_uninstall_on_windows(mode='uninstall-open')
sys.exit()
''' check conflicting parameters '''
if args.active_player_param_id and \
args.extra_player_parameters:
print('Error: You cannot use parameters "-ep" and "-ap" together!\n')
sys.exit(1)
''' user specified extra player parameter '''
if args.active_player_param_id:
try:
a_param = int(args.active_player_param_id)
except ValueError:
print('Error: Parameter -ap is not a number\n')
sys.exit(1)
if 1 <= a_param <= 11:
pyradio_config.user_param_id = a_param
else:
print('Error: Parameter -ap must be between 1 and 11')
print(' Actually, it must be between 1 and the maximum')
print(' number of parameters for your default player.\n')
args.list_player_parameters = True
''' list extra player parameters '''
if args.list_player_parameters:
print('PyRadio Players Extra Parameters')
print(32 * '-')
read_config(pyradio_config)
default_player_name = pyradio_config.opts['player'][1].replace(' ', '').split(',')[0]
if default_player_name == '':
default_player_name = SUPPORTED_PLAYERS[0]
for a_player in SUPPORTED_PLAYERS:
if default_player_name == a_player:
print('Player: ' + a_player + ' (default)')
else:
print('Player: ' + a_player)
default = 0
for i, a_param in enumerate(pyradio_config.saved_params[a_player]):
if i == 0:
default = int(a_param)
else:
str_default = '(default)' if i == default else ''
count = str(i) if i > 9 else ' ' + str(i)
print(' {0}. {1} {2}'.format(count, a_param, str_default))
print('')
sys.exit()
''' extra player parameters '''
if args.extra_player_parameters:
if ':' in args.extra_player_parameters:
if pyradio_config.locked:
print('Error: This session is locked!')
                    print(' Please exit any other instances of the program')
print(' that are currently running and try again.')
sys.exit(1)
else:
if args.extra_player_parameters.startswith('vlc:profile'):
print('Error in parameter: "-ep".')
                        print(' VLC does not support profiles\n')
sys.exit()
else:
pyradio_config.command_line_params = args.extra_player_parameters
else:
print('Error in parameter: "-ep".')
print(' Parameter format: "player_name:parameters"')
print(' or "player_name:profile:name_of_profile"\n')
sys.exit()
if args.unlock:
pyradio_config.locked = False
pyradio_config.force_to_remove_lock_file = True
sys.exit()
if args.show_config_dir:
print('PyRadio config dir: "{}"'.format(pyradio_config.stations_dir))
sys.exit()
if args.open_config_dir:
open_conf_dir(pyradio_config)
sys.exit()
if args.list_playlists:
pyradio_config.list_playlists()
sys.exit()
if args.list is False and args.add is False:
print('Reading config...')
if not config_already_read:
read_config(pyradio_config)
config_already_read = True
if args.use_player != '':
requested_player = args.use_player
if args.list is False and args.add is False:
print('Reading playlist...')
sys.stdout.flush()
is_last_playlist = False
if pyradio_config.open_last_playlist:
last_playlist = pyradio_config.get_last_playlist()
if last_playlist:
args.stations = last_playlist
is_last_playlist = True
ret = pyradio_config.read_playlist_file(
stationFile=args.stations,
is_last_playlist=is_last_playlist)
if ret < 0:
print_playlist_selection_error(args.stations, pyradio_config, ret)
# No need to parse the file if we add station
# Actually we do need to do so now, so that we
# handle 2-column vs. 3-column playlists
if args.add:
if sys.version_info < (3, 0):
params = raw_input("Enter the name: "), raw_input("Enter the url: "), raw_input("Enter the encoding (leave empty for '" + pyradio_config.default_encoding + "'): ")
else:
params = input("Enter the name: "), input("Enter the url: "), input("Enter the encoding (leave empty for '" + pyradio_config.default_encoding + "'): ")
msg = ('name', 'url')
for i, a_param in enumerate(params):
if i < 2:
if a_param.strip() == '':
print('** Error: No {} entered. Aborting...'.format(msg[i]))
sys.exit(1)
ret = pyradio_config.append_station(params, args.stations)
if ret < 0:
print_playlist_selection_error(args.stations, pyradio_config, ret)
sys.exit()
if args.list:
header_format_string, format_string = get_format_string(pyradio_config.stations)
header_string = header_format_string.format('[Name]','[URL]','[Encoding]')
print(header_string)
print(len(header_string) * '-')
for num, a_station in enumerate(pyradio_config.stations):
if a_station[2]:
encoding = a_station[2]
else:
encoding = pyradio_config.default_encoding
print(format_string.format(str(num+1), a_station[0], a_station[1], encoding))
sys.exit()
if args.debug:
__configureLogger()
if platform.startswith('win'):
print('Debug mode activated\n printing messages to file: "{}\pyradio.log"'.format(getenv('USERPROFILE')))
else:
print('Debug mode activated; printing messages to file: "~/pyradio.log"')
else:
''' Refer to https://docs.python.org/3.7/howto/logging.html
section "What happens if no configuration is provided"
'''
logging.raiseExceptions = False
logging.lastResort = None
if requested_player == '':
requested_player = pyradio_config.player
#else:
# pyradio_config.requested_player = requested_player
if args.play == 'False':
if args.stations == '':
args.play = pyradio_config.default_station
elif args.play is not None:
try:
check_int = int(args.play)
except:
print('Error: Invalid parameter (-p ' + args.play + ')')
sys.exit(1)
if args.play == '-1':
args.play = 'False'
''' get auto play last playlist data '''
if pyradio_config.last_playlist_to_open != []:
pre_select = pyradio_config.last_playlist_to_open[1]
if pyradio_config.last_playlist_to_open[2] > -1:
args.play = str(pyradio_config.last_playlist_to_open[2] + 1)
else:
args.play = 'False'
else:
pre_select = 'False'
theme_to_use = args.theme
if not theme_to_use:
theme_to_use = pyradio_config.theme
# Starts the radio TUI.
pyradio = PyRadio(
pyradio_config,
play=args.play,
pre_select=pre_select,
req_player=requested_player,
theme=theme_to_use,
force_update=args.force_update
)
''' Setting ESCAPE key delay to 25ms
Refer to: https://stackoverflow.com/questions/27372068/why-does-the-escape-key-have-a-delay-in-python-curses
'''
environ.setdefault('ESCDELAY', '25')
''' set window title '''
if platform.startswith('win'):
import ctypes
try:
if pyradio_config.locked:
win_title = 'PyRadio: Your Internet Radio Player (Session Locked)'
else:
win_title = 'PyRadio: Your Internet Radio Player'
ctypes.windll.kernel32.SetConsoleTitleW(win_title)
except:
pass
else:
try:
if pyradio_config.locked:
sys.stdout.write('\x1b]2;PyRadio: Your Internet Radio Player (Session Locked)\x07')
else:
sys.stdout.write('\x1b]2;PyRadio: Your Internet Radio Player\x07')
except:
pass
sys.stdout.flush()
''' curses wrapper '''
curses.wrapper(pyradio.setup)
''' curses is off '''
if pyradio.setup_return_status:
if pyradio_config.PROGRAM_UPDATE:
if platform.startswith('win'):
upd = PyRadioUpdateOnWindows()
upd.update_or_uninstall_on_windows(mode='update-open')
else:
upd = PyRadioUpdate()
upd.user = is_pyradio_user_installed()
upd.update_pyradio()
else:
print('\nThank you for using PyRadio. Cheers!')
else:
print('\nThis terminal can not display colors.\nPyRadio cannot function in such a terminal.\n')
def read_config(pyradio_config):
ret = pyradio_config.read_config()
if ret == -1:
print('Error opening config: "{}"'.format(pyradio_config.config_file))
sys.exit(1)
elif ret == -2:
print('Config file is malformed: "{}"'.format(pyradio_config.config_file))
sys.exit(1)
def save_config(pyradio_config):
ret = pyradio_config.save_config(from_command_line=True)
if ret == -1:
print('Error saving config!')
sys.exit(1)
def no_update(uninstall):
action = 'uninstall' if uninstall else 'update'
print('PyRadio has been installed using either pip or your distribution\'s\npackage manager. Please use that to {} it.\n'.format(action))
sys.exit(1)
def print_playlist_selection_error(a_selection, cnf, ret, exit_if_malformed=True):
if exit_if_malformed:
if ret == -1:
print('Error: playlist is malformed: "{}"'.format(a_selection))
sys.exit(1)
if ret == -2:
print('Error: Specified playlist not found')
sys.exit(1)
elif ret == -3:
print('Error: Negative playlist number specified')
sys.exit(1)
elif ret == -4:
print('Error: Specified numbered playlist not found')
cnf.list_playlists()
sys.exit(1)
elif ret == -5:
print('Error: Failed to write playlist')
sys.exit(1)
elif ret == -6:
print('Error: Failed to rename playlist')
sys.exit(1)
elif ret == -7:
print('Error: Playlist recovery failed!\n')
if cnf.playlist_recovery_result == 1:
msg = '''Both a playlist file (CSV) and a playlist backup file (TXT)
exist for the selected playlist. In this case, PyRadio would
try to delete the CSV file, and then rename the TXT file to CSV.\n
Unfortunately, deleting the CSV file has failed, so you have to
manually address the issue.'''
else:
msg = '''A playlist backup file (TXT) has been found for the selected
playlist. In this case, PyRadio would try to rename this file
to CSV.\n
Unfortunately, renaming this file has failed, so you have to
manually address the issue.'''
print(msg)
#open_conf_dir(cnf)
sys.exit(1)
elif ret == -8:
print('File type not supported')
sys.exit(1)
def open_conf_dir(cnf):
import subprocess
import os
import platform
if platform.system().lower() == 'windows':
os.startfile(cnf.stations_dir)
elif platform.system().lower() == 'darwin':
subprocess.Popen(['open', cnf.stations_dir])
else:
subprocess.Popen(['xdg-open', cnf.stations_dir])
def get_format_string(stations):
len0 = len1 = 0
for n in stations:
if len(n[0]) > len0:
len0 = len(n[0])
if len(n[1]) > len1:
len1 = len(n[1])
num = len(str(len(stations)))
format_string = '{0:>' + str(num) + '.' + str(num) + 's}. ' + '{1:' + str(len0) + '.' + str(len0) + 's} | {2:' + str(len1) + '.' + str(len1) + 's} | {3}'
header_format_string = '{0:' + str(len0+num+2) + '.' + str(len0+num+2) + 's} | {1:' + str(len1) + '.' + str(len1) + 's} | {2}'
return header_format_string, format_string
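# Illustrative sketch of the layout built above (actual column widths depend on
# the longest station name/URL in the playlist; the URL shown is hypothetical):
#   [Name]  | [URL]              | [Encoding]
#   -----------------------------------------
#    1. BBC | http://bbc.example | utf-8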
if __name__ == '__main__':
shell()
|
|
# Copyright (C) 2011 discretelogics
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
''' pytest tests '''
import tempfile
import os
import sys
from teafiles import *
def setup_module(m):
module = m
module.testfiles = []
def gettempfilename():
filename = tempfile.mktemp(".tea")
module = sys.modules[__name__]
module.testfiles.append(filename)
return filename
def teardown_module(module):
for filename in module.testfiles:
os.remove(filename)
def test_create_and_read():
filename = gettempfilename()
with TeaFile.create(filename, "A B C", "qqq") as tf:
tf.write(1, 2, 3)
tf.write(21, 22, 23)
with TeaFile.openread(filename) as tf:
assert tf.itemcount == 2
item = tf.read()
assert item
assert item.A == 1
assert item.B == 2
assert item.C == 3
item = tf.read()
assert item
assert item.A == 21
assert item.B == 22
assert item.C == 23
assert not tf.read()
def test_itemarea_is_set_after_create():
filename = gettempfilename()
with TeaFile.create(filename, "A B C", "qqq") as tf:
assert tf.description.itemdescription.itemname == "ABC"
assert tf.itemareastart > 32 # file holds item description, so item area starts after core header
assert tf._getitemareaend() > 0
assert tf._getitemareaend() == tf.itemareastart
assert tf._getitemareasize() == 0
assert tf.itemcount == 0
def test_itemarea_is_set_after_open():
filename = gettempfilename()
with TeaFile.create(filename, "A B C") as tf:
pass
with TeaFile.openread(filename) as tf:
assert tf.description.itemdescription.itemname == "ABC"
assert tf.itemareastart > 0
assert tf._getitemareaend() > 0
assert tf._getitemareaend() == tf.itemareastart
assert tf._getitemareasize() == 0
assert tf.itemcount == 0
def test_itemcount():
filename = gettempfilename()
with TeaFile.create(filename, "A B C", "qqq") as tf:
assert tf.itemcount == 0
for i in range(1, 11):
tf.write(i, 22, 33)
tf.flush() # required, to update the filesize correctly
assert tf.itemcount == i
with TeaFile.openread(filename) as tf:
assert tf.itemcount == 10
def test_seekitem():
filename = gettempfilename()
with TeaFile.create(filename, "A B C", "qqq") as tf:
for i in range(10):
tf.write(i, 10 * i, 100 * i)
with TeaFile.openread(filename) as tf:
item = tf.read()
assert len(item) == 3
assert item[0] == 0
assert item[1] == 0
tf.seekitem(5)
item = tf.read()
assert item[0] == 5
assert item[1] == 50
tf.seekitem(2)
item = tf.read()
assert item[0] == 2
assert item[1] == 20
def test_seekitem2():
filename = gettempfilename()
with TeaFile.create(filename, "A B", "qq") as tf:
tf.write(1, 1)
tf.write(2, 2)
tf.seekitem(0)
tf.write(3, 3)
with TeaFile.openread(filename) as tf:
assert tf.read() == (3, 3)
assert tf.read() == (2, 2)
with TeaFile.openwrite(filename) as tf:
tf.seekend()
tf.write(4, 4)
tf.write(5, 5)
with TeaFile.openread(filename) as tf:
assert tf.read() == (3, 3)
assert tf.read() == (2, 2)
assert tf.read() == (4, 4)
assert tf.read() == (5, 5)
def test_openwrite():
filename = gettempfilename()
with TeaFile.create(filename, "A B", "qq") as tf:
for i in range(3):
tf.write(i, i * 10)
with TeaFile.openwrite(filename) as tf:
tf.write(77, 770)
with TeaFile.openread(filename) as tf:
assert tf.read()[0] == 0
assert tf.read()[0] == 1
assert tf.read()[0] == 2
assert tf.read()[0] == 77
with TeaFile.openwrite(filename) as tf:
tf.seekitem(0)
tf.write(44, 440)
with TeaFile.openread(filename) as tf:
assert tf.read()[0] == 44
assert tf.read()[0] == 1
assert tf.read()[0] == 2
assert tf.read()[0] == 77
def test_printsnapshot():
filename = gettempfilename()
with TeaFile.create(filename, "A B C", "qqq", \
"here goes the content description!", \
{"data source": "Bluum", "decimals": 4}) as tf:
tf.write(1, 2, 3)
tf.write(2, 2, 3)
TeaFile.printsnapshot(filename)
def test_namevalues():
filename = gettempfilename()
with TeaFile.create(filename, "A B C", "qqq", "mycontent", {"a": 1, "bb": 22}) as tf:
pass
with TeaFile.openread(filename) as tf:
nvs = tf.description.namevalues
assert nvs["a"] == 1
assert nvs["bb"] == 22
assert len(nvs) == 2
def test_decimals():
filename = gettempfilename()
with TeaFile.create(filename, "A B C", "qqq", "mycontent", {"decimals": 3, "bb": 22}) as tf:
pass
with TeaFile.openread(filename) as tf:
nvs = tf.description.namevalues
assert tf.decimals == 3
def test_items_iteration():
filename = gettempfilename()
with TeaFile.create(filename, "A B C", "qqq") as tf:
tf.write(1, 2, 3)
tf.write(21, 22, 23)
with TeaFile.openread(filename) as tf:
        assert len([item for item in tf.items()]) == 2
if __name__ == '__main__':
pass
# to be run with pytest. for debugging purposes, tests may be executed here.
#import sys
#module = sys.modules[__name__]
#setup_module(module)
#
#test_items_iteration()
#
#teardown_module(module)
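    # Alternatively (assuming pytest is installed), the whole module can be
    # run non-interactively with:  pytest -q <this file>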
|
|
"""
Tests for the transport-agnostic engine module.
"""
import unittest
from coilmq.engine import StompEngine
from coilmq.util.frames import Frame, ReceiptFrame
from tests.mock import (MockAuthenticator, MockConnection, MockQueueManager, MockTopicManager)
__authors__ = ['"Hans Lellelid" <hans@xmpl.org>']
__copyright__ = "Copyright 2009 Hans Lellelid"
__license__ = """Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
class EngineTest(unittest.TestCase):
def setUp(self):
self.qm = MockQueueManager()
self.tm = MockTopicManager()
self.conn = MockConnection()
self.auth = MockAuthenticator()
self.engine = StompEngine(connection=self.conn,
queue_manager=self.qm,
topic_manager=self.tm,
authenticator=None)
def tearDown(self):
self.conn.reset()
def _connect(self):
""" Call the engine connect() method so that we have a valid 'session'. """
self.engine.process_frame(Frame('CONNECT'))
def assertErrorFrame(self, frame, msgsub):
""" Assert that the passed in frame is an error frame and that message contains specified
string.
"""
assert frame.cmd.lower() == 'error'
assert msgsub.lower() in frame.headers['message'].lower()
def test_connect_no_auth(self):
""" Test the CONNECT command with no auth required. """
assert self.engine.connected == False
self.engine.process_frame(Frame('CONNECT'))
assert self.engine.connected == True
def test_connect_auth(self):
""" Test the CONNECT command when auth is required. """
self.engine.authenticator = self.auth
assert self.engine.connected == False
self.engine.process_frame(Frame('CONNECT'))
self.assertErrorFrame(self.conn.frames[-1], 'Auth')
assert self.engine.connected == False
self.engine.process_frame(Frame('CONNECT', headers={'login': MockAuthenticator.LOGIN,
'passcode': MockAuthenticator.PASSCODE}))
assert self.engine.connected == True
def test_subscribe_noack(self):
""" Test subscribing to topics and queues w/ no ACK. """
self._connect()
self.engine.process_frame(
Frame('SUBSCRIBE', headers={'destination': '/queue/bar'}))
assert self.conn in self.qm.queues['/queue/bar']
self.engine.process_frame(
Frame('SUBSCRIBE', headers={'destination': '/foo/bar'}))
assert self.conn in self.tm.topics['/foo/bar']
def test_send(self):
""" Test sending to a topic and queue. """
self._connect()
msg = Frame('SEND', headers={
'destination': '/queue/foo'}, body='QUEUEMSG-BODY')
self.engine.process_frame(msg)
self.assertEqual(msg, self.qm.messages[-1])
msg = Frame('SEND', headers={
'destination': '/topic/foo'}, body='TOPICMSG-BODY')
self.engine.process_frame(msg)
self.assertEqual(msg, self.tm.messages[-1])
msg = Frame('SEND', headers={}, body='TOPICMSG-BODY')
self.engine.process_frame(msg)
self.assertErrorFrame(self.conn.frames[-1], 'Missing destination')
def test_receipt(self):
""" Test pushing frames with a receipt specified. """
self._connect()
receipt_id = 'FOOBAR'
msg = Frame('SEND', headers={
'destination': '/queue/foo', 'receipt': receipt_id}, body='QUEUEMSG-BODY')
self.engine.process_frame(msg)
rframe = self.conn.frames[-1]
self.assertIsInstance(rframe, ReceiptFrame)
self.assertEqual(receipt_id, rframe.headers.get('receipt-id'))
receipt_id = 'FOOBAR2'
self.engine.process_frame(Frame('SUBSCRIBE', headers={
'destination': '/queue/bar', 'receipt': receipt_id}))
rframe = self.conn.frames[-1]
self.assertIsInstance(rframe, ReceiptFrame)
self.assertEqual(receipt_id, rframe.headers.get('receipt-id'))
def test_subscribe_ack(self):
""" Test subscribing to a queue with ack=true """
self._connect()
self.engine.process_frame(Frame('SUBSCRIBE', headers={'destination': '/queue/bar',
'ack': 'client'}))
assert self.conn.reliable_subscriber == True
assert self.conn in self.qm.queues['/queue/bar']
def test_unsubscribe(self):
""" Test the UNSUBSCRIBE command. """
self._connect()
self.engine.process_frame(
Frame('SUBSCRIBE', headers={'destination': '/queue/bar'}))
assert self.conn in self.qm.queues['/queue/bar']
self.engine.process_frame(
Frame('UNSUBSCRIBE', headers={'destination': '/queue/bar'}))
assert self.conn not in self.qm.queues['/queue/bar']
self.engine.process_frame(
Frame('UNSUBSCRIBE', headers={'destination': '/invalid'}))
def test_begin(self):
""" Test transaction BEGIN. """
self._connect()
self.engine.process_frame(
Frame('BEGIN', headers={'transaction': 'abc'}))
assert 'abc' in self.engine.transactions
assert len(self.engine.transactions['abc']) == 0
def test_commit(self):
""" Test transaction COMMIT. """
self._connect()
self.engine.process_frame(
Frame('BEGIN', headers={'transaction': 'abc'}))
self.engine.process_frame(
Frame('BEGIN', headers={'transaction': '123'}))
self.engine.process_frame(Frame(
'SEND', headers={'destination': '/dest', 'transaction': 'abc'}, body='ASDF'))
self.engine.process_frame(Frame(
'SEND', headers={'destination': '/dest', 'transaction': 'abc'}, body='ASDF'))
self.engine.process_frame(Frame(
'SEND', headers={'destination': '/dest', 'transaction': '123'}, body='ASDF'))
assert len(self.tm.messages) == 0
self.engine.process_frame(
Frame('COMMIT', headers={'transaction': 'abc'}))
assert len(self.tm.messages) == 2
assert len(self.engine.transactions) == 1
self.engine.process_frame(
Frame('COMMIT', headers={'transaction': '123'}))
assert len(self.tm.messages) == 3
assert len(self.engine.transactions) == 0
def test_commit_invalid(self):
""" Test invalid states for transaction COMMIT. """
self._connect()
# Send a message with invalid transaction
f = Frame('SEND', headers={
'destination': '/dest', 'transaction': '123'}, body='ASDF')
self.engine.process_frame(f)
self.assertErrorFrame(self.conn.frames[-1], 'invalid transaction')
# Attempt to commit invalid transaction
self.engine.process_frame(
Frame('COMMIT', headers={'transaction': 'abc'}))
# Attempt to commit already-committed transaction
self.engine.process_frame(
Frame('BEGIN', headers={'transaction': 'abc'}))
self.engine.process_frame(Frame(
'SEND', headers={'destination': '/dest', 'transaction': 'abc'}, body='FOO'))
self.engine.process_frame(
Frame('COMMIT', headers={'transaction': 'abc'}))
self.engine.process_frame(
Frame('COMMIT', headers={'transaction': 'abc'}))
self.assertErrorFrame(self.conn.frames[-1], 'invalid transaction')
def test_abort(self):
""" Test transaction ABORT. """
self._connect()
self.engine.process_frame(
Frame('BEGIN', headers={'transaction': 'abc'}))
self.engine.process_frame(
Frame('BEGIN', headers={'transaction': '123'}))
f1 = Frame('SEND', headers={
'destination': '/dest', 'transaction': 'abc'}, body='ASDF')
self.engine.process_frame(f1)
f2 = Frame('SEND', headers={
'destination': '/dest', 'transaction': 'abc'}, body='ASDF')
self.engine.process_frame(f2)
f3 = Frame('SEND', headers={
'destination': '/dest', 'transaction': '123'}, body='ASDF')
self.engine.process_frame(f3)
assert len(self.tm.messages) == 0
self.engine.process_frame(
Frame('ABORT', headers={'transaction': 'abc'}))
assert len(self.tm.messages) == 0
assert len(self.engine.transactions) == 1
def test_abort_invalid(self):
""" Test invalid states for transaction ABORT. """
self._connect()
self.engine.process_frame(
Frame('ABORT', headers={'transaction': 'abc'}))
self.assertErrorFrame(self.conn.frames[-1], 'invalid transaction')
self.engine.process_frame(
Frame('BEGIN', headers={'transaction': 'abc'}))
self.engine.process_frame(
Frame('ABORT', headers={'transaction': 'abc'}))
self.engine.process_frame(
Frame('ABORT', headers={'transaction': 'abc2'}))
self.assertErrorFrame(self.conn.frames[-1], 'invalid transaction')
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard.api import sahara as saharaclient
import openstack_dashboard.dashboards.project.data_processing. \
cluster_templates.workflows.create as t_flows
import openstack_dashboard.dashboards.project.data_processing. \
clusters.workflows.create as c_flow
import openstack_dashboard.dashboards.project.data_processing. \
utils.workflow_helpers as whelpers
LOG = logging.getLogger(__name__)
DATA_SOURCE_CREATE_URL = ("horizon:project:data_processing.data_sources"
":create-data-source")
class JobExecutionGeneralConfigAction(workflows.Action):
job_input = forms.DynamicChoiceField(
label=_("Input"),
initial=(None, "None"),
add_item_link=DATA_SOURCE_CREATE_URL,
required=False)
job_output = forms.DynamicChoiceField(
label=_("Output"),
initial=(None, "None"),
add_item_link=DATA_SOURCE_CREATE_URL,
required=False)
def __init__(self, request, *args, **kwargs):
super(JobExecutionGeneralConfigAction, self).__init__(request,
*args,
**kwargs)
if request.REQUEST.get("job_id", None) is None:
self.fields["job"] = forms.ChoiceField(
label=_("Job"))
self.fields["job"].choices = self.populate_job_choices(request)
else:
self.fields["job"] = forms.CharField(
widget=forms.HiddenInput(),
initial=request.REQUEST.get("job_id", None))
def populate_job_input_choices(self, request, context):
return self.get_data_source_choices(request, context)
def populate_job_output_choices(self, request, context):
return self.get_data_source_choices(request, context)
def get_data_source_choices(self, request, context):
try:
data_sources = saharaclient.data_source_list(request)
except Exception:
data_sources = []
exceptions.handle(request,
_("Unable to fetch data sources."))
choices = [(data_source.id, data_source.name)
for data_source in data_sources]
choices.insert(0, (None, 'None'))
return choices
def populate_job_choices(self, request):
try:
jobs = saharaclient.job_list(request)
except Exception:
jobs = []
exceptions.handle(request,
_("Unable to fetch jobs."))
choices = [(job.id, job.name)
for job in jobs]
return choices
class Meta(object):
name = _("Job")
help_text_template = (
"project/data_processing.jobs/_launch_job_help.html")
class JobExecutionExistingGeneralConfigAction(JobExecutionGeneralConfigAction):
cluster = forms.ChoiceField(
label=_("Cluster"),
initial=(None, "None"),
widget=forms.Select(attrs={"class": "cluster_choice"}))
def populate_cluster_choices(self, request, context):
try:
clusters = saharaclient.cluster_list(request)
except Exception:
clusters = []
exceptions.handle(request,
_("Unable to fetch clusters."))
choices = [(cluster.id, cluster.name)
for cluster in clusters]
return choices
class Meta(object):
name = _("Job")
help_text_template = (
"project/data_processing.jobs/_launch_job_help.html")
class JobConfigAction(workflows.Action):
MAIN_CLASS = "edp.java.main_class"
JAVA_OPTS = "edp.java.java_opts"
EDP_MAPPER = "edp.streaming.mapper"
EDP_REDUCER = "edp.streaming.reducer"
EDP_PREFIX = "edp."
property_name = forms.ChoiceField(
required=False,
)
job_configs = forms.CharField(
required=False,
widget=forms.HiddenInput())
job_params = forms.CharField(
required=False,
widget=forms.HiddenInput())
job_args_array = forms.CharField(
required=False,
widget=forms.HiddenInput())
job_type = forms.CharField(
required=False,
widget=forms.HiddenInput())
main_class = forms.CharField(label=_("Main Class"),
required=False)
java_opts = forms.CharField(label=_("Java Opts"),
required=False)
streaming_mapper = forms.CharField(label=_("Mapper"))
streaming_reducer = forms.CharField(label=_("Reducer"))
def __init__(self, request, *args, **kwargs):
super(JobConfigAction, self).__init__(request, *args, **kwargs)
job_ex_id = request.REQUEST.get("job_execution_id")
if job_ex_id is not None:
job_ex_id = request.REQUEST.get("job_execution_id")
job_ex = saharaclient.job_execution_get(request, job_ex_id)
job_configs = job_ex.job_configs
edp_configs = {}
if 'configs' in job_configs:
configs, edp_configs = (
self.clean_edp_configs(job_configs['configs']))
self.fields['job_configs'].initial = (
json.dumps(configs))
if 'params' in job_configs:
self.fields['job_params'].initial = (
json.dumps(job_configs['params']))
job_args = json.dumps(job_configs['args'])
self.fields['job_args_array'].initial = job_args
if self.MAIN_CLASS in edp_configs:
self.fields['main_class'].initial = (
edp_configs[self.MAIN_CLASS])
if self.JAVA_OPTS in edp_configs:
self.fields['java_opts'].initial = (
edp_configs[self.JAVA_OPTS])
if self.EDP_MAPPER in edp_configs:
self.fields['streaming_mapper'].initial = (
edp_configs[self.EDP_MAPPER])
if self.EDP_REDUCER in edp_configs:
self.fields['streaming_reducer'].initial = (
edp_configs[self.EDP_REDUCER])
def clean(self):
cleaned_data = super(workflows.Action, self).clean()
job_type = cleaned_data.get("job_type", None)
if job_type != "MapReduce.Streaming":
if "streaming_mapper" in self._errors:
del self._errors["streaming_mapper"]
if "streaming_reducer" in self._errors:
del self._errors["streaming_reducer"]
return cleaned_data
def populate_property_name_choices(self, request, context):
job_id = request.REQUEST.get("job_id") or request.REQUEST.get("job")
job_type = saharaclient.job_get(request, job_id).type
job_configs = (
saharaclient.job_get_configs(request, job_type).job_config)
choices = [(param['value'], param['name'])
for param in job_configs['configs']]
return choices
def clean_edp_configs(self, configs):
edp_configs = {}
for key, value in configs.iteritems():
if key.startswith(self.EDP_PREFIX):
edp_configs[key] = value
for rmkey in edp_configs.keys():
del configs[rmkey]
return (configs, edp_configs)
class Meta(object):
name = _("Configure")
help_text_template = (
"project/data_processing.jobs/_launch_job_configure_help.html")
class JobExecutionGeneralConfig(workflows.Step):
action_class = JobExecutionGeneralConfigAction
def contribute(self, data, context):
for k, v in data.items():
if k in ["job_input", "job_output"]:
context["job_general_" + k] = None if (v in [None, ""]) else v
else:
context["job_general_" + k] = v
return context
class JobExecutionExistingGeneralConfig(workflows.Step):
action_class = JobExecutionExistingGeneralConfigAction
def contribute(self, data, context):
for k, v in data.items():
if k in ["job_input", "job_output"]:
context["job_general_" + k] = None if (v in [None, ""]) else v
else:
context["job_general_" + k] = v
return context
class JobConfig(workflows.Step):
action_class = JobConfigAction
template_name = 'project/data_processing.jobs/config_template.html'
def contribute(self, data, context):
job_config = self.clean_configs(
json.loads(data.get("job_configs", '{}')))
job_params = self.clean_configs(
json.loads(data.get("job_params", '{}')))
job_args_array = self.clean_configs(
json.loads(data.get("job_args_array", '[]')))
job_type = data.get("job_type", '')
context["job_type"] = job_type
context["job_config"] = {"configs": job_config}
context["job_config"]["args"] = job_args_array
if job_type in ["Java", "Spark"]:
context["job_config"]["configs"][JobConfigAction.MAIN_CLASS] = (
data.get("main_class", ""))
context["job_config"]["configs"][JobConfigAction.JAVA_OPTS] = (
data.get("java_opts", ""))
elif job_type == "MapReduce.Streaming":
context["job_config"]["configs"][JobConfigAction.EDP_MAPPER] = (
data.get("streaming_mapper", ""))
context["job_config"]["configs"][JobConfigAction.EDP_REDUCER] = (
data.get("streaming_reducer", ""))
else:
context["job_config"]["params"] = job_params
return context
@staticmethod
def clean_configs(configs):
cleaned_conf = None
if isinstance(configs, dict):
cleaned_conf = dict([(k.strip(), v.strip())
for k, v in configs.items()
if len(v.strip()) > 0 and len(k.strip()) > 0])
elif isinstance(configs, list):
cleaned_conf = list([v.strip() for v in configs
if len(v.strip()) > 0])
return cleaned_conf
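# Illustrative behaviour of JobConfig.clean_configs (hypothetical values):
#   clean_configs({' key ': ' val ', 'empty': ''}) -> {'key': 'val'}
#   clean_configs([' a ', '', 'b']) -> ['a', 'b']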
class NewClusterConfigAction(c_flow.GeneralConfigAction):
persist_cluster = forms.BooleanField(
label=_("Persist cluster after job exit"),
required=False)
class Meta(object):
name = _("Configure Cluster")
help_text_template = (
"project/data_processing.clusters/_configure_general_help.html")
class ClusterGeneralConfig(workflows.Step):
action_class = NewClusterConfigAction
contributes = ("hidden_configure_field", )
def contribute(self, data, context):
for k, v in data.items():
context["cluster_general_" + k] = v
return context
class LaunchJob(workflows.Workflow):
slug = "launch_job"
name = _("Launch Job")
finalize_button_name = _("Launch")
success_message = _("Job launched")
failure_message = _("Could not launch job")
success_url = "horizon:project:data_processing.job_executions:index"
default_steps = (JobExecutionExistingGeneralConfig, JobConfig)
def handle(self, request, context):
saharaclient.job_execution_create(
request,
context["job_general_job"],
context["job_general_cluster"],
context["job_general_job_input"],
context["job_general_job_output"],
context["job_config"])
return True
class SelectHadoopPluginAction(t_flows.SelectPluginAction):
def __init__(self, request, *args, **kwargs):
super(SelectHadoopPluginAction, self).__init__(request,
*args,
**kwargs)
self.fields["job_id"] = forms.ChoiceField(
label=_("Plugin name"),
initial=request.GET.get("job_id") or request.POST.get("job_id"),
widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))
self.fields["job_configs"] = forms.ChoiceField(
label=_("Job configs"),
widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))
self.fields["job_args"] = forms.ChoiceField(
label=_("Job args"),
widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))
self.fields["job_params"] = forms.ChoiceField(
label=_("Job params"),
widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))
job_ex_id = request.REQUEST.get("job_execution_id")
if job_ex_id is not None:
self.fields["job_execution_id"] = forms.ChoiceField(
label=_("Job Execution ID"),
initial=request.REQUEST.get("job_execution_id"),
widget=forms.HiddenInput(
attrs={"class": "hidden_create_field"}))
            job_configs = (
                saharaclient.job_execution_get(request,
                                               job_ex_id).job_configs)
if "configs" in job_configs:
self.fields["job_configs"].initial = (
json.dumps(job_configs["configs"]))
if "params" in job_configs:
self.fields["job_params"].initial = (
json.dumps(job_configs["params"]))
if "args" in job_configs:
self.fields["job_args"].initial = (
json.dumps(job_configs["args"]))
class Meta(object):
name = _("Select plugin and hadoop version for cluster")
help_text_template = ("project/data_processing.clusters/"
"_create_general_help.html")
class SelectHadoopPlugin(workflows.Step):
action_class = SelectHadoopPluginAction
class ChosePluginVersion(workflows.Workflow):
slug = "lunch_job"
name = _("Launch Job")
finalize_button_name = _("Create")
success_message = _("Created")
failure_message = _("Could not create")
success_url = "horizon:project:data_processing.cluster_templates:index"
default_steps = (SelectHadoopPlugin,)
class LaunchJobNewCluster(workflows.Workflow):
slug = "launch_job"
name = _("Launch Job")
finalize_button_name = _("Launch")
success_message = _("Job launched")
failure_message = _("Could not launch job")
success_url = "horizon:project:data_processing.jobs:index"
default_steps = (ClusterGeneralConfig,
JobExecutionGeneralConfig,
JobConfig)
def handle(self, request, context):
node_groups = None
plugin, hadoop_version = (
whelpers.get_plugin_and_hadoop_version(request))
ct_id = context["cluster_general_cluster_template"] or None
user_keypair = context["cluster_general_keypair"] or None
try:
cluster = saharaclient.cluster_create(
request,
context["cluster_general_cluster_name"],
plugin, hadoop_version,
cluster_template_id=ct_id,
default_image_id=context["cluster_general_image"],
description=context["cluster_general_description"],
node_groups=node_groups,
user_keypair_id=user_keypair,
is_transient=not(context["cluster_general_persist_cluster"]),
net_id=context.get(
"cluster_general_neutron_management_network",
None))
except Exception:
exceptions.handle(request,
_("Unable to create new cluster for job."))
return False
try:
saharaclient.job_execution_create(
request,
context["job_general_job"],
cluster.id,
context["job_general_job_input"],
context["job_general_job_output"],
context["job_config"])
except Exception:
exceptions.handle(request,
_("Unable to launch job."))
return False
return True
|
|
import sys, random, string, uuid, pickle, zlib, base64
from Bio.Range import GenomicRange, ranges_to_coverage, merge_ranges
from Bio.Sequence import rc
import Bio.Graph
class Transcript:
def __init__(self):
self._exons = []
self._junctions = []
self._direction = None
self._transcript_name = None
self._gene_name = None
self._name = None # for a single name
self._range = None # set if not chimeric
self._id = str(uuid.uuid4())
self._payload = []
self._sequence = None
  # _initialize is a dummy function that is run when methods are accessed.
  # This allows us to hold off running the time-consuming initialization of
  # the GPD until it is actually accessed, so we can defer this burden more
  # easily in multiprocessing.
def _initialize(self): return
@property
def exons(self):
self._initialize()
return self._exons
@property
  def junctions(self):
self._initialize()
return self._junctions
def validate(self):
self._initialize()
    # check the structure: exons must be ordered and non-overlapping
    prev = None
    for exon in self.exons:
      rng = exon.rng
      if prev:
        # each exon must start after the previous one ends and be well-formed
        if not (rng.start > prev.end and rng.end >= rng.start):
          return False
      prev = rng
    return True
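  # Illustrative behaviour (hypothetical exons): exons chr1:1-100 and
  # chr1:150-200 validate to True; exons chr1:1-100 and chr1:50-200
  # (overlapping) validate to False.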
def copy(self):
self._initialize()
tx_str = self.dump_serialized()
tx = Transcript()
tx.load_serialized(tx_str)
return tx
# Pre: Start base index 0
# Post: Finish base index 1
def subset(self,start,finish):
self._initialize()
# construct a new transcript
#print str(start)+' to '+str(finish)
tx = self.copy()
keep_ranges = []
index = 0
z = 0
for exon in tx.exons:
z+=1
original_rng = exon.rng
rng = exon.rng.copy()
      done = False
#print 'exon length '+str(rng.length())
if start >= index and start < index+original_rng.length(): # we are in this one
rng.start = original_rng.start+(start-index) # fix the start
#print 'fixstart '+str(original_rng.start)+' to '+str(rng.start)
if finish > index and finish <= index+original_rng.length():
rng.end = original_rng.start+(finish-index)-1
done = True
#print 'fixend '+str(original_rng.end)+' to '+str(rng.end)
if finish <= index+original_rng.length(): # we are in the last exon we need
index+= original_rng.length()
keep_ranges.append(rng)
break
if index+original_rng.length() < start: # we don't need any bases from this
index += original_rng.length()
continue # we don't use this exon
keep_ranges.append(rng)
index += original_rng.length()
if index > finish: break
if done: break
tx.set_exons_and_junctions_from_ranges(keep_ranges)
#print 'ranges:'
#for rng in keep_ranges:
# print rng
# print rng.length()
return tx
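  # Illustrative use (hypothetical transcript): for a transcript whose exons
  # cover 100 transcript bases, tx.subset(0, 10) returns a new Transcript
  # covering transcript bases 1-10 (start is 0-based, finish is 1-based).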
def dump_serialized(self):
self._initialize()
ln = self.get_fake_gpd_line()
return base64.b64encode(zlib.compress(pickle.dumps([ln,self._direction,self._transcript_name,self._gene_name,\
self._range,self._id,self._payload,\
self._sequence])))
def load_serialized(self,instr):
self._initialize()
vals = pickle.loads(zlib.decompress(base64.b64decode(instr)))
import Bio.Format.GPD as inGPD
gpd = inGPD.GPD(vals[0])
self.exons = gpd.exons
self.junctions = gpd.junctions
self._direction = vals[1]
self._transcript_name = vals[2]
self._gene_name = vals[3]
self._range = vals[4] # set if not chimeric
self._id = vals[5]
self._payload = vals[6]
self._sequence = vals[7]
def get_junction_string(self):
self._initialize()
if len(self.exons) < 2: return None
return ",".join([x.get_string() for x in self.junctions])
def set_payload(self,val):
self._initialize()
self._payload = [val]
def get_payload(self):
self._initialize()
return self._payload[0]
# Post: the unique python ID for this transcript
def get_id(self):
return self._id
# Post: Return the number of overlapping base pairs
# between self and tx2 transcript
def overlap_size(self,tx2):
self._initialize()
total = 0
for e1 in [x.get_range() for x in self.exons]:
for e2 in [x.get_range() for x in tx2.exons]:
total += e1.overlap_size(e2)
return total
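  # Example (hypothetical exons): if self has a single exon chr1:1-100 and
  # tx2 has a single exon chr1:51-150, overlap_size(tx2) returns 50.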
def get_exon_count(self):
self._initialize()
return len(self.exons)
  # Pre: uses the existing exons; sets self._range (left unset if empty or chimeric)
def set_range(self):
self._initialize()
if len(self.exons) == 0: return None # its ... nothing
chrs = list(set([x.rng.chr for x in self.exons]))
if len(chrs) > 1: return None # its chimeric
self._range = GenomicRange(chrs[0],self.exons[0].rng.start,self.exons[-1].rng.end)
def get_range(self):
self._initialize()
if self._range:
return self._range
return GenomicRange(self.exons[0].get_range().chr,self.exons[0].get_range().start,self.exons[-1].get_range().end)
def union(self,tx2): # keep direction and name of self
self._initialize()
    all_ranges = []
    for rng1 in [x.rng for x in self.exons]:
      for rng2 in [y.rng for y in tx2.exons]:
        u = rng1.union(rng2)
        if u: all_ranges.append(u)
    if len(all_ranges) == 0: return None
    rngs = merge_ranges(all_ranges)
tx = Transcript()
tx.set_exons_and_junctions_from_ranges(rngs)
tx._direction = self._direction
tx._transcript_name = self._transcript_name
tx._gene_name = self._gene_name
return tx
# any gaps smaller than min_intron are joined
# post: returns a new transcript with gaps smoothed
def smooth_gaps(self,min_intron):
self._initialize()
tx = Transcript()
rngs = [self.exons[0].rng.copy()]
for i in range(len(self.exons)-1):
dist = self.exons[i+1].rng.start - rngs[-1].end-1
if dist >= min_intron:
rngs.append(self.exons[i+1].rng.copy())
else:
rngs[-1].end = self.exons[i+1].rng.end
tx.set_exons_and_junctions_from_ranges(rngs)
tx._direction = self._direction
tx._transcript_name = self._transcript_name
tx._gene_name = self._gene_name
return tx
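  # Example (hypothetical exons): with exons chr1:1-100 and chr1:103-200 the
  # intervening gap is 2 bases, so smooth_gaps(10) merges them into a single
  # exon chr1:1-200, while smooth_gaps(2) keeps them separate.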
  # set all exons and subsequently junctions from these exon ranges
# does not set direction of transcript
# ranges need to be ordered in target order left to right
def set_exons_and_junctions_from_ranges(self,rngs):
self._initialize()
self.exons = []
self.junctions = []
for e in rngs:
ex = Exon(GenomicRange(e.chr,e.start,e.end))
self.exons.append(ex)
self.exons[0].set_is_leftmost()
self.exons[-1].set_is_rightmost()
for i in range(0,len(self.exons)-1):
# make a junction
      jx = Junction(GenomicRange(self.exons[i].rng.chr,\
                                 self.exons[i].rng.end,\
                                 self.exons[i].rng.end),\
                    GenomicRange(self.exons[i+1].rng.chr,\
                                 self.exons[i+1].rng.start,\
                                 self.exons[i+1].rng.start))
jx.set_exon_left(self.exons[i])
jx.set_exon_right(self.exons[i+1])
self.junctions.append(jx)
self.set_range()
return
def get_length(self):
self._initialize()
return sum([x.get_length() for x in self.exons])
def set_strand(self,dir):
self._initialize()
self._direction = dir
def get_strand(self):
self._initialize()
return self._direction
  # Greedy: return the first chromosome in the exon array
def get_chrom(self):
self._initialize()
if len(self.exons)==0:
sys.stderr.write("WARNING can't return chromsome with nothing here\n")
return None
return self.exons[0].get_range().chr
  # Pre: A structure is defined
  # Post: The sequence from the reference
def get_sequence(self,ref_dict=None):
self._initialize()
if self._sequence: return self._sequence
if not ref_dict:
sys.stderr.write("ERROR: sequence is not defined and reference is undefined\n")
sys.exit()
self.set_sequence(ref_dict)
return self._sequence
def set_sequence(self,ref_dict):
self._initialize()
strand = '+'
if not self._direction:
sys.stderr.write("WARNING: no strand information for the transcript\n")
if self._direction: strand = self._direction
chr = self.get_chrom()
seq = ''
for e in [x.get_range() for x in self.exons]:
seq += ref_dict[chr][e.start-1:e.end]
if strand == '-': seq = rc(seq)
self._sequence = seq.upper()
def get_gpd_line(self,transcript_name=None,gene_name=None,strand=None):
self._initialize()
tname = self._transcript_name
gname = self._gene_name
dir = self._direction
# check for if we just have a single name
if not tname and not gname:
if self._name:
tname = self._name
gname = self._name
if not tname: tname = transcript_name
if not gname: gname = gene_name
if not dir: dir = strand
    if not tname or not gname or not dir:
      sys.stderr.write("ERROR: transcript name and gene name and direction must be set to output a gpd line or use get_fake_gpd_line()\n")
out = ''
out += tname + "\t"
out += gname + "\t"
out += self.exons[0].rng.chr + "\t"
out += dir + "\t"
out += str(self.exons[0].rng.start-1) + "\t"
out += str(self.exons[-1].rng.end) + "\t"
out += str(self.exons[0].rng.start-1) + "\t"
out += str(self.exons[-1].rng.end) + "\t"
out += str(len(self.exons)) + "\t"
out += str(','.join([str(x.rng.start-1) for x in self.exons]))+','+"\t"
out += str(','.join([str(x.rng.end) for x in self.exons]))+','
return out
def set_gene_name(self,name):
self._initialize()
self._gene_name = name
def get_gene_name(self):
self._initialize()
return self._gene_name
def set_transcript_name(self,name):
self._initialize()
self._transcript_name = name
def get_transcript_name(self):
self._initialize()
return self._transcript_name
def get_fake_psl_line(self,ref):
self._initialize()
e = self
mylen = 0
matches = 0
qstartslist = []
for exon in self.exons:
mylen = exon.rng.length()
matches += mylen
qstartslist.append(matches-mylen)
qstarts = ','.join([str(x) for x in qstartslist])+','
oline = str(matches)+"\t" # 1
oline += "0\t" # 2
oline += "0\t" # 3
oline += "0\t" # 4
oline += "0\t" # 5
oline += "0\t" # 6
oline += "0\t" # 7
oline += "0\t" # 8
oline += e.get_strand()+"\t" # 9
oline += e.get_transcript_name()+"\t" # 10
oline += str(matches)+"\t" # 11
oline += "0\t" # 12
oline += str(matches)+"\t" # 13
oline += e.get_chrom()+"\t" # 14
oline += str(len(ref[e.get_chrom()]))+"\t" # 15
oline += str(e.exons[0].rng.start-1)+"\t" # 16
oline += str(e.exons[-1].rng.end)+"\t" # 17
oline += str(len(e.exons))+"\t" # 18
oline += ','.join([str(e.exons[x].rng.end-(e.exons[x].rng.start-1)) for x in range(0,len(e.exons))])+','+"\t" # 19
oline += qstarts + "\t" # 20
oline += ','.join([str(x.rng.start-1) for x in e.exons])+',' # 21
return oline
def get_fake_gpd_line(self):
self._initialize()
rlen = 8
#name = ''.join(random.choice(string.letters+string.digits) for i in range(0,rlen))
name = str(self.get_id())
out = ''
out += name + "\t"
out += name + "\t"
out += self.exons[0].rng.chr + "\t"
out += '+' + "\t"
out += str(self.exons[0].rng.start-1) + "\t"
out += str(self.exons[-1].rng.end) + "\t"
out += str(self.exons[0].rng.start-1) + "\t"
out += str(self.exons[-1].rng.end) + "\t"
out += str(len(self.exons)) + "\t"
out += str(','.join([str(x.rng.start-1) for x in self.exons]))+','+"\t"
out += str(','.join([str(x.rng.end) for x in self.exons]))+','
return out
def get_junctions_string(self):
self._initialize()
return ';'.join([x.get_range_string() for x in self.junctions])
def junction_overlap(self,tx,tolerance=0):
self._initialize()
return Transcript.JunctionOverlap(self,tx,tolerance)
def exon_overlap(self,tx,multi_minover=10,multi_endfrac=0,multi_midfrac=0.8,single_minover=50,single_frac=0.5,multi_consec=True):
self._initialize()
return Transcript.ExonOverlap(self,tx,multi_minover,multi_endfrac,multi_midfrac,single_minover,single_frac,multi_consec=multi_consec)
class ExonOverlap:
def __init__(self1,tx_obj1,tx_obj2,multi_minover=10,multi_endfrac=0,multi_midfrac=0.8,single_minover=50,single_frac=0.5,multi_consec=True):
self1.tx_obj1 = tx_obj1
self1.tx_obj2 = tx_obj2
self1.multi_minover = multi_minover # multi-exon minimum overlap of each exon
self1.multi_endfrac = multi_endfrac # multi-exon minimum fractional overlap of first or last exon
self1.multi_midfrac = multi_midfrac # multi-exon minimum fractional overlap of internal exons
self1.multi_consec = multi_consec # require consecutive exons for exon overlap of multi_exon
      self1.single_minover = single_minover # single-exon minimum overlap in bases
      self1.single_frac = single_frac # single-exon minimum fractional overlap
      self1.overs = [] # set by calculate_overlap()
      self1.dif1 = [] # filled in by analyze_overs(); initialized here so the
      self1.dif2 = [] # early returns below cannot leave them undefined
      self1.calculate_overlap()
if len(self1.overs) == 0: return None# nothing to analyze
      if self1.tx_obj1.get_exon_count() > 1 and self1.tx_obj2.get_exon_count() > 1 \
         and self1.multi_consec and len(self1.overs) < 2:
        return None # not enough to consider multi-exon transcript overlap
      self1.analyze_overs()
      if self1.tx_obj1.get_exon_count() > 1 and self1.tx_obj2.get_exon_count() > 1 \
         and self1.multi_consec and (min(self1.dif1) != 1 or min(self1.dif2) != 1):
        return None # not enough to consider multi-exon transcript overlap
def __nonzero__(self1):
if len(self1.overs) > 0: return True
return False
def match_exon_count(self1):
return len(self1.overs)
def consecutive_exon_count(self1):
best = 1
consec = 1
for i in range(0,len(self1.dif1)):
if self1.dif1[i] == 1 and self1.dif2[i] == 1:
consec += 1
else:
consec = 1
if consec > best:
best = consec
return best
# Return value if tx_obj2 is a complete subset of tx_obj1 or tx_obj1 is a complete subset of tx_obj2
    # Return 1: Full overlap (mutual subsets)
# Return 2: two is a subset of one
# Return 3: one is a subset of two
# Return False if neither is a subset of the other
def is_subset(self1):
if len(self1.overs) == 0: return False
if len(self1.dif1) > 0: # make sure they are consecutive if more than one
if max(self1.dif1) != 1 or max(self1.dif2) != 1: return False
onecov = self1.start1 and self1.end1
twocov = self1.start2 and self1.end2
if onecov and twocov:
return 1
elif twocov: return 2
elif onecov: return 3
return False
def is_full_overlap(self1):
if len(self1.overs) == 0: return False
if len(self1.dif1) > 0:
if max(self1.dif1) != 1 or max(self1.dif2) != 1: return False
if self1.start1 and self1.end1 and self1.start2 and self1.end2:
return True
return False
# Return True if the transcripts can be combined together
def is_compatible(self1):
if len(self1.overs) == 0: return False
if len(self1.dif1) > 0:
if max(self1.dif1) != 1 or max(self1.dif2) != 1: return False
# If we are still here it is a single run
if (self1.start1 or self1.start2) and (self1.end1 or self1.end2):
return True
return False
def analyze_overs(self1):
#check for full overlap first
self1.dif1 = [self1.overs[i][0]-self1.overs[i-1][0] for i in range(1,len(self1.overs))]
self1.dif2 = [self1.overs[i][1]-self1.overs[i-1][1] for i in range(1,len(self1.overs))]
#see if it starts and ends on first or last junction
self1.start1 = self1.overs[0][0] == 0
self1.start2 = self1.overs[0][1] == 0
self1.end1 = self1.overs[-1][0] == len(self1.tx_obj1.exons)-1
self1.end2 = self1.overs[-1][1] == len(self1.tx_obj2.exons)-1
return
    # Create the array that describes how exons overlap
def calculate_overlap(self1):
overs = []
if not self1.tx_obj1.get_range().overlaps(self1.tx_obj2.get_range()): return # if they dont overlap wont find anything
for i in range(0,len(self1.tx_obj1.exons)):
for j in range(0,len(self1.tx_obj2.exons)):
osize = self1.tx_obj1.exons[i].rng.overlap_size(self1.tx_obj2.exons[j].rng)
ofrac = 0
if osize > 0:
ofrac = min(float(osize)/float(self1.tx_obj1.exons[i].rng.length())\
,float(osize)/float(self1.tx_obj2.exons[j].rng.length()))
          if self1.tx_obj1.get_exon_count() == 1 or self1.tx_obj2.get_exon_count() == 1:
# use single exon rules
if osize >= self1.single_minover and ofrac >= self1.single_frac:
#print 'single exon match'
overs.append([i,j])
else: # for multi exons
if i == 0 or j == 0 or i == len(self1.tx_obj1.exons)-1 or j == len(self1.tx_obj2.exons)-1:
#its on an end
if osize >= self1.multi_minover and ofrac >= self1.multi_endfrac:
#print 'end exon match'
overs.append([i,j])
#else its a middle
elif osize >= self1.multi_minover and ofrac >= self1.multi_midfrac:
#print 'mid exon match'
overs.append([i,j])
#print overs
self1.overs = overs
class JunctionOverlap:
def __init__(self1,tx_obj1,tx_obj2,tolerance=0):
self1.tx_obj1 = tx_obj1
self1.tx_obj2 = tx_obj2
self1.tolerance = tolerance
self1.overs = [] # gets set by calculate_overlap()
self1.calculate_overlap()
if len(self1.overs) == 0: return None# nothing to analyze
self1.analyze_overs()
def __nonzero__(self1):
if len(self1.overs) > 0: return True
return False
def match_junction_count(self1):
return len(self1.overs)
# Return value if tx_obj2 is a complete subset of tx_obj1 or tx_obj1 is a complete subset of tx_obj2
    # Return 1: Full overlap (mutual subsets)
# Return 2: two is a subset of one
# Return 3: one is a subset of two
# Return False if neither is a subset of the other
def is_subset(self1):
if len(self1.overs) == 0: return False
if len(self1.dif1) > 0: # make sure they are consecutive if more than one
if max(self1.dif1) != 1 or max(self1.dif2) != 1: return False
onecov = self1.start1 and self1.end1
twocov = self1.start2 and self1.end2
if onecov and twocov:
return 1
elif twocov: return 2
elif onecov: return 3
return False
def is_full_overlap(self1):
if len(self1.overs) == 0: return False
if len(self1.dif1) > 0:
if max(self1.dif1) != 1 or max(self1.dif2) != 1: return False
if self1.start1 and self1.end1 and self1.start2 and self1.end2:
return True
return False
# Return True if the transcripts can be combined together
def is_compatible(self1):
if len(self1.overs) == 0: return False
if len(self1.dif1) > 0:
if max(self1.dif1) != 1 or max(self1.dif2) != 1: return False
# If we are still here it is a single run
if (self1.start1 or self1.start2) and (self1.end1 or self1.end2):
return True
return False
def analyze_overs(self1):
#check for full overlap first
self1.dif1 = [self1.overs[i][0]-self1.overs[i-1][0] for i in range(1,len(self1.overs))]
self1.dif2 = [self1.overs[i][1]-self1.overs[i-1][1] for i in range(1,len(self1.overs))]
#see if it starts and ends on first or last junction
self1.start1 = self1.overs[0][0] == 0
self1.start2 = self1.overs[0][1] == 0
self1.end1 = self1.overs[-1][0] == len(self1.tx_obj1.junctions)-1
self1.end2 = self1.overs[-1][1] == len(self1.tx_obj2.junctions)-1
return
#Create the array that describes how junctions overlap
def calculate_overlap(self1):
overs = []
if not self1.tx_obj1.get_range().overlaps(self1.tx_obj2.get_range()): return # if they dont overlap wont find anything
for i in range(0,len(self1.tx_obj1.junctions)):
for j in range(0,len(self1.tx_obj2.junctions)):
if self1.tx_obj1.junctions[i].overlaps(self1.tx_obj2.junctions[j],self1.tolerance):
overs.append([i,j])
self1.overs = overs
class Junction:
def __init__(self,rng_left=None,rng_right=None):
self.left = rng_left
self.right = rng_right
self.left_exon = None
self.right_exon = None
def dump_serialized(self):
return pickle.dumps(self)
  def load_serialized(self,instr):
    # rebinding the local name 'self' would be a no-op; copy the state over
    self.__dict__.update(pickle.loads(instr).__dict__)
def get_string(self):
return self.left.chr+':'+str(self.left.end)+'-'+self.right.chr+':'+str(self.right.start)
def get_left_exon(self):
return self.left_exon
def get_right_exon(self):
return self.right_exon
def get_range_string(self):
return self.left.chr+":"+str(self.left.end)+'/'+self.right.chr+":"+str(self.right.start)
def set_left(self,rng):
self.left = rng
def set_right(self,rng):
self.right = rng
  # test equality with another junction
  def equals(self,junc):
    if not self.left.equals(junc.left): return False
    if not self.right.equals(junc.right): return False
    return True
# see if junction overlaps with tolerance
def overlaps(self,junc,tolerance=0):
if not self.left.overlaps_with_padding(junc.left,tolerance): return False
if not self.right.overlaps_with_padding(junc.right,tolerance): return False
return True
  # Output:
  #   -1 if junc comes before self
  #    1 if junc comes after self
  #    0 if they overlap
  #    2 otherwise (e.g. different chromosomes)
  def cmp(self,junc,tolerance=0):
    if self.overlaps(junc,tolerance):
      return 0 # overlap
    if self.left.chr == junc.right.chr:
      if self.left.start > junc.right.start:
        return -1 # junc comes before self
    if self.right.chr == junc.left.chr:
      if self.right.start < junc.left.start:
        return 1 # junc comes after self
    return 2
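  # Example (hypothetical junctions): for j1 spanning chr1:100/chr1:200 and
  # j2 spanning chr1:300/chr1:400, j1.cmp(j2) returns 1 (j2 comes after j1)
  # and j2.cmp(j1) returns -1.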
def set_exon_left(self,ex):
self.left_exon = ex
ex.right_junc = self
def set_exon_right(self,ex):
self.right_exon = ex
ex.left_junc = self
class Exon:
def __init__(self,rng=None):
self.rng = rng
self.left_junc = None
self.right_junc = None
self._is_leftmost = False #bool is it a start or end
self._is_rightmost = False
def dump_serialized(self):
return pickle.dumps(self)
  def load_serialized(self,instr):
    # rebinding the local name 'self' would be a no-op; copy the state over
    self.__dict__.update(pickle.loads(instr).__dict__)
def get_range(self):
return self.rng
def get_length(self):
return self.rng.length()
  def set_left_junc(self,junc):
    self.left_junc = junc
    junc.set_exon_right(self) # this exon sits to the right of its left junction
  def set_right_junc(self,junc):
    self.right_junc = junc
    junc.set_exon_left(self) # this exon sits to the left of its right junction
def set_is_leftmost(self,boo=True): self._is_leftmost = boo # is leftmost
def set_is_rightmost(self,boo=True): self._is_rightmost = boo #is rightmost
# combine together compatible multiple transcript groups to form
# a simpler set of transcripts
class TranscriptLoci:
def __init__(self):
#self.transcripts = []
self.merge_rules = TranscriptLociMergeRules('is_any_overlap')
self.merge_rules.set_juntol(10)
self.g = Bio.Graph.Graph()
def __str__(self):
return str(len(self.g.get_nodes()))+ " nodes"
def remove_transcript(self,tx_id):
txs = self.get_transcripts()
if tx_id not in [x.get_id() for x in txs]:
return
tx = [x for x in txs if x.get_id()==tx_id][0]
for n in [x for x in self.g.get_nodes()]:
if tx_id not in [y.get_id() for y in n.get_payload()]:
continue
n.get_payload().remove(tx)
if len(n.get_payload())==0:
self.g.remove_node(n)
#sys.stderr.write("\n"+str(n.get_payload())+"\n")
#
#sys.stderr.write("\n"+str(len(n.get_payload()))+"\n")
def set_merge_rules(self,mr): self.merge_rules = mr
# using all the transcripts find the depth
def get_depth_per_transcript(self,mindepth=1):
bedarray = []
for tx in self.get_transcripts():
for ex in [x.rng for x in tx.exons]: bedarray.append(ex)
cov = ranges_to_coverage(bedarray)
results = {}
for tx in self.get_transcripts():
tlen = tx.get_length()
bcov = []
for ex in [x.rng for x in tx.exons]:
excov = [[x.overlap_size(ex),x.get_payload()] for x in cov]
for coved in [x for x in excov if x[0] > 0]:
bcov.append(coved)
total_base_coverage = sum([x[0]*x[1] for x in bcov])
average_coverage = float(total_base_coverage)/float(tlen)
minimum_bases_covered = sum([x[0] for x in bcov if x[1] >= mindepth])
fraction_covered_at_minimum = float(minimum_bases_covered)/float(tlen)
res = {'tx':tx,'average_coverage':average_coverage,'fraction_covered':fraction_covered_at_minimum,'mindepth':mindepth,'length_covered':minimum_bases_covered}
results[tx.get_id()] = res
#print average_coverage
#print fraction_covered_at_minimum
#print tlen
#tcov = float(bcov)/float(tlen)
#print tcov
#for c in cov:
# print c
return results
def get_range(self):
chrs = set([x.get_range().chr for x in self.get_transcripts()])
if len(chrs) != 1: return None
start = min([x.get_range().start for x in self.get_transcripts()])
end = max([x.get_range().end for x in self.get_transcripts()])
return GenomicRange(list(chrs)[0],start,end)
def get_transcripts(self):
txs = []
for pays in [x.get_payload() for x in self.g.get_nodes()]:
for pay in pays:
txs.append(pay)
return txs
def partition_loci(self,verbose=False):
#names = []
#for entries in [x.get_payload() for x in self.g.get_nodes()]:
# for entry in entries:
# names.append(entry.get_gene_name())
#sys.stderr.write('-------partition_loci-----'+"\n")
#sys.stderr.write(self.g.get_report()+"\n")
self.g.merge_cycles()
#sys.stderr.write(self.g.get_report()+"\n")
gs = self.g.partition_graph(verbose=verbose)
    tls = [] # make a list of transcript loci
for g in gs:
tl = TranscriptLoci()
tl.merge_rules = self.merge_rules
ns = g.get_nodes()
for n in [x.get_payload() for x in ns]:
for tx in n:
tl.add_transcript(tx)
if len(tl.g.get_nodes()) > 0:
tls.append(tl)
#print '-----------------------'
#names = []
#for tl in tls:
# for tx in tl.get_transcripts():
# names.append(tx.get_gene_name())
#for name in sorted(names):
# print name
#print '--------------------------'
return tls
def add_transcript(self,tx):
for y in [x.get_payload() for x in self.g.get_nodes()]:
      if tx.get_id() in [z.get_id() for z in y]:
#if tx.get_id() in [[y.get_id() for y in x.get_payload()] for x in self.g.get_nodes()]:
sys.stderr.write("WARNING tx is already in graph\n")
return True
# transcript isn't part of graph yet
n = Bio.Graph.Node([tx])
other_nodes = self.g.get_nodes()
self.g.add_node(n)
# now we need to see if its connected anywhere
for n2 in other_nodes:
tx2s = n2.get_payload()
for tx2 in tx2s:
# do exon overlap
er = self.merge_rules.get_exon_rules()
# if we are doing things by exon
if (self.merge_rules.get_use_single_exons() and (tx.get_exon_count() == 1 or tx2.get_exon_count() == 1)) or \
(self.merge_rules.get_use_multi_exons() and (tx.get_exon_count() > 1 and tx2.get_exon_count() > 1)):
eo = tx.exon_overlap(tx2,multi_minover=er['multi_minover'],multi_endfrac=er['multi_endfrac'],multi_midfrac=er['multi_midfrac'],single_minover=er['single_minover'],single_frac=er['single_frac'])
if self.merge_rules.get_merge_type() == 'is_compatible':
if eo.is_compatible():
self.g.add_edge(Bio.Graph.Edge(n,n2),verbose=False)
self.g.add_edge(Bio.Graph.Edge(n2,n),verbose=False)
elif self.merge_rules.get_merge_type() == 'is_subset':
r = eo.is_subset()
if r == 2 or r == 1:
self.g.add_edge(Bio.Graph.Edge(n,n2),verbose=False)
if r == 3 or r == 1:
self.g.add_edge(Bio.Graph.Edge(n2,n),verbose=False)
elif self.merge_rules.get_merge_type() == 'is_full_overlap':
if eo.is_full_overlap():
self.g.add_edge(Bio.Graph.Edge(n,n2),verbose=False)
self.g.add_edge(Bio.Graph.Edge(n2,n),verbose=False)
elif self.merge_rules.get_merge_type() == 'is_any_overlap':
if eo.match_exon_count() > 0:
self.g.add_edge(Bio.Graph.Edge(n,n2),verbose=False)
self.g.add_edge(Bio.Graph.Edge(n2,n),verbose=False)
if self.merge_rules.get_use_junctions():
# do junction overlap
jo = tx.junction_overlap(tx2,self.merge_rules.get_juntol())
#print jo.match_junction_count()
if self.merge_rules.get_merge_type() == 'is_compatible':
if jo.is_compatible():
self.g.add_edge(Bio.Graph.Edge(n,n2),verbose=False)
self.g.add_edge(Bio.Graph.Edge(n2,n),verbose=False)
elif self.merge_rules.get_merge_type() == 'is_subset':
r = jo.is_subset()
if r == 2 or r == 1:
self.g.add_edge(Bio.Graph.Edge(n,n2),verbose=False)
if r == 3 or r == 1:
self.g.add_edge(Bio.Graph.Edge(n2,n),verbose=False)
elif self.merge_rules.get_merge_type() == 'is_full_overlap':
if jo.is_full_overlap():
self.g.add_edge(Bio.Graph.Edge(n,n2),verbose=False)
self.g.add_edge(Bio.Graph.Edge(n2,n),verbose=False)
elif self.merge_rules.get_merge_type() == 'is_any_overlap':
if jo.match_junction_count() > 0:
self.g.add_edge(Bio.Graph.Edge(n,n2),verbose=False)
self.g.add_edge(Bio.Graph.Edge(n2,n),verbose=False)
return True
#def add_transcript_group(self,txg):
# self.transcript_groups.append(txg)
#def merge_down_loci(self):
# # look at the transcript groups that are currently there
# # check for full match
#
# return
# TranscriptLocus Merge Rules
class TranscriptLociMergeRules:
def __init__(self,merge_type):
#Multi-exon rules
self._junction_tolerance = 10
self._possible_types = set(['is_subset','is_compatible','is_full_overlap','is_any_overlap'])
if merge_type not in self._possible_types:
sys.stderr.write("ERROR: "+merge_type+" is not a known merge type\n")
sys.exit()
self._merge_type = merge_type
self._use_junctions = True
# exon rules
self._use_multi_exons = True
self._use_single_exons = True
self._multi_minover=10
self._multi_endfrac=0
self._multi_midfrac=0.8
self._single_minover=100
self._single_frac=0.5
return
def get_use_single_exons(self): return self._use_single_exons
def get_use_multi_exons(self): return self._use_multi_exons
def get_use_junctions(self): return self._use_junctions
def get_exon_rules(self):
return {'multi_minover':self._multi_minover,'multi_endfrac':self._multi_endfrac,'multi_midfrac':self._multi_midfrac,'single_minover':self._single_minover,'single_frac':self._single_frac}
def get_merge_type(self):
return self._merge_type
def set_juntol(self,juntol):
self._junction_tolerance = juntol
def get_juntol(self):
return self._junction_tolerance
def set_use_junctions(self,boo=True):
self._use_junctions = boo
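# Illustrative use (hypothetical transcripts tx1 and tx2, not part of the
# original module):
#   mr = TranscriptLociMergeRules('is_compatible')
#   mr.set_juntol(5)
#   loci = TranscriptLoci()
#   loci.set_merge_rules(mr)
#   loci.add_transcript(tx1)
#   loci.add_transcript(tx2)
#   sub_loci = loci.partition_loci()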
# A transcript group is like the fuzzy gpd class we had before
class TranscriptGroup:
def __init__(self):
    self.junction_groups = [] # These will be more fuzzy definitions
#self.exons = [] # These will be based on the junctions and individual starts
self.transcripts = [] # These are the individual transcripts that make up this group
self._transcript_ids = set()
# Return a representative transcript object
def get_transcript(self,exon_bounds='max'):
out = Transcript()
out.junctions = [x.get_junction() for x in self.junction_groups]
# check for single exon transcript
if len(out.junctions) == 0:
leftcoord = min([x.exons[0].rng.start for x in self.transcripts])
rightcoord = max([x.exons[-1].rng.end for x in self.transcripts])
      e = Exon(GenomicRange(self.transcripts[0].exons[0].rng.chr,leftcoord,rightcoord))
e.set_is_leftmost()
e.set_is_rightmost()
out.exons.append(e)
return out
# get internal exons
self.exons = []
for i in range(0,len(self.junction_groups)-1):
j1 = self.junction_groups[i].get_junction()
j2 = self.junction_groups[i+1].get_junction()
e = Exon(GenomicRange(j1.right.chr,j1.right.end,j2.left.start))
e.set_left_junc(j1)
e.set_right_junc(j2)
#print str(i)+" to "+str(i+1)
out.exons.append(e)
# get left exon
left_exons = [y for y in [self.transcripts[e[0]].junctions[e[1]].get_left_exon() for e in self.junction_groups[0].evidence] if y]
if len(left_exons) == 0:
sys.stderr.write("ERROR no left exon\n")
sys.exit()
e_left = Exon(GenomicRange(out.junctions[0].left.chr,\
min([x.get_range().start for x in left_exons]),
out.junctions[0].left.start))
e_left.set_right_junc(out.junctions[0])
out.exons.insert(0,e_left)
# get right exon
right_exons = [y for y in [self.transcripts[e[0]].junctions[e[1]].get_right_exon() for e in self.junction_groups[-1].evidence] if y]
if len(right_exons) == 0:
sys.stderr.write("ERROR no right exon\n")
sys.exit()
e_right = Exon(GenomicRange(out.junctions[-1].right.chr,\
out.junctions[-1].right.end,\
max([x.get_range().end for x in right_exons])))
e_right.set_left_junc(out.junctions[-1])
out.exons.append(e_right)
return out
def add_transcript(self,tx,juntol=0,verbose=True):
if tx.get_id() in self._transcript_ids: return True
    # check existing transcripts for compatibility
for t in self.transcripts:
ov = t.junction_overlap(tx,juntol)
if ov:
if not ov.is_compatible():
if verbose: sys.stderr.write("transcript is not compatible\n")
return False
else:
if verbose: sys.stderr.write("transcript is not overlapped\n")
return False # if its not overlapped we also can't add
self.transcripts.append(tx)
curr_tx = len(self.transcripts)-1
#print curr_tx
    # see if there are no junctions yet
if len(self.junction_groups) == 0:
for i in range(0,len(tx.junctions)):
jg = TranscriptGroup.JunctionGroup(self)
jg.add_junction(curr_tx,i,tolerance=juntol)
self.junction_groups.append(jg)
else: # there is already a transcript(s) here to work around
before = []
middle = []
after = []
for j in range(0,len(tx.junctions)):
# see if its before the existing set
cmp = self.junction_groups[0].get_junction().cmp(tx.junctions[j])
if cmp == -1: before.append(j)
# see if it goes in the existing set
for k in range(0,len(self.junction_groups)):
ov = self.junction_groups[k].get_junction().overlaps(tx.junctions[j],tolerance=juntol) #may need to add a tolerance
if ov: middle.append([j,k])
# see if it goes after this set
cmp = self.junction_groups[-1].get_junction().cmp(tx.junctions[j])
if cmp == 1: after.append(j)
# add to the middle values before we disrupt indexing
#print '---'
#print len(before)
#print len(middle)
#print len(after)
#print '---'
for v in middle:
self.junction_groups[v[1]].add_junction(curr_tx,v[0],tolerance=juntol)
#add to the beginning and then the end
for i in reversed(before):
jg = TranscriptGroup.JunctionGroup(self)
jg.add_junction(curr_tx,i,tolerance=juntol)
self.junction_groups.insert(0,jg)
for i in after:
jg = TranscriptGroup.JunctionGroup(self)
jg.add_junction(curr_tx,i,tolerance=juntol)
self.junction_groups.append(jg)
#if len(tx.junctions)==0:
# jg = TranscriptGroup.JunctionGroup(self)
# jg.add_junction(curr_tx,i)
# self.junctions.append(jg)
self._transcript_ids.add(tx.get_id())
return True
class JunctionGroup:
def __init__(self1,outer):
self1.outer = outer
      self1.evidence = [] # list of [transcript index, junction index] pairs,
                          # indexing into outer.transcripts and
                          # outer.transcripts[i].junctions respectively
self1.representative_junction = None #calculated as needed
def get_junction(self1): # return the consensus junction
if self1.representative_junction:
return self1.representative_junction
left_rngs = []
right_rngs = []
for j in [self1.outer.transcripts[x[0]].junctions[x[1]] for x in self1.evidence]:
left_rngs.append(j.left)
right_rngs.append(j.right)
left = _mode([x.end for x in left_rngs])
right = _mode([x.start for x in right_rngs])
outj = Junction(GenomicRange(left_rngs[0].chr,left,left),GenomicRange(right_rngs[0].chr,right,right))
self1.representative_junction = outj
return outj
def add_junction(self1,tx_index,junc_index,tolerance=0):
self1.representative_junction = None
if len(self1.evidence)==0:
# go ahead and add it
#j = self1.outer.transcripts[tx_index].junctions[junc_index]
self1.evidence.append([tx_index,junc_index])
else:
# check it and add it
if not self1.get_junction().overlaps(self1.outer.transcripts[tx_index].junctions[junc_index],tolerance=tolerance):
sys.stderr.write("WARNING Unable to add junction JunctionGroup\n"+self1.get_junction().get_range_string()+"\n"+self1.outer.transcripts[tx_index].junctions[junc_index].get_range_string()+"\n")
return False
self1.evidence.append([tx_index,junc_index])
def _mode(mylist):
counts = [mylist.count(x) for x in mylist]
maxcount = max(counts)
avg = sum([float(x) for x in mylist])/len(mylist)
#print counts
dist = [abs(float(x)-avg) for x in mylist]
best_list = []
best_dist = []
for i in range(0,len(mylist)):
      if counts[i] == maxcount:
        best_list.append(mylist[i])
        best_dist.append(dist[i])
abs_best_dist = min(best_dist)
for i in range(0,len(best_dist)):
if best_dist[i] == abs_best_dist:
return best_list[i]
sys.stderr.write("Warning: trouble finding best\n")
return best_list[0]
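# Illustrative worked example (not in the original source; assumes the
# corrected max-count check above): _mode picks the most frequent value and
# breaks ties by closeness to the mean. For _mode([10, 10, 13, 13, 11]) both
# 10 and 13 appear twice; the mean is 11.4, so 10 (distance 1.4) is returned
# rather than 13 (distance 1.6).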
class Transcriptome:
def __init__(self,gpd_file=None,ref_fasta=None):
self.transcripts = []
if gpd_file:
from Bio.Format.GPD import GPD
with open(gpd_file) as inf:
for line in inf:
self.transcripts.append(GPD(line))
if ref_fasta:
for i in range(0,len(self.transcripts)):
self.transcripts[i].get_sequence(ref_fasta)
def dump_serialized(self):
sx = base64.b64encode(zlib.compress(pickle.dumps([x.dump_serialized() for x in self.transcripts])))
return sx
def load_serialized(self,instr):
txs = []
for v in pickle.loads(zlib.decompress(base64.b64decode(instr))):
tx = Transcript()
tx.load_serialized(v)
txs.append(tx)
self.transcripts = txs
def get_transcripts(self):
return self.transcripts
def add_transcript(self,transcript):
self.transcripts.append(transcript)
def __str__(self):
ostr = ''
ostr += "Transcriptome containing "+str(len(self.transcripts))+" transcripts "
ostr += "covering "+str(sum([x.get_length() for x in self.transcripts]))+" bases"
return ostr
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_factory_ops.constant_value."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedConstantValueOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters(
#=========================================================================
# 0-dimensional tensors.
dict(pylist='x', expected_shape=()),
#=========================================================================
# 1-dimensional tensors.
dict(pylist=[1, 2, 3], expected_shape=(3,)),
#=========================================================================
# 2-dimensional tensors.
dict(pylist=[[1, 2, 3], [4], [5, 6]], expected_shape=(3, None)),
dict(pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], expected_shape=(3, None)),
#=========================================================================
# 3-dimensional tensors.
dict(
pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
expected_shape=(3, None, None)),
dict(
pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
expected_shape=(3, None, 2)),
dict(
pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
inner_shape=(2,),
expected_shape=(3, None, 2)),
dict(
pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
inner_shape=(2,),
expected_shape=(3, None, 2)),
# 3-dimensional tensors with numpy arrays
dict(
pylist=[[[1, 2], np.array([3, np.array(4)])],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
expected_shape=(3, None, None)),
dict(
pylist=[[[1, 2], np.array([3, np.array(4)])],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
expected_shape=(3, None, 2)),
dict(
pylist=[[[1, 2], np.array([3, np.array(4)])],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
inner_shape=(2,),
expected_shape=(3, None, 2)),
dict(
pylist=[[[1, 2], np.array([3, np.array(4)])],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
inner_shape=(2,),
expected_shape=(3, None, 2)),
#=========================================================================
# 4-dimensional tensors.
dict(
pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
expected_shape=(2, None, None, None)),
dict(
pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
ragged_rank=1,
expected_shape=(2, None, 2, 2)),
dict(
pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
inner_shape=(2,),
expected_shape=(2, None, None, 2)),
dict(
pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
inner_shape=(2, 2),
expected_shape=(2, None, 2, 2)),
# 4-dimensional tensors with numpy arrays
dict(
pylist=np.array([[[np.array([1, 2]), [3, 4]], [[5, 6], [7, 8]]],
np.array([[[2, 4], [6, 8]], [[1, 5], [7, 9]]])]),
expected_shape=(2, None, None, None)),
#=========================================================================
# Empty tensors (no scalar values) w/ default ragged_rank and inner_shape
dict(pylist=[], expected_shape=(0,)),
dict(pylist=[[], [], np.array([])], expected_shape=(3, None)),
dict(
pylist=[[[], []], [], [[], [[]]]],
expected_shape=(3, None, None, None)),
dict(
pylist=np.array([np.array([[], []]),
np.array([]), [[], [[]]]]),
expected_shape=(3, None, None, None)),
#=========================================================================
# Empty tensors (no scalar values) w/ explicit ragged_rank or inner_shape
dict(pylist=[], ragged_rank=1, expected_shape=(0, None)),
dict(pylist=[], ragged_rank=2, expected_shape=(0, None, None)),
dict(pylist=[], inner_shape=(0, 100, 20), expected_shape=(0, 100, 20)),
dict(
pylist=[],
ragged_rank=1,
inner_shape=(100, 20),
expected_shape=(0, None, 100, 20)),
dict(
pylist=[],
ragged_rank=2,
inner_shape=(100, 20),
expected_shape=(0, None, None, 100, 20)),
dict(pylist=[[], [], []], ragged_rank=2, expected_shape=(3, None, None)),
dict(pylist=[], inner_shape=(0,), expected_shape=(0,)),
dict(pylist=[[]], inner_shape=(1, 0), expected_shape=(1, 0)),
dict(
pylist=np.array([]),
ragged_rank=1,
inner_shape=(100, 20),
expected_shape=(0, None, 100, 20)),
#=========================================================================
# default/inferred dtypes.
#
# Note: numpy has different default/inferred types than tensorflow.
# Since we are using values, not tensors, we get the default numpy types
# here.
dict(pylist=[], expected_dtype=np.float64),
dict(pylist=[[[], [[[]], []]]], expected_dtype=np.float64),
dict(pylist=[[1, 2], [3], [4, 5, 6]], expected_dtype=np.int64),
dict(pylist=[[1., 2.], [], [4., 5., 6.]], expected_dtype=np.float64),
dict(pylist=[[1, 2], [3.], [4, 5, 6]], expected_dtype=np.float64),
dict(pylist=[[b'a', b'b'], [b'c']], expected_dtype=np.dtype('S1')),
dict(pylist=[[True]], expected_dtype=np.bool),
dict(
pylist=[np.array([1, 2]), np.array([3.]), [4, 5, 6]],
expected_dtype=np.float64),
#=========================================================================
# explicit dtypes
dict(pylist=[], dtype=np.float32),
dict(pylist=[], dtype=np.dtype('S1')),
dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=np.int64),
dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=np.int32),
dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=np.float32),
dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=np.float16),
dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=np.float32),
dict(
pylist=[[b'a', b'b'], [b'c'], [b'd', b'e', b'f']],
dtype=np.dtype('S1')),
)
def testRaggedValues(self,
pylist,
dtype=None,
ragged_rank=None,
inner_shape=None,
expected_shape=None,
expected_dtype=None):
"""Tests that `ragged_value(pylist).to_list() == pylist`."""
rt = ragged_factory_ops.constant_value(
pylist, dtype=dtype, ragged_rank=ragged_rank, inner_shape=inner_shape)
# Normalize the pylist, i.e., convert all np.arrays to list.
# E.g., [np.array((1,2))] --> [[1,2]]
pylist = self._normalize_pylist(pylist)
# If dtype was explicitly specified, check it.
if dtype is not None:
self.assertEqual(rt.dtype, dtype)
if expected_dtype is not None:
self.assertEqual(rt.dtype, expected_dtype)
# If ragged_rank was explicitly specified, check it.
if ragged_rank is not None:
if isinstance(rt, ragged_tensor_value.RaggedTensorValue):
self.assertEqual(rt.ragged_rank, ragged_rank)
else:
self.assertEqual(0, ragged_rank)
# If inner_shape was explicitly specified, check it.
if inner_shape is not None:
if isinstance(rt, ragged_tensor_value.RaggedTensorValue):
self.assertEqual(rt.flat_values.shape[1:], inner_shape)
else:
self.assertEqual(rt.shape, inner_shape)
if expected_shape is not None:
self.assertEqual(tuple(rt.shape), expected_shape)
if rt.shape:
if isinstance(rt, ragged_tensor_value.RaggedTensorValue):
self.assertEqual(rt.to_list(), pylist)
else:
self.assertEqual(rt.tolist(), pylist)
if expected_shape is not None:
self.assertEqual(rt.shape, expected_shape)
else:
self.assertEqual(rt, pylist)
if expected_shape is not None:
self.assertEqual((), expected_shape)
@parameterized.parameters(
dict(
pylist=12,
ragged_rank=1,
exception=ValueError,
message='Invalid pylist=12: incompatible with ragged_rank=1'),
dict(
pylist=np.array(12),
ragged_rank=1,
exception=ValueError,
message='Invalid pylist=array\\(12\\): incompatible with '
'ragged_rank=1'),
dict(
pylist=12,
inner_shape=(1,),
exception=ValueError,
message='Invalid pylist=12: incompatible with '
'dim\\(inner_shape\\)=1'),
dict(
pylist=[[[1], [2]]],
ragged_rank=-1,
exception=ValueError,
message='Invalid ragged_rank=-1: must be nonnegative'),
dict(
pylist=[[1, [2]]],
exception=ValueError,
message='all scalar values must have the same nesting depth'),
dict(
pylist=[[[1]], [[[2]]]],
exception=ValueError,
message='all scalar values must have the same nesting depth'),
dict(
pylist=[[1], [[]]],
exception=ValueError,
message='Invalid pylist=.*: empty list nesting is greater '
'than scalar value nesting'),
dict(
pylist=[1, 2, 3],
ragged_rank=1,
exception=ValueError,
message='pylist has scalar values depth 1, but ragged_rank=1 '
'requires scalar value depth greater than 1'),
dict(
pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
ragged_rank=2,
exception=ValueError,
message='pylist has scalar values depth 2, but ragged_rank=2 '
'requires scalar value depth greater than 2'),
dict(
pylist=[1, 2, 3],
inner_shape=(1, 1),
exception=ValueError,
message='cannot reshape array'),
dict(
pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
inner_shape=(2, 2),
ragged_rank=1,
exception=ValueError,
message='Invalid pylist=.*: incompatible with ragged_rank=1 and '
'dim\\(inner_shape\\)=2'),
dict(
pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8, 9]]],
ragged_rank=1,
exception=ValueError,
message='inner values have inconsistent shape'),
dict(
pylist=[[[], [[]]]],
ragged_rank=1,
exception=ValueError,
message='inner values have inconsistent shape'),
)
def testRaggedValuesError(self,
pylist,
dtype=None,
ragged_rank=None,
inner_shape=None,
exception=None,
message=None):
"""Tests that `constant_value()` raises an expected exception."""
self.assertRaisesRegexp(
exception,
message,
ragged_factory_ops.constant_value,
pylist,
dtype=dtype,
ragged_rank=ragged_rank,
inner_shape=inner_shape)
if __name__ == '__main__':
googletest.main()
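# Illustrative usage (not part of the test; mirrors the parameterized cases
# above): constant_value builds a RaggedTensorValue directly from nested
# Python lists, e.g.
#   rt = ragged_factory_ops.constant_value([[1, 2, 3], [4], [5, 6]])
#   rt.to_list()  # -> [[1, 2, 3], [4], [5, 6]]   (shape (3, None))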
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras generic Python utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import keras
from tensorflow.python.platform import test
class HasArgTest(test.TestCase):
def test_has_arg(self):
def f_x(x):
return x
def f_x_args(x, *args):
_ = args
return x
def f_x_kwargs(x, **kwargs):
_ = kwargs
return x
self.assertTrue(keras.utils.generic_utils.has_arg(
f_x, 'x', accept_all=False))
self.assertFalse(keras.utils.generic_utils.has_arg(
f_x, 'y', accept_all=False))
self.assertTrue(keras.utils.generic_utils.has_arg(
f_x_args, 'x', accept_all=False))
self.assertFalse(keras.utils.generic_utils.has_arg(
f_x_args, 'y', accept_all=False))
self.assertTrue(keras.utils.generic_utils.has_arg(
f_x_kwargs, 'x', accept_all=False))
self.assertFalse(keras.utils.generic_utils.has_arg(
f_x_kwargs, 'y', accept_all=False))
self.assertTrue(keras.utils.generic_utils.has_arg(
f_x_kwargs, 'y', accept_all=True))
class TestCustomObjectScope(test.TestCase):
def test_custom_object_scope(self):
def custom_fn():
pass
class CustomClass(object):
pass
with keras.utils.generic_utils.custom_object_scope(
{'CustomClass': CustomClass, 'custom_fn': custom_fn}):
act = keras.activations.get('custom_fn')
self.assertEqual(act, custom_fn)
cl = keras.regularizers.get('CustomClass')
self.assertEqual(cl.__class__, CustomClass)
class SerializeKerasObjectTest(test.TestCase):
def test_serialize_none(self):
serialized = keras.utils.generic_utils.serialize_keras_object(None)
self.assertEqual(serialized, None)
deserialized = keras.utils.generic_utils.deserialize_keras_object(
serialized)
self.assertEqual(deserialized, None)
def test_serialize_custom_class_with_default_name(self):
@keras.utils.generic_utils.register_keras_serializable()
class TestClass(object):
def __init__(self, value):
self._value = value
def get_config(self):
return {'value': self._value}
serialized_name = 'Custom>TestClass'
inst = TestClass(value=10)
class_name = keras.utils.generic_utils._GLOBAL_CUSTOM_NAMES[TestClass]
self.assertEqual(serialized_name, class_name)
config = keras.utils.generic_utils.serialize_keras_object(inst)
self.assertEqual(class_name, config['class_name'])
new_inst = keras.utils.generic_utils.deserialize_keras_object(config)
self.assertIsNot(inst, new_inst)
self.assertIsInstance(new_inst, TestClass)
self.assertEqual(10, new_inst._value)
# Make sure registering a new class with same name will fail.
with self.assertRaisesRegex(ValueError, '.*has already been registered.*'):
@keras.utils.generic_utils.register_keras_serializable() # pylint: disable=function-redefined
class TestClass(object):
def __init__(self, value):
self._value = value
def get_config(self):
return {'value': self._value}
def test_serialize_custom_class_with_custom_name(self):
@keras.utils.generic_utils.register_keras_serializable(
'TestPackage', 'CustomName')
class OtherTestClass(object):
def __init__(self, val):
self._val = val
def get_config(self):
return {'val': self._val}
serialized_name = 'TestPackage>CustomName'
inst = OtherTestClass(val=5)
class_name = keras.utils.generic_utils._GLOBAL_CUSTOM_NAMES[OtherTestClass]
self.assertEqual(serialized_name, class_name)
config = keras.utils.generic_utils.serialize_keras_object(inst)
self.assertEqual(class_name, config['class_name'])
new_inst = keras.utils.generic_utils.deserialize_keras_object(config)
self.assertIsNot(inst, new_inst)
self.assertIsInstance(new_inst, OtherTestClass)
self.assertEqual(5, new_inst._val)
def test_serialize_custom_function(self):
@keras.utils.generic_utils.register_keras_serializable()
def my_fn():
return 42
serialized_name = 'Custom>my_fn'
class_name = keras.utils.generic_utils._GLOBAL_CUSTOM_NAMES[my_fn]
self.assertEqual(serialized_name, class_name)
config = keras.utils.generic_utils.serialize_keras_object(my_fn)
self.assertEqual(class_name, config)
fn = keras.utils.generic_utils.deserialize_keras_object(config)
self.assertEqual(42, fn())
def test_serialize_custom_class_without_get_config_fails(self):
with self.assertRaisesRegex(
ValueError, 'Cannot register a class that does '
'not have a get_config.*'):
@keras.utils.generic_utils.register_keras_serializable( # pylint: disable=unused-variable
'TestPackage', 'TestClass')
class TestClass(object):
def __init__(self, value):
self._value = value
def test_serializable_object(self):
class SerializableInt(int):
"""A serializable object to pass out of a test layer's config."""
def __new__(cls, value):
return int.__new__(cls, value)
def get_config(self):
return {'value': int(self)}
@classmethod
def from_config(cls, config):
return cls(**config)
layer = keras.layers.Dense(
SerializableInt(3),
activation='relu',
kernel_initializer='ones',
bias_regularizer='l2')
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(
config, custom_objects={'SerializableInt': SerializableInt})
self.assertEqual(new_layer.activation, keras.activations.relu)
self.assertEqual(new_layer.bias_regularizer.__class__,
keras.regularizers.L1L2)
self.assertEqual(new_layer.units.__class__, SerializableInt)
self.assertEqual(new_layer.units, 3)
def test_nested_serializable_object(self):
class SerializableInt(int):
"""A serializable object to pass out of a test layer's config."""
def __new__(cls, value):
return int.__new__(cls, value)
def get_config(self):
return {'value': int(self)}
@classmethod
def from_config(cls, config):
return cls(**config)
class SerializableNestedInt(int):
"""A serializable object containing another serializable object."""
def __new__(cls, value, int_obj):
obj = int.__new__(cls, value)
obj.int_obj = int_obj
return obj
def get_config(self):
return {'value': int(self), 'int_obj': self.int_obj}
@classmethod
def from_config(cls, config):
return cls(**config)
nested_int = SerializableInt(4)
layer = keras.layers.Dense(
SerializableNestedInt(3, nested_int),
name='SerializableNestedInt',
activation='relu',
kernel_initializer='ones',
bias_regularizer='l2')
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(
config,
custom_objects={
'SerializableInt': SerializableInt,
'SerializableNestedInt': SerializableNestedInt
})
    # Make sure the string field doesn't get converted to a custom object,
    # even when they have the same value.
self.assertEqual(new_layer.name, 'SerializableNestedInt')
self.assertEqual(new_layer.activation, keras.activations.relu)
self.assertEqual(new_layer.bias_regularizer.__class__,
keras.regularizers.L1L2)
self.assertEqual(new_layer.units.__class__, SerializableNestedInt)
self.assertEqual(new_layer.units, 3)
self.assertEqual(new_layer.units.int_obj.__class__, SerializableInt)
self.assertEqual(new_layer.units.int_obj, 4)
def test_nested_serializable_fn(self):
def serializable_fn(x):
"""A serializable function to pass out of a test layer's config."""
return x
class SerializableNestedInt(int):
"""A serializable object containing a serializable function."""
def __new__(cls, value, fn):
obj = int.__new__(cls, value)
obj.fn = fn
return obj
def get_config(self):
return {'value': int(self), 'fn': self.fn}
@classmethod
def from_config(cls, config):
return cls(**config)
layer = keras.layers.Dense(
SerializableNestedInt(3, serializable_fn),
activation='relu',
kernel_initializer='ones',
bias_regularizer='l2')
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(
config,
custom_objects={
'serializable_fn': serializable_fn,
'SerializableNestedInt': SerializableNestedInt
})
self.assertEqual(new_layer.activation, keras.activations.relu)
self.assertIsInstance(new_layer.bias_regularizer, keras.regularizers.L1L2)
self.assertIsInstance(new_layer.units, SerializableNestedInt)
self.assertEqual(new_layer.units, 3)
self.assertIs(new_layer.units.fn, serializable_fn)
class SliceArraysTest(test.TestCase):
def test_slice_arrays(self):
input_a = list([1, 2, 3])
self.assertEqual(
keras.utils.generic_utils.slice_arrays(input_a, start=0),
[None, None, None])
self.assertEqual(
keras.utils.generic_utils.slice_arrays(input_a, stop=3),
[None, None, None])
self.assertEqual(
keras.utils.generic_utils.slice_arrays(input_a, start=0, stop=1),
[None, None, None])
if __name__ == '__main__':
test.main()
|
|
"""Code to wrap some GLOO API calls."""
import numpy
import asyncio
try:
import pygloo
except ImportError:
raise ImportError("Can not import pygloo."
"Please run 'pip install pygloo' to install pygloo.")
import ray
from ray.util.collective.types import ReduceOp, torch_available
from ray.util.queue import _QueueActor
GLOO_REDUCE_OP_MAP = {
ReduceOp.SUM: pygloo.ReduceOp.SUM,
ReduceOp.PRODUCT: pygloo.ReduceOp.PRODUCT,
ReduceOp.MIN: pygloo.ReduceOp.MIN,
ReduceOp.MAX: pygloo.ReduceOp.MAX,
}
NUMPY_GLOO_DTYPE_MAP = {
# INT types
numpy.int: pygloo.glooDataType_t.glooInt64,
numpy.uint8: pygloo.glooDataType_t.glooUint8,
numpy.uint32: pygloo.glooDataType_t.glooUint32,
numpy.uint64: pygloo.glooDataType_t.glooUint64,
numpy.int8: pygloo.glooDataType_t.glooInt8,
numpy.int32: pygloo.glooDataType_t.glooInt32,
numpy.int64: pygloo.glooDataType_t.glooInt64,
# FLOAT types
numpy.half: pygloo.glooDataType_t.glooFloat16,
numpy.float: pygloo.glooDataType_t.glooFloat64,
numpy.float16: pygloo.glooDataType_t.glooFloat16,
numpy.float32: pygloo.glooDataType_t.glooFloat32,
numpy.float64: pygloo.glooDataType_t.glooFloat64,
numpy.double: pygloo.glooDataType_t.glooFloat64,
}
if torch_available():
import torch
TORCH_GLOO_DTYPE_MAP = {
torch.int: pygloo.glooDataType_t.glooInt32,
torch.uint8: pygloo.glooDataType_t.glooUint8,
torch.int8: pygloo.glooDataType_t.glooInt8,
torch.int32: pygloo.glooDataType_t.glooInt32,
torch.int64: pygloo.glooDataType_t.glooInt64,
torch.long: pygloo.glooDataType_t.glooInt64,
# FLOAT types
torch.half: pygloo.glooDataType_t.glooFloat16,
torch.float: pygloo.glooDataType_t.glooFloat32,
torch.float16: pygloo.glooDataType_t.glooFloat16,
torch.float32: pygloo.glooDataType_t.glooFloat32,
torch.float64: pygloo.glooDataType_t.glooFloat64,
torch.double: pygloo.glooDataType_t.glooFloat64,
}
TORCH_NUMPY_DTYPE_MAP = {
# INT types
torch.int: numpy.int32,
torch.uint8: numpy.uint8,
torch.int8: numpy.int8,
torch.int32: numpy.int32,
torch.int64: numpy.int64,
torch.long: numpy.int64,
# FLOAT types
torch.half: numpy.half,
torch.float: numpy.float32,
torch.float16: numpy.float16,
torch.float32: numpy.float32,
torch.float64: numpy.float64,
}
def create_gloo_context(rank, world_size):
"""Create a GLOO context using GLOO APIs.
Args:
rank (int): the rank of this process.
world_size (int): the number of processes of this collective group.
Returns:
context (pygloo.Context): a GLOO context.
"""
context = pygloo.rendezvous.Context(rank, world_size)
return context
def get_gloo_reduce_op(reduce_op):
"""Map the reduce op to GLOO reduce op type.
Args:
reduce_op (ReduceOp): ReduceOp Enum (SUM/PRODUCT/MIN/MAX).
Returns:
(pygloo.ReduceOp): the mapped GLOO reduce op.
"""
if reduce_op not in GLOO_REDUCE_OP_MAP:
raise RuntimeError(
"Gloo does not support reduce op: '{}'.".format(reduce_op))
return GLOO_REDUCE_OP_MAP[reduce_op]
def get_gloo_tensor_dtype(tensor):
"""Return the corresponded GLOO dtype given a tensor."""
if isinstance(tensor, numpy.ndarray):
return NUMPY_GLOO_DTYPE_MAP[tensor.dtype.type]
if torch_available():
if isinstance(tensor, torch.Tensor):
if not tensor.is_cuda:
return TORCH_GLOO_DTYPE_MAP[tensor.dtype]
else:
raise ValueError("Expect torch CPU tensor. "
"Got {}.".format(tensor.device))
raise ValueError("Unsupported tensor type. "
"Got: {}.".format(type(tensor)))
def get_numpy_tensor_dtype(tensor):
"""Return the corresponded Cupy dtype given a tensor."""
if isinstance(tensor, numpy.ndarray):
return tensor.dtype.type
if torch_available():
if isinstance(tensor, torch.Tensor):
return TORCH_NUMPY_DTYPE_MAP[tensor.dtype]
raise ValueError("Unsupported tensor type. Got: {}. Supported "
"CPU tensor types are: torch.Tensor, "
"numpy.ndarray.".format(type(tensor)))
def get_tensor_ptr(tensor):
"""Return the pointer to the underlying memory storage of a tensor."""
if isinstance(tensor, numpy.ndarray):
return tensor.ctypes.data
if torch_available():
if isinstance(tensor, torch.Tensor):
if tensor.is_cuda:
raise RuntimeError("Torch tensor must be on CPU "
"when using GLOO collectives.")
return tensor.data_ptr()
raise ValueError("Unsupported tensor type. Got: {}. Supported "
"CPU tensor types are: torch.Tensor, "
"numpy.ndarray.".format(type(tensor)))
def get_tensor_n_elements(tensor):
"""Return the number of elements in a tensor."""
if isinstance(tensor, numpy.ndarray):
return tensor.size
if torch_available():
if isinstance(tensor, torch.Tensor):
return torch.numel(tensor)
raise ValueError("Unsupported tensor type. "
"Got: {}.".format(type(tensor)))
def get_gloo_store_path(store_name):
from ray._private.utils import get_ray_temp_dir
store_path = f"{get_ray_temp_dir()}_collective/gloo/{store_name}"
return store_path
def get_tensor_device(tensor):
if isinstance(tensor, numpy.ndarray):
return "cpu"
elif torch_available() and isinstance(tensor, torch.Tensor):
if not tensor.is_cuda:
return "cpu"
else:
return "cuda"
else:
raise RuntimeError("Unrecognized tensor type: "
"'{}'.".format(type(tensor)))
def get_tensor_shape(tensor):
"""Return the shape of the tensor as a list."""
if isinstance(tensor, numpy.ndarray):
return list(tensor.shape)
if torch_available():
if isinstance(tensor, torch.Tensor):
return list(tensor.size())
raise ValueError("Unsupported tensor type. Got: {}. Supported "
"CPU tensor types are: torch.Tensor, "
"numpy.ndarray.".format(type(tensor)))
def copy_tensor(dst_tensor, src_tensor):
"""Copy the content from src_tensor to dst_tensor.
Args:
        dst_tensor: the destination tensor to copy into.
        src_tensor: the source tensor to copy from.
Returns:
None
"""
copied = True
if isinstance(dst_tensor, numpy.ndarray) \
and isinstance(src_tensor, numpy.ndarray):
numpy.copyto(dst_tensor, src_tensor)
elif torch_available():
if isinstance(dst_tensor, torch.Tensor) and isinstance(
src_tensor, torch.Tensor):
dst_tensor.copy_(src_tensor)
elif isinstance(dst_tensor, torch.Tensor) and isinstance(
src_tensor, numpy.ndarray):
t = torch.Tensor(src_tensor)
dst_tensor.copy_(t)
elif isinstance(dst_tensor, numpy.ndarray) and isinstance(
src_tensor, torch.Tensor):
t = src_tensor.numpy()
numpy.copyto(dst_tensor, t)
else:
copied = False
else:
copied = False
if not copied:
raise ValueError("Unsupported tensor type. Got: {} and {}. Supported "
"CPU tensor types are: torch.Tensor, numpy.ndarray."
.format(type(dst_tensor), type(src_tensor)))
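# Illustrative example (not from the original source): copying between two CPU
# tensors of the same shape, e.g. numpy -> numpy:
#   dst = numpy.zeros(3)
#   copy_tensor(dst, numpy.array([1., 2., 3.]))  # dst now holds [1., 2., 3.]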
# Note(Hao): this requires Ray >= 1.2.0,
# otherwise _QueueActor is an actor class.
class glooQueue(_QueueActor):
def index(self, group_name):
try:
return self.queue._queue.index(group_name)
except ValueError:
return -1
@ray.remote(num_cpus=0)
class SignalActor:
def __init__(self, world_size):
self.ready_events = [asyncio.Event() for _ in range(world_size)]
self.world_size = world_size
def send(self, rank, clear=False):
self.ready_events[rank].set()
if clear:
self.ready_events[rank].clear()
async def wait(self, should_wait=True):
if should_wait:
for i in range(self.world_size):
await self.ready_events[i].wait()
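# Illustrative usage sketch (assumes a running Ray cluster; names and ranks are
# hypothetical). Each rank signals readiness and a coordinator waits for all:
#   signal = SignalActor.remote(world_size=2)
#   signal.send.remote(0)
#   signal.send.remote(1)
#   ray.get(signal.wait.remote())  # returns once every rank has signaled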
|
|
#!/usr/bin/python
# Copyright 2015 Coron
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'duanqz@gmail.com'
import os
import shutil, commands
import tempfile
import re
import deodex, deoat
from common import Log, Options
# Global
TAG="reverse-zipformatter"
class ZipFormatter:
""" Abstract Model of ZIP Formatter
"""
def __init__(self, zipModel):
self.mZipModel = zipModel
def format(self, zipBack=True):
self.mZipModel.unzip()
if not self.mZipModel.isFormatted():
self.doFormat()
if zipBack:
self.mZipModel.zip()
def doFormat(self):
Log.e(TAG, "No implementation of ZipFormatter.doFormat() if found.")
raise Exception("Should be implemented by sub-class!")
def getFilesRoot(self):
""" Get the root directory of unziped files
"""
        return self.mZipModel.getRoot()
@staticmethod
def genOptions(inZip):
""" Generate options.
Only format framework.
"""
options = Options()
options.inZip = inZip
options.outZip = inZip + ".std.zip"
options.formatFrw = True
return options
@staticmethod
def create(options):
""" Create a zip formatter for the incoming zip file
"""
zipModel = ZipModel(options.inZip, options.outZip)
zipType = zipModel.getZipType()
Log.i(TAG, "process(): Creating %s ZipFormatter..." %zipType)
if zipType == ZipModel.ART:
deoat.OPTIONS = options
return Deoat(zipModel)
elif zipType == ZipModel.DVM:
deodex.OPTIONS = options
return Deodex(zipModel)
else:
raise Exception("Unknown OTA package zip. Is it an ART or DALVIKVM package?")
class Deodex(ZipFormatter):
""" De-odex formatter
"""
def __init__(self, zipModel):
ZipFormatter.__init__(self, zipModel)
def doFormat(self):
deodex.OdexZip(self.getFilesRoot()).deodex()
class Deoat(ZipFormatter):
""" De-oat formatter
"""
def __init__(self, zipModel):
ZipFormatter.__init__(self, zipModel)
def doFormat(self):
deoat.OatZip(self.getFilesRoot()).deoat().rebuild()
class ZipModel:
""" Model of an OTA package zip
"""
ART = "ART"
DVM = "DVM"
DAT2IMG = os.path.join(os.path.dirname(__file__), "de-dat", "dedat.sh")
def __init__(self, inZip, outZip):
self.mInZip = inZip
self.mOutZip = outZip
self.mRoot = None
def unzip(self):
        # Already unzipped
if self.mRoot is not None: return
self.mRoot = tempfile.mkdtemp()
Log.i(TAG, "unzip %s to %s" % (self.mInZip, self.mRoot))
cmd = "unzip -q -o %s -d %s" %(self.mInZip, self.mRoot)
Log.d(TAG, commands.getoutput(cmd))
self.dedatIfNeeded()
# Format path
if os.path.exists(os.path.join(self.mRoot, "SYSTEM")):
shutil.move(os.path.join(self.mRoot, "SYSTEM"), os.path.join(self.mRoot, "system"))
return self
def zip(self):
if self.mRoot is None: return
origDir = os.path.abspath(os.curdir)
Log.i(TAG, "zip from %s to %s" % (self.mRoot, self.mOutZip))
os.chdir(self.mRoot)
cmd = "zip -r -y -q tmp *; mv tmp.zip %s" % self.mOutZip
Log.d(TAG, commands.getoutput(cmd))
os.chdir(origDir)
Log.i(TAG, "Deleting %s" % self.mRoot)
shutil.rmtree(self.mRoot)
Log.i(TAG, "===> %s" % self.mOutZip)
def getRoot(self):
""" Note: This method is not thread-safe.
"""
if self.mRoot is None: self.unzip()
return self.mRoot
def isFormatted(self):
return False
def dedatIfNeeded(self):
""" Android 5.0 zip structure:
* META-INF (folder containing scripts)
* system.new.dat (compressed /system partition)
* system.patch.dat
* system.transfer.list (see explanation below)
"""
if not os.path.exists(os.path.join(self.mRoot, "system.new.dat")):
return
if not os.path.exists(os.path.join(self.mRoot, "system.transfer.list")):
return
if os.geteuid() != 0:
raise Exception("DEDAT should be executed as root.")
cmd = "%s %s" % (commands.mkarg(ZipModel.DAT2IMG), commands.mkarg(self.mRoot))
Log.d(TAG, commands.getoutput(cmd))
def getZipType(self):
""" Retrieve the OTA package type
The property <persist.sys.dalvik.vm.lib> defines the VM type.
        If libart.so is used, it is an ART package;
        if libdvm.so is used, it is a DVM package.
"""
if self.mRoot is None: self.unzip()
buildProp = os.path.join(self.mRoot, "system/build.prop")
# Retrieve the <persist.sys.dalvik.vm.lib> in build.prop
zipType = None
if os.path.exists(buildProp):
fileHandle = open(buildProp, "r")
content = fileHandle.read()
vmlib = re.compile("\n.*sys.dalvik.vm.lib.*=\s*(?P<lib>.*)\n")
match = vmlib.search(content)
if match is not None:
libType = match.group("lib")
Log.d(TAG, "sys.dalvik.vm.lib=%s" % libType)
else:
libType = ""
fileHandle.close()
else:
raise Exception("Could not find %s, unknown ota type" %buildProp)
if libType.find("art") >= 0:
zipType = ZipModel.ART
elif libType.find("dvm") >= 0:
zipType = ZipModel.DVM
return zipType
def debug():
inZip = "/home/duanqizhi/tmp/n5-dat/n5-dat.zip"
outZip = "/home/duanqizhi/tmp/n5-dat/n5-dat.std.zip"
zipModel = ZipModel(inZip, outZip)
#zipModel.mRoot = "/home/duanqizhi/tmp/n5-dat"
print "Zip Type: %s" % zipModel.getZipType()
if __name__ == "__main__":
debug()
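# Illustrative usage sketch (hypothetical path; mirrors debug() above):
#   options = ZipFormatter.genOptions("/path/to/ota.zip")
#   ZipFormatter.create(options).format(zipBack=True)
# This unzips the package, de-odexes or de-oats it depending on the detected
# VM type, and zips the result back to "/path/to/ota.zip.std.zip".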
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from functools import partial
import numpy as np
import os
import paddle.fluid as fluid
import paddle.fluid.layers as layers
pos_enc_param_names = (
"src_pos_enc_table",
"trg_pos_enc_table", )
batch_size = 2
def position_encoding_init(n_position, d_pos_vec):
"""
Generate the initial values for the sinusoid position encoding table.
"""
position_enc = np.array([[
pos / np.power(10000, 2 * (j // 2) / d_pos_vec)
for j in range(d_pos_vec)
] if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)])
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i
position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1
return position_enc.astype("float32")
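# In formula form (restating the code above): for pos > 0,
#   PE[pos, 2i]   = sin(pos / 10000^(2i / d_pos_vec))
#   PE[pos, 2i+1] = cos(pos / 10000^(2i / d_pos_vec))
# and row 0 is kept all-zero (cf. pos_pad_idx in prepare_encoder below).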
def multi_head_attention(queries,
keys,
values,
attn_bias,
d_key,
d_value,
d_model,
n_head=1,
dropout_rate=0.):
"""
    Multi-Head Attention. Note that attn_bias is added to the logit before
    computing the softmax activation, masking certain selected positions so
    that they will not be considered in the attention weights.
"""
if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
raise ValueError(
"Inputs: queries, keys and values should all be 3-D tensors.")
def __compute_qkv(queries, keys, values, n_head, d_key, d_value):
"""
Add linear projection to queries, keys, and values.
"""
q = layers.fc(input=queries,
size=d_key * n_head,
param_attr=fluid.initializer.Xavier(
uniform=False,
fan_in=d_model * d_key,
fan_out=n_head * d_key),
bias_attr=False,
num_flatten_dims=2)
k = layers.fc(input=keys,
size=d_key * n_head,
param_attr=fluid.initializer.Xavier(
uniform=False,
fan_in=d_model * d_key,
fan_out=n_head * d_key),
bias_attr=False,
num_flatten_dims=2)
v = layers.fc(input=values,
size=d_value * n_head,
param_attr=fluid.initializer.Xavier(
uniform=False,
fan_in=d_model * d_value,
fan_out=n_head * d_value),
bias_attr=False,
num_flatten_dims=2)
return q, k, v
def __split_heads(x, n_head):
"""
Reshape the last dimension of input tensor x so that it becomes two
dimensions and then transpose. Specifically, input a tensor with shape
[bs, max_sequence_length, n_head * hidden_dim] then output a tensor
with shape [bs, n_head, max_sequence_length, hidden_dim].
"""
if n_head == 1:
return x
hidden_size = x.shape[-1]
# FIXME(guosheng): Decouple the program desc with batch_size.
reshaped = layers.reshape(
x=x, shape=[batch_size, -1, n_head, hidden_size // n_head])
# permute the dimensions into:
# [batch_size, n_head, max_sequence_len, hidden_size_per_head]
return layers.transpose(x=reshaped, perm=[0, 2, 1, 3])
def __combine_heads(x):
"""
Transpose and then reshape the last two dimensions of input tensor x
so that it becomes one dimension, which is reverse to __split_heads.
"""
if len(x.shape) == 3: return x
if len(x.shape) != 4:
raise ValueError("Input(x) should be a 4-D Tensor.")
trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
# FIXME(guosheng): Decouple the program desc with batch_size.
return layers.reshape(
x=trans_x,
shape=list(
map(int, [batch_size, -1, trans_x.shape[2] * trans_x.shape[3]
])))
def scaled_dot_product_attention(q, k, v, attn_bias, d_model, dropout_rate):
"""
Scaled Dot-Product Attention
"""
# FIXME(guosheng): Optimize the shape in reshape_op or softmax_op.
# The current implementation of softmax_op only supports 2D tensor,
# consequently it cannot be directly used here.
        # Besides, if the reshape_op were used, the shape of product inferred
        # at compile-time is not the actual shape at run-time, so it cannot be
        # used to set the attribute of reshape_op.
        # So a softmax is defined here as a temporary solution.
def __softmax(x, eps=1e-9):
exp_out = layers.exp(x=x)
sum_out = layers.reduce_sum(exp_out, dim=-1, keep_dim=False)
return layers.elementwise_div(x=exp_out, y=sum_out, axis=0)
scaled_q = layers.scale(x=q, scale=d_model**-0.5)
product = layers.matmul(x=scaled_q, y=k, transpose_y=True)
weights = __softmax(layers.elementwise_add(x=product, y=attn_bias))
if dropout_rate:
weights = layers.dropout(
weights, dropout_prob=dropout_rate, is_test=False)
out = layers.matmul(weights, v)
return out
q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)
q = __split_heads(q, n_head)
k = __split_heads(k, n_head)
v = __split_heads(v, n_head)
ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_model,
dropout_rate)
out = __combine_heads(ctx_multiheads)
# Project back to the model size.
proj_out = layers.fc(input=out,
size=d_model,
param_attr=fluid.initializer.Xavier(uniform=False),
bias_attr=False,
num_flatten_dims=2)
return proj_out
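# Shape flow of multi_head_attention (a restatement of the code above):
#   queries/keys/values: [batch_size, max_len, d_model]
#   after __compute_qkv:  q/k of [bs, len, n_head * d_key], v of [bs, len, n_head * d_value]
#   after __split_heads:  [bs, n_head, len, d_key] (resp. d_value)
#   after attention and __combine_heads: [bs, len, n_head * d_value]
#   after the final fc projection:       [bs, len, d_model]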
def positionwise_feed_forward(x, d_inner_hid, d_hid):
"""
Position-wise Feed-Forward Networks.
This module consists of two linear transformations with a ReLU activation
in between, which is applied to each position separately and identically.
"""
hidden = layers.fc(input=x,
size=d_inner_hid,
num_flatten_dims=2,
param_attr=fluid.initializer.Uniform(
low=-(d_hid**-0.5), high=(d_hid**-0.5)),
act="relu")
out = layers.fc(input=hidden,
size=d_hid,
num_flatten_dims=2,
param_attr=fluid.initializer.Uniform(
low=-(d_inner_hid**-0.5), high=(d_inner_hid**-0.5)))
return out
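# Equivalently (the standard Transformer FFN, matching the two fc layers
# above): FFN(x) = relu(x W1 + b1) W2 + b2, applied independently at every
# position, where the first fc maps to d_inner_hid units and the second back
# to d_hid units.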
def pre_post_process_layer(prev_out, out, process_cmd, dropout=0.):
"""
    Add residual connection, layer normalization and dropout to the out tensor
optionally according to the value of process_cmd.
This will be used before or after multi-head attention and position-wise
feed-forward networks.
"""
for cmd in process_cmd:
if cmd == "a": # add residual connection
out = out + prev_out if prev_out else out
elif cmd == "n": # add layer normalization
out = layers.layer_norm(
out,
begin_norm_axis=len(out.shape) - 1,
param_attr=fluid.initializer.Constant(1.),
bias_attr=fluid.initializer.Constant(0.))
elif cmd == "d": # add dropout
if dropout:
out = layers.dropout(out, dropout_prob=dropout, is_test=False)
return out
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
def prepare_encoder(src_word,
src_pos,
src_vocab_size,
src_emb_dim,
src_pad_idx,
src_max_len,
dropout=0.,
pos_pad_idx=0,
pos_enc_param_name=None):
"""Add word embeddings and position encodings.
The output tensor has a shape of:
[batch_size, max_src_length_in_batch, d_model].
This module is used at the bottom of the encoder stacks.
"""
src_word_emb = layers.embedding(
src_word,
size=[src_vocab_size, src_emb_dim],
padding_idx=src_pad_idx,
param_attr=fluid.initializer.Normal(0., 1.))
src_pos_enc = layers.embedding(
src_pos,
size=[src_max_len, src_emb_dim],
padding_idx=pos_pad_idx,
param_attr=fluid.ParamAttr(
name=pos_enc_param_name, trainable=False))
src_pos_enc.stop_gradient = True
enc_input = src_word_emb + src_pos_enc
# FIXME(guosheng): Decouple the program desc with batch_size.
enc_input = layers.reshape(x=enc_input, shape=[batch_size, -1, src_emb_dim])
return layers.dropout(
enc_input, dropout_prob=dropout,
is_test=False) if dropout else enc_input
prepare_encoder = partial(
prepare_encoder, pos_enc_param_name=pos_enc_param_names[0])
prepare_decoder = partial(
prepare_encoder, pos_enc_param_name=pos_enc_param_names[1])
def encoder_layer(enc_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate=0.):
"""The encoder layers that can be stacked to form a deep encoder.
    This module consists of a multi-head (self) attention followed by a
    position-wise feed-forward network, with both components accompanied by
    post_process_layer to add residual connection, layer normalization
    and dropout.
"""
attn_output = multi_head_attention(enc_input, enc_input, enc_input,
attn_bias, d_key, d_value, d_model,
n_head, dropout_rate)
attn_output = post_process_layer(enc_input, attn_output, "dan",
dropout_rate)
ffd_output = positionwise_feed_forward(attn_output, d_inner_hid, d_model)
return post_process_layer(attn_output, ffd_output, "dan", dropout_rate)
def encoder(enc_input,
attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate=0.):
"""
The encoder is composed of a stack of identical layers returned by calling
encoder_layer.
"""
for i in range(n_layer):
enc_output = encoder_layer(enc_input, attn_bias, n_head, d_key, d_value,
d_model, d_inner_hid, dropout_rate)
enc_input = enc_output
return enc_output
def decoder_layer(dec_input,
enc_output,
slf_attn_bias,
dec_enc_attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate=0.):
""" The layer to be stacked in decoder part.
    The structure of this module is similar to that in the encoder part, except
    that a multi-head attention layer is added to implement encoder-decoder
    attention.
"""
slf_attn_output = multi_head_attention(
dec_input,
dec_input,
dec_input,
slf_attn_bias,
d_key,
d_value,
d_model,
n_head,
dropout_rate, )
slf_attn_output = post_process_layer(
dec_input,
slf_attn_output,
"dan", # residual connection + dropout + layer normalization
dropout_rate, )
enc_attn_output = multi_head_attention(
slf_attn_output,
enc_output,
enc_output,
dec_enc_attn_bias,
d_key,
d_value,
d_model,
n_head,
dropout_rate, )
enc_attn_output = post_process_layer(
slf_attn_output,
enc_attn_output,
"dan", # residual connection + dropout + layer normalization
dropout_rate, )
ffd_output = positionwise_feed_forward(
enc_attn_output,
d_inner_hid,
d_model, )
dec_output = post_process_layer(
enc_attn_output,
ffd_output,
"dan", # residual connection + dropout + layer normalization
dropout_rate, )
return dec_output
def decoder(dec_input,
enc_output,
dec_slf_attn_bias,
dec_enc_attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate=0.):
"""
The decoder is composed of a stack of identical decoder_layer layers.
"""
for i in range(n_layer):
dec_output = decoder_layer(
dec_input,
enc_output,
dec_slf_attn_bias,
dec_enc_attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate, )
dec_input = dec_output
return dec_output
def build_inputs(max_length, n_head):
names = [
'src_word',
'src_pos',
'trg_word',
'trg_pos',
'src_slf_attn_bias',
'trg_slf_attn_bias',
'trg_src_attn_bias',
'gold',
'weights',
]
shapes = [
[batch_size * max_length, 1],
[batch_size * max_length, 1],
[batch_size * max_length, 1],
[batch_size * max_length, 1],
[batch_size, n_head, max_length, max_length],
[batch_size, n_head, max_length, max_length],
[batch_size, n_head, max_length, max_length],
[batch_size * max_length, 1],
[batch_size * max_length, 1],
]
dtypes = [
'int64',
'int64',
'int64',
'int64',
'float32',
'float32',
'float32',
'int64',
'float32',
]
all_inputs = []
for name, shape, dtype in zip(names, shapes, dtypes):
all_inputs.append(
fluid.layers.data(
name=name, shape=shape, dtype=dtype, append_batch_size=False))
return all_inputs
def transformer(
src_vocab_size,
trg_vocab_size,
max_length,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate,
src_pad_idx,
trg_pad_idx,
pos_pad_idx, ):
src_word, src_pos, trg_word, trg_pos, src_slf_attn_bias, trg_slf_attn_bias, trg_src_attn_bias, gold, weights = build_inputs(
max_length, n_head)
enc_input = prepare_encoder(
src_word,
src_pos,
src_vocab_size,
d_model,
src_pad_idx,
max_length,
dropout_rate, )
enc_output = encoder(
enc_input,
src_slf_attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate, )
dec_input = prepare_decoder(
trg_word,
trg_pos,
trg_vocab_size,
d_model,
trg_pad_idx,
max_length,
dropout_rate, )
dec_output = decoder(
dec_input,
enc_output,
trg_slf_attn_bias,
trg_src_attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate, )
# TODO(guosheng): Share the weight matrix between the embedding layers and
# the pre-softmax linear transformation.
predict = layers.reshape(
x=layers.fc(input=dec_output,
size=trg_vocab_size,
param_attr=fluid.initializer.Xavier(uniform=False),
bias_attr=False,
num_flatten_dims=2),
shape=[-1, trg_vocab_size],
act="softmax")
cost = layers.cross_entropy(input=predict, label=gold)
weighted_cost = cost * weights
return layers.reduce_sum(weighted_cost)
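# Illustrative usage sketch (hypothetical hyperparameters): building the summed
# weighted cross-entropy cost for a small model inside a fluid program:
#   sum_cost = transformer(
#       src_vocab_size=10000, trg_vocab_size=10000, max_length=50,
#       n_layer=2, n_head=8, d_key=64, d_value=64, d_model=512,
#       d_inner_hid=2048, dropout_rate=0.1,
#       src_pad_idx=0, trg_pad_idx=0, pos_pad_idx=0)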
|
|
import weakref
from abc import ABCMeta, abstractmethod
from datetime import datetime
import six
from corehq.util.pagination import PaginationEventHandler, TooManyRetries
class BulkProcessingFailed(Exception):
pass
DOCS_SKIPPED_WARNING = """
WARNING {} documents were not processed due to concurrent modification
during migration. Run the migration again until you do not see this
message.
"""
class BaseDocProcessor(six.with_metaclass(ABCMeta)):
"""Base class for processors that get passed"""
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def process_doc(self, doc):
"""Process a single document
:param doc: The document dict to be processed.
:returns: True if doc was processed successfully else False. If this returns False
the document migration will be retried later.
"""
raise NotImplementedError
def process_bulk_docs(self, docs):
"""Process a batch of documents. The default implementation passes
each doc in turn to ``process_doc``.
:param docs: A list of document dicts to be processed.
        :returns: True if the batch of docs was processed successfully else False.
If this returns False the processing will be halted.
"""
return all(self.process_doc(doc) for doc in docs)
def handle_skip(self, doc):
"""Called when a document is going to be skipped i.e. it has been
retried > max_retries.
:returns: True to indicate that the skip has been handled
or False to stop execution
"""
return False
def processing_complete(self, skipped):
pass
def should_process(self, doc):
"""
:param doc: the document to filter
:return: True if this doc should be migrated
"""
return True
class ProcessorProgressLogger(object):
def progress_starting(self, total, previously_visited):
print("Processing {} documents{}: ...".format(
total,
" (~{} already processed)".format(previously_visited) if previously_visited else ""
))
def document_skipped(self, doc_dict):
print("Skip: {doc_type} {_id}".format(**doc_dict))
def progress(self, processed, visited, total, time_elapsed, time_remaining):
print("Processed {}/{} of {} documents in {} ({} remaining)"
.format(processed, visited, total, time_elapsed, time_remaining))
def progress_complete(self, processed, visited, total, previously_visited, filtered):
print("Processed {}/{} of {} documents ({} previously processed, {} filtered out).".format(
processed,
visited,
total,
previously_visited,
filtered
))
class DocumentProvider(six.with_metaclass(ABCMeta)):
@abstractmethod
def get_document_iterator(self, chunk_size, event_handler=None):
"""
:param chunk_size: Maximum number of records to read from the database at one time
:param event_handler: instance of ``PaginateViewLogHandler`` to be notified of view events.
:return: an instance of ``ResumableFunctionIterator``
"""
raise NotImplementedError
@abstractmethod
def get_total_document_count(self):
"""
:return: the total count of documents expected
"""
raise NotImplementedError
class DocumentProcessorController(object):
"""Process Docs
:param document_provider: A ``DocumentProvider`` object
:param doc_processor: A ``BaseDocProcessor`` object used to process documents.
:param reset: Reset existing processor state (if any), causing all
documents to be reconsidered for processing, if this is true.
:param max_retry: Number of times to retry processing a document before giving up.
:param chunk_size: Maximum number of records to read from couch at
one time. It may be necessary to use a smaller chunk size if the
records being processed are very large and the default chunk size of
100 would exceed available memory.
:param event_handler: A ``PaginateViewLogHandler`` object to be notified of pagination events.
:param progress_logger: A ``ProcessorProgressLogger`` object to notify of progress events.
"""
def __init__(self, document_provider, doc_processor, reset=False, max_retry=2,
chunk_size=100, event_handler=None, progress_logger=None):
self.doc_processor = doc_processor
self.reset = reset
self.max_retry = max_retry
self.chunk_size = chunk_size
self.progress_logger = progress_logger or ProcessorProgressLogger()
self.document_provider = document_provider
self.document_iterator = self.document_provider.get_document_iterator(chunk_size, event_handler)
self.visited = 0
self.previously_visited = 0
self.total = 0
self.processed = 0
self.skipped = 0
self.start = None
def has_started(self):
return bool(self.document_iterator.get_iterator_detail('progress'))
@property
def session_visited(self):
return self.visited - self.previously_visited
@property
def session_total(self):
return self.total - self.previously_visited
@property
def attempted(self):
return self.processed + self.skipped
@property
def timing(self):
"""Returns a tuple of (elapsed, remaining)"""
elapsed = datetime.now() - self.start
if self.session_visited > self.session_total:
remaining = "?"
else:
session_remaining = self.session_total - self.session_visited
remaining = elapsed / self.session_visited * session_remaining
return elapsed, remaining
def _setup(self):
self.total = self.document_provider.get_total_document_count()
if self.reset:
self.document_iterator.discard_state()
elif self.document_iterator.get_iterator_detail('progress'):
info = self.document_iterator.get_iterator_detail('progress')
old_total = info["total"]
# Estimate already visited based on difference of old/new
# totals. The theory is that new or deleted records will be
# evenly distributed across the entire set.
self.visited = int(round(float(self.total) / old_total * info["visited"]))
self.previously_visited = self.visited
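            # Worked example (illustrative): if the stored progress had
            # total=1000 and visited=400, and the current total is 1100, the
            # estimate is int(round(1100 / 1000 * 400)) == 440.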
self.progress_logger.progress_starting(self.total, self.previously_visited)
self.start = datetime.now()
def run(self):
"""
:returns: A tuple `(<num processed>, <num skipped>)`
"""
self._setup()
with self.doc_processor:
for doc in self.document_iterator:
self._process_doc(doc)
self._update_progress()
self._processing_complete()
return self.processed, self.skipped
def _process_doc(self, doc):
if not self.doc_processor.should_process(doc):
return
ok = self.doc_processor.process_doc(doc)
if ok:
self.processed += 1
else:
try:
self.document_iterator.retry(doc['_id'], self.max_retry)
except TooManyRetries:
if not self.doc_processor.handle_skip(doc):
raise
else:
self.progress_logger.document_skipped(doc)
self.skipped += 1
def _update_progress(self):
self.visited += 1
if self.visited % self.chunk_size == 0:
self.document_iterator.set_iterator_detail('progress', {"visited": self.visited, "total": self.total})
if self.attempted % self.chunk_size == 0:
elapsed, remaining = self.timing
self.progress_logger.progress(
self.processed, self.visited, self.total, elapsed, remaining
)
def _processing_complete(self):
if self.session_visited:
self.document_iterator.set_iterator_detail('progress', {"visited": self.visited, "total": self.total})
self.doc_processor.processing_complete(self.skipped)
self.progress_logger.progress_complete(
self.processed,
self.visited,
self.total,
self.previously_visited,
self.session_visited - self.attempted
)
if self.skipped:
print(DOCS_SKIPPED_WARNING.format(self.skipped))
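# Illustrative usage sketch (names are hypothetical, not from this module):
#   provider = MyDocumentProvider()     # implements DocumentProvider
#   processor = MyDocProcessor()        # implements BaseDocProcessor
#   controller = DocumentProcessorController(provider, processor, chunk_size=100)
#   processed, skipped = controller.run()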
class BulkDocProcessorEventHandler(PaginationEventHandler):
def __init__(self, processor):
self.processor_ref = weakref.ref(processor)
def page_end(self, total_emitted, duration, *args, **kwargs):
processor = self.processor_ref()
if processor:
processor.process_chunk()
else:
raise BulkProcessingFailed("Processor has gone away")
class BulkDocProcessor(DocumentProcessorController):
"""Process docs in batches
The bulk doc processor will send a batch of documents to the document
processor. If the processor does not respond with True then
the iteration is halted. Restarting the iteration will start by
re-sending the previous chunk to the processor.
The size of the batches passed to the document processor may vary
depending on how they are being filtered by the
document processor but will never exceed ``chunk_size``.
:param document_provider: A ``DocumentProvider`` object
:param doc_processor: A ``BaseDocProcessor`` object used to process documents.
:param reset: Reset existing processor state (if any), causing all
documents to be reconsidered for processing, if this is true.
:param max_retry: Number of times to retry processing a document before giving up.
:param chunk_size: Maximum number of records to read from couch at
one time. It may be necessary to use a smaller chunk size if the
records being processed are very large and the default chunk size of
100 would exceed available memory.
:param progress_logger: A ``ProcessorProgressLogger`` object to notify of progress events.
"""
def __init__(self, document_provider, doc_processor, reset=False, max_retry=2,
chunk_size=100, progress_logger=None):
event_handler = BulkDocProcessorEventHandler(self)
super(BulkDocProcessor, self).__init__(
document_provider, doc_processor, reset, max_retry, chunk_size,
event_handler, progress_logger
)
self.changes = []
def _process_doc(self, doc):
if self.doc_processor.should_process(doc):
self.changes.append(doc)
def process_chunk(self):
"""Called by the BulkDocProcessorLogHandler"""
ok = self.doc_processor.process_bulk_docs(self.changes)
if ok:
self.processed += len(self.changes)
self.changes = []
else:
raise BulkProcessingFailed("Processing batch failed")
def _update_progress(self):
self.visited += 1
if self.visited % self.chunk_size == 0:
self.document_iterator.set_iterator_detail('progress', {"visited": self.visited, "total": self.total})
elapsed, remaining = self.timing
self.progress_logger.progress(
                self.processed, self.visited, self.total, elapsed, remaining
)
|
|
"""This module implements tools for integrating rational functions."""
from ..core import Dummy, I, Integer, Lambda, Symbol, symbols, sympify
from ..domains import ZZ
from ..functions import atan, log
from ..polys import Poly, RootSum, cancel, resultant, roots
from ..simplify import collect
from ..solvers import solve
def ratint(f, x, **flags):
"""Performs indefinite integration of rational functions.
Given a field `K` and a rational function `f = p/q`,
where `p` and `q` are polynomials in `K[x]`,
returns a function `g` such that `f = g'`.
>>> ratint(36/(x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2), x)
(12*x + 6)/(x**2 - 1) + 4*log(x - 2) - 4*log(x + 1)
References
==========
* :cite:`Bronstein2005integration`, pp. 35-70
See Also
========
diofant.integrals.integrals.Integral.doit
ratint_logpart
ratint_ratpart
"""
if type(f) is not tuple:
p, q = f.as_numer_denom()
else:
p, q = f
p, q = p.as_poly(x, composite=False, field=True), q.as_poly(x, composite=False, field=True)
coeff, p, q = p.cancel(q)
poly, p = p.div(q)
result = poly.integrate(x).as_expr()
if p.is_zero:
return coeff*result
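    # Horowitz-Ostrogradsky step: g is the purely rational part of the
    # antiderivative, while h has a square-free denominator and integrates to
    # logarithms (and arctangents over the reals).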
g, h = ratint_ratpart(p, q, x)
P, Q = h.as_numer_denom()
P = P.as_poly(x)
Q = Q.as_poly(x)
q, r = P.div(Q)
result += g + q.integrate(x).as_expr()
if not r.is_zero:
symbol = flags.get('symbol', 't')
if not isinstance(symbol, Symbol):
t = Dummy(symbol)
else:
t = symbol.as_dummy()
L = ratint_logpart(r, Q, x, t)
ereal = flags.get('extended_real')
if ereal is None:
if type(f) is not tuple:
atoms = f.atoms()
else:
p, q = f
atoms = p.atoms() | q.atoms()
for elt in atoms - {x}:
if not elt.is_extended_real:
ereal = False
break
else:
ereal = True
eps = Integer(0)
if not ereal:
for h, q in L:
_, h = h.primitive()
eps += RootSum(
q, Lambda(t, t*log(h.as_expr())), quadratic=True)
else:
for h, q in L:
_, h = h.primitive()
R = log_to_real(h, q, x, t)
if R is not None:
eps += R
else:
eps += RootSum(
q, Lambda(t, t*log(h.as_expr())), quadratic=True)
result += eps
return coeff*result
def ratint_ratpart(f, g, x):
"""Horowitz-Ostrogradsky algorithm.
Given a field K and polynomials f and g in K[x], such that f and g
are coprime and deg(f) < deg(g), returns fractions A and B in K(x),
such that f/g = A' + B and B has square-free denominator.
Examples
========
>>> ratint_ratpart(1, x + 1, x)
(0, 1/(x + 1))
>>> ratint_ratpart(1, x**2 + y**2, x)
(0, 1/(x**2 + y**2))
>>> ratint_ratpart(36, x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2, x)
((12*x + 6)/(x**2 - 1), 12/(x**2 - x - 2))
See Also
========
ratint
ratint_logpart
"""
f = sympify(f).as_poly(x)
g = sympify(g).as_poly(x)
u, v, _ = g.cofactors(g.diff(x))
n = u.degree()
m = v.degree()
A_coeffs = [Dummy('a' + str(n - i)) for i in range(n)]
B_coeffs = [Dummy('b' + str(m - i)) for i in range(m)]
C_coeffs = A_coeffs + B_coeffs
A = Poly(A_coeffs, x, domain=ZZ.inject(*C_coeffs))
B = Poly(B_coeffs, x, domain=ZZ.inject(*C_coeffs))
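    # Ansatz f/g = (A/u)' + B/v with g = u*v and u = gcd(g, g').  Clearing
    # denominators gives H = f - A'*v + A*(u'*v/u) - B*u; equating its
    # coefficients to zero yields a linear system for the undetermined
    # coefficients of A and B.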
H = f - A.diff(x)*v + A*(u.diff(x)*v).quo(u) - B*u
result = solve(H.coeffs(), C_coeffs)[0]
A = A.as_expr().subs(result)
B = B.as_expr().subs(result)
rat_part = cancel(A/u.as_expr(), x)
log_part = cancel(B/v.as_expr(), x)
return rat_part, log_part
def ratint_logpart(f, g, x, t=None):
r"""Lazard-Rioboo-Trager algorithm.
Given a field K and polynomials f and g in K[x], such that f and g
are coprime, deg(f) < deg(g) and g is square-free, returns a list
of tuples (s_i, q_i) of polynomials, for i = 1..n, such that s_i
in K[t, x] and q_i in K[t], and::
                   ___    ___
         d  f   d  \  `   \  `
         -- - = --  )      )   a log(s_i(a, x))
         dx g   dx /__,   /__,
                  i=1..n a | q_i(a) = 0
Examples
========
>>> ratint_logpart(1, x**2 + x + 1, x)
[(Poly(x + 3*_t/2 + 1/2, x, domain='QQ[_t]'),
Poly(3*_t**2 + 1, _t, domain='ZZ'))]
>>> ratint_logpart(12, x**2 - x - 2, x)
[(Poly(x - 3*_t/8 - 1/2, x, domain='QQ[_t]'),
Poly(_t**2 - 16, _t, domain='ZZ'))]
See Also
========
ratint
ratint_ratpart
"""
f, g = sympify(f).as_poly(x), sympify(g).as_poly(x)
t = t or Dummy('t')
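    # Trager's resultant: the coefficients of the logarithms are the roots in t
    # of res_x(g, f - t*g'), while their arguments are recovered from the
    # subresultant PRS computed alongside the resultant.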
a, b = g, f - g.diff(x)*t.as_poly(x)
res, R = resultant(a, b, includePRS=True)
res = res.as_poly(t, composite=False)
assert res, f"BUG: resultant({a}, {b}) can't be zero"
R_map, H = {}, []
for r in R:
R_map[r.degree()] = r
def _include_sign(c, sqf):
if c.is_negative:
h, k = sqf[0]
sqf[0] = h*c, k
C, res_sqf = res.sqf_list()
_include_sign(C, res_sqf)
for q, i in res_sqf:
_, q = q.primitive()
if g.degree() == i:
H.append((g, q))
else:
h = R_map[i]
h_lc = h.LC().as_poly(t, field=True)
c, h_lc_sqf = h_lc.sqf_list()
_include_sign(c, h_lc_sqf)
for a, j in h_lc_sqf:
h = h.quo((a.gcd(q)**j).as_poly(x))
inv, coeffs = h_lc.invert(q), [Integer(1)]
for coeff in h.coeffs()[1:]:
T = (inv*coeff).rem(q)
coeffs.append(T.as_expr())
h = Poly(dict(zip(h.monoms(), coeffs)), x)
H.append((h, q))
return H
def log_to_atan(f, g):
"""Convert complex logarithms to real arctangents.
Given a real field K and polynomials f and g in K[x], with g != 0,
returns a sum h of arctangents of polynomials in K[x], such that:
        dh   d         f + I g
        -- = -- I log( ------- )
        dx   dx        f - I g
Examples
========
>>> log_to_atan(x.as_poly(), Integer(1).as_poly(x))
2*atan(x)
>>> log_to_atan((x + Rational(1, 2)).as_poly(x), (sqrt(3)/2).as_poly(x))
2*atan(2*sqrt(3)*x/3 + sqrt(3)/3)
See Also
========
log_to_real
"""
if f.degree() < g.degree():
f, g = -g, f
f = f.to_field()
g = g.to_field()
p, q = f.div(g)
if q.is_zero:
return 2*atan(p.as_expr())
else:
s, t, h = g.gcdex(-f)
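        # Rioboo's reduction: the extended Euclidean relation s*g - t*f = h
        # splits off one arctangent, 2*atan(u) with u = (f*s + g*t)/h, and the
        # remainder is handled by recursing on the lower-degree pair (s, t).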
u = (f*s + g*t).quo(h)
A = 2*atan(u.as_expr())
return A + log_to_atan(s, t)
def log_to_real(h, q, x, t):
r"""Convert complex logarithms to real functions.
Given real field K and polynomials h in K[t,x] and q in K[t],
returns real function f such that:
                   ___
           df   d  \  `
           -- = --  )  a log(h(a, x))
           dx   dx /__,
                  a | q(a) = 0
Examples
========
>>> log_to_real((x + 3*y/2 + Rational(1, 2)).as_poly(x),
... (3*y**2 + 1).as_poly(y), x, y)
2*sqrt(3)*atan(2*sqrt(3)*x/3 + sqrt(3)/3)/3
>>> log_to_real((x**2 - 1).as_poly(), (-2*y + 1).as_poly(y), x, y)
log(x**2 - 1)/2
See Also
========
log_to_atan
"""
u, v = symbols('u,v', cls=Dummy)
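    # Substitute t = u + I*v and split h and q into real and imaginary parts:
    # real roots of q contribute plain logarithms, while complex-conjugate root
    # pairs contribute log(A**2 + B**2) and arctangent terms.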
H = h.as_expr().subs({t: u + I*v}).expand()
Q = q.as_expr().subs({t: u + I*v}).expand()
H_map = collect(H, I, evaluate=False)
Q_map = collect(Q, I, evaluate=False)
a, b = H_map.get(Integer(1), Integer(0)), H_map.get(I, Integer(0))
c, d = Q_map.get(Integer(1), Integer(0)), Q_map.get(I, Integer(0))
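    # Eliminate v: candidate real parts u of the roots of q are roots of
    # res_v(c, d).  If roots() cannot determine every root of either
    # polynomial, give up so the caller falls back to a RootSum representation.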
R = resultant(c, d, v).as_poly(u)
R_u_all = roots(R)
R_q_all = roots(q)
if sum(R_u_all.values()) < R.degree() or sum(R_q_all.values()) < q.degree():
return
R_u = {k: v for k, v in R_u_all.items() if k.is_extended_real}
R_q = {k: v for k, v in R_q_all.items() if k.is_extended_real}
result = Integer(0)
for r_u in R_u:
C = c.subs({u: r_u}).as_poly(v, extension=False)
R_v_all = roots(C)
if sum(R_v_all.values()) < C.degree():
return
R_v = {k: v for k, v in R_v_all.items() if k.is_extended_real is not False}
R_v_paired = [] # take one from each pair of conjugate roots
for r_v in R_v:
if all(_ not in R_v_paired for _ in [+r_v, -r_v]):
                if r_v.could_extract_minus_sign():
                    R_v_paired.append(-r_v)
                else:
                    R_v_paired.append(r_v)
for r_v in R_v_paired:
D = d.subs({u: r_u, v: r_v})
if D.cancel().evalf(2, chop=True) != 0:
continue
A = a.subs({u: r_u, v: r_v}).as_poly(x, extension=False)
B = b.subs({u: r_u, v: r_v}).as_poly(x, extension=False)
AB = (A**2 + B**2).as_expr()
result += r_u*log(AB) + r_v*log_to_atan(A, B)
for r in R_q:
result += r*log(h.as_expr().subs({t: r}))
return result
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import django
from django.core.urlresolvers import reverse
from django.forms import widgets
from django import http
from django.test.utils import override_settings
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.project.volumes \
.volumes import tables
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
VOLUME_INDEX_URL = reverse('horizon:project:volumes:index')
VOLUME_VOLUMES_TAB_URL = reverse('horizon:project:volumes:volumes_tab')
SEARCH_OPTS = dict(status=api.cinder.VOLUME_STATE_AVAILABLE)
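# The create-volume form only offers 'available' volumes and snapshots as
# sources, so the stubbed cinder list calls below are expected with this
# status filter.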
class VolumeViewTests(test.TestCase):
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume(self):
volume = self.cinder_volumes.first()
volume_type = self.volume_types.first()
az = self.cinder_availability_zones.first().zoneName
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'type': volume_type.name,
'size': 50,
'snapshot_source': '',
'availability_zone': az}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
formData['type'],
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=formData['availability_zone'],
source_volid=None)\
.AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_without_name(self):
volume = self.cinder_volumes.first()
volume_type = self.volume_types.first()
az = self.cinder_availability_zones.first().zoneName
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': '',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'type': volume_type.name,
'size': 50,
'snapshot_source': '',
'availability_zone': az}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
formData['type'],
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=formData['availability_zone'],
source_volid=None)\
.AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_dropdown(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'no_source_type',
'snapshot_source': self.cinder_volume_snapshots.first().id,
'image_source': self.images.first().id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_get',
'volume_get',
'volume_type_list'),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_snapshot(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'snapshot_source': snapshot.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
cinder.volume_get(IsA(http.HttpRequest), snapshot.volume_id).\
AndReturn(self.cinder_volumes.first())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=snapshot.id,
image_id=None,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get snapshot from url
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_get',
'volume_list',
'volume_type_list',
'availability_zone_list',
'volume_snapshot_get',
'volume_snapshot_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_volume(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A copy of a volume',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'volume_source',
'volume_source': volume.id}
cinder.volume_list(IsA(http.HttpRequest), search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volumes.list())
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_get(IsA(http.HttpRequest),
volume.id).AndReturn(self.cinder_volumes.first())
cinder.extension_supported(IsA(http.HttpRequest),
'AvailabilityZones').AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=None,
source_volid=volume.id).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
redirect_url = VOLUME_VOLUMES_TAB_URL
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(info=1)
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_snapshot_get',
'volume_get',
'volume_list',
'volume_type_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_snapshot_dropdown(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'snapshot_source',
'snapshot_source': snapshot.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=snapshot.id,
image_id=None,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get snapshot from dropdown list
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_snapshot_get',
'volume_type_list',
'volume_get'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_snapshot_invalid_size(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 20, 'snapshot_source': snapshot.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
cinder.volume_get(IsA(http.HttpRequest), snapshot.volume_id).\
AndReturn(self.cinder_volumes.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
self.assertFormError(res, 'form', None,
"The volume size cannot be less than the "
"snapshot size (40GB)")
@test.create_stubs({cinder: ('volume_create',
'volume_type_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_get',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_image(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 200,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
image = self.images.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 40,
'type': '',
'image_source': image.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
api.glance.image_get(IsA(http.HttpRequest),
str(image.id)).AndReturn(image)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=image.id,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get image from url
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_type_list',
'volume_list',
'volume_snapshot_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_get',
'image_list_detailed'),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_image_dropdown(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 200,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
image = self.images.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 30,
'type': '',
'volume_source_type': 'image_source',
'snapshot_source': self.cinder_volume_snapshots.first().id,
'image_source': image.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)) \
.AndReturn(usage_limit)
api.glance.image_get(IsA(http.HttpRequest),
str(image.id)).AndReturn(image)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=image.id,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get image from dropdown list
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_type_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_get',
'image_list_detailed'),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_image_under_image_size(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
image = self.images.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 1, 'image_source': image.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
api.glance.image_get(IsA(http.HttpRequest),
str(image.id)).AndReturn(image)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
# in django 1.6 filesizeformat replaces all spaces with
# non-breaking space characters
if django.VERSION >= (1, 6):
msg = (u"The volume size cannot be less than the "
u"image size (20.0\xa0GB)")
else:
msg = (u"The volume size cannot be less than the "
u"image size (20.0 GB)")
self.assertFormError(res, 'form', None, msg)
@test.create_stubs({cinder: ('volume_type_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_get',
'image_list_detailed'),
quotas: ('tenant_limit_usages',)})
def _test_create_volume_from_image_under_image_min_disk_size(self, image):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 5, 'image_source': image.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
api.glance.image_get(IsA(http.HttpRequest),
str(image.id)).AndReturn(image)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
self.assertFormError(res, 'form', None,
"The volume size cannot be less than the "
"image minimum disk size (30GB)")
def test_create_volume_from_image_under_image_min_disk_size(self):
image = self.images.get(name="protected_images")
image.min_disk = 30
self._test_create_volume_from_image_under_image_min_disk_size(image)
def test_create_volume_from_image_under_image_property_min_disk_size(self):
image = self.images.get(name="protected_images")
image.min_disk = 0
image.properties['min_disk'] = 30
self._test_create_volume_from_image_under_image_min_disk_size(image)
@test.create_stubs({cinder: ('volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_gb_used_over_alloted_quota(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 80,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'This Volume Is Huge!',
'description': u'This is a volume that is just too big!',
'method': u'CreateForm',
'size': 5000}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
expected_error = [u'A volume of 5000GB cannot be created as you only'
' have 20GB of your quota available.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
@test.create_stubs({cinder: ('volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_number_over_alloted_quota(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': len(self.cinder_volumes.list())}
formData = {'name': u'Too Many...',
'description': u'We have no volumes left!',
'method': u'CreateForm',
'size': 10}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
expected_error = [u'You are already using all of your available'
' volumes.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_list',
'volume_snapshot_list',
'volume_backup_supported',
'volume_delete',),
api.nova: ('server_list',)})
def test_delete_volume(self):
volumes = self.cinder_volumes.list()
volume = self.cinder_volumes.first()
formData = {'action':
'volumes__delete__%s' % volume.id}
cinder.volume_backup_supported(IsA(http.HttpRequest)). \
MultipleTimes().AndReturn(True)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(volumes)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn([])
cinder.volume_delete(IsA(http.HttpRequest), volume.id)
api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn([self.servers.list(), False])
cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(volumes)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn([])
api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest)).MultipleTimes().\
AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
url = VOLUME_INDEX_URL
res = self.client.post(url, formData, follow=True)
self.assertIn("Scheduled deletion of Volume: Volume name",
[m.message for m in res.context['messages']])
@test.create_stubs({cinder: ('volume_get',
'tenant_absolute_limits')})
def test_delete_volume_with_snap_no_action_item(self):
volume = self.cinder_volumes.get(name='Volume name')
setattr(volume, 'has_snapshot', True)
limits = self.cinder_limits['absolute']
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.tenant_absolute_limits(IsA(http.HttpRequest)). \
MultipleTimes('limits').AndReturn(limits)
self.mox.ReplayAll()
url = (VOLUME_INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(res.status_code, 200)
self.assertNotContains(res, 'Delete Volume')
self.assertNotContains(res, 'delete')
@test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
@override_settings(OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point':
True})
def test_edit_attachments(self):
volume = self.cinder_volumes.first()
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
volume.attachments = [{'id': volume.id,
'volume_id': volume.id,
'volume_name': volume.name,
'instance': servers[0],
'device': '/dev/vdb',
'server_id': servers[0].id}]
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:attach',
args=[volume.id])
res = self.client.get(url)
msg = 'Volume %s on instance %s' % (volume.name, servers[0].name)
self.assertContains(res, msg)
# Asserting length of 2 accounts for the one instance option,
# and the one 'Choose Instance' option.
form = res.context['form']
self.assertEqual(len(form.fields['instance']._choices),
1)
self.assertEqual(res.status_code, 200)
self.assertTrue(isinstance(form.fields['device'].widget,
widgets.TextInput))
self.assertFalse(form.fields['device'].required)
@test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
@override_settings(OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point':
True})
def test_edit_attachments_auto_device_name(self):
volume = self.cinder_volumes.first()
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
volume.attachments = [{'id': volume.id,
'volume_id': volume.id,
'volume_name': volume.name,
'instance': servers[0],
'device': '',
'server_id': servers[0].id}]
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:attach',
args=[volume.id])
res = self.client.get(url)
form = res.context['form']
self.assertTrue(isinstance(form.fields['device'].widget,
widgets.TextInput))
self.assertFalse(form.fields['device'].required)
@test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
def test_edit_attachments_cannot_set_mount_point(self):
volume = self.cinder_volumes.first()
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:attach',
args=[volume.id])
res = self.client.get(url)
# Assert the device field is hidden.
form = res.context['form']
self.assertTrue(isinstance(form.fields['device'].widget,
widgets.HiddenInput))
@test.create_stubs({cinder: ('volume_get',),
api.nova: ('server_list',)})
def test_edit_attachments_attached_volume(self):
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
server = servers[0]
volume = self.cinder_volumes.list()[0]
cinder.volume_get(IsA(http.HttpRequest), volume.id) \
.AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn([servers, False])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:attach',
args=[volume.id])
res = self.client.get(url)
self.assertEqual(res.context['form'].fields['instance']._choices[0][1],
"Select an instance")
self.assertEqual(len(res.context['form'].fields['instance'].choices),
2)
self.assertEqual(res.context['form'].fields['instance']._choices[1][0],
server.id)
self.assertEqual(res.status_code, 200)
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_get',)})
def test_create_snapshot_button_disabled_when_quota_exceeded(self):
limits = {'maxTotalSnapshots': 1}
limits['totalSnapshotsUsed'] = limits['maxTotalSnapshots']
volume = self.cinder_volumes.first()
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.tenant_absolute_limits(IsA(http.HttpRequest)).AndReturn(limits)
self.mox.ReplayAll()
create_link = tables.CreateSnapshot()
url = reverse(create_link.get_link_url(), args=[volume.id])
res_url = (VOLUME_INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(res_url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
classes = (list(create_link.get_default_classes())
+ list(create_link.classes))
link_name = "%s (%s)" % (unicode(create_link.verbose_name),
"Quota exceeded")
expected_string = "<a href='%s' class=\"%s disabled\" "\
"id=\"volumes__row_%s__action_snapshots\">%s</a>" \
% (url, " ".join(classes), volume.id, link_name)
self.assertContains(
res, expected_string, html=True,
msg_prefix="The create snapshot button is not disabled")
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_list',
'volume_snapshot_list',
'volume_backup_supported',),
api.nova: ('server_list',)})
def test_create_button_disabled_when_quota_exceeded(self):
limits = self.cinder_limits['absolute']
limits['totalVolumesUsed'] = limits['maxTotalVolumes']
volumes = self.cinder_volumes.list()
api.cinder.volume_backup_supported(IsA(http.HttpRequest)). \
MultipleTimes().AndReturn(True)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn(volumes)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn([])
api.nova.server_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(limits)
self.mox.ReplayAll()
res = self.client.get(VOLUME_INDEX_URL)
self.assertTemplateUsed(res, 'project/volumes/index.html')
volumes = res.context['volumes_table'].data
self.assertItemsEqual(volumes, self.cinder_volumes.list())
create_link = tables.CreateVolume()
url = create_link.get_link_url()
classes = (list(create_link.get_default_classes())
+ list(create_link.classes))
link_name = "%s (%s)" % (unicode(create_link.verbose_name),
"Quota exceeded")
expected_string = "<a href='%s' title='%s' class='%s disabled' "\
"id='volumes__action_create' data-update-url=" \
"'/project/volumes/?action=create&table=volumes'> "\
"<span class='fa fa-plus'></span>%s</a>" \
% (url, link_name, " ".join(classes), link_name)
self.assertContains(res, expected_string, html=True,
msg_prefix="The create button is not disabled")
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_get',),
api.nova: ('server_get',)})
def test_detail_view(self):
volume = self.cinder_volumes.first()
server = self.servers.first()
volume.attachments = [{"server_id": server.id}]
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:detail',
args=[volume.id])
res = self.client.get(url)
self.assertContains(res, "<h1>Volume Details: Volume name</h1>",
1, 200)
self.assertContains(res, "<dd>Volume name</dd>", 1, 200)
self.assertContains(res, "<dd>%s</dd>" % volume.id, 1, 200)
self.assertContains(res, "<dd>Available</dd>", 1, 200)
self.assertContains(res, "<dd>40 GB</dd>", 1, 200)
self.assertContains(res,
("<a href=\"/project/instances/1/\">%s</a>"
% server.name),
1,
200)
self.assertNoMessages()
@test.create_stubs({cinder: ('volume_get',
'volume_get_encryption_metadata'), })
def test_encryption_detail_view_encrypted(self):
enc_meta = self.cinder_volume_encryption.first()
volume = self.cinder_volumes.get(name='my_volume2')
cinder.volume_get_encryption_metadata(
IsA(http.HttpRequest), volume.id).AndReturn(enc_meta)
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:encryption_detail',
args=[volume.id])
res = self.client.get(url)
self.assertContains(res,
"<h1>Volume Encryption Details: "
"%s</h1>" % volume.name,
1, 200)
self.assertContains(res, "<dd>%s</dd>" % volume.volume_type, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.provider, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.control_location, 1,
200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.cipher, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.key_size, 1, 200)
self.assertNoMessages()
@test.create_stubs({cinder: ('volume_get',
'volume_get_encryption_metadata'), })
def test_encryption_detail_view_unencrypted(self):
enc_meta = self.cinder_volume_encryption.list()[1]
volume = self.cinder_volumes.get(name='my_volume2')
cinder.volume_get_encryption_metadata(
IsA(http.HttpRequest), volume.id).AndReturn(enc_meta)
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:encryption_detail',
args=[volume.id])
res = self.client.get(url)
self.assertContains(res,
"<h1>Volume Encryption Details: "
"%s</h1>" % volume.name,
1, 200)
self.assertContains(res, "<h3>Volume is Unencrypted</h3>", 1, 200)
self.assertNoMessages()
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_get',)})
def test_get_data(self):
volume = self.cinder_volumes.get(name='v2_volume')
volume._apiresource.name = ""
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
url = (VOLUME_INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(res.status_code, 200)
self.assertEqual(volume.name, volume.id)
@test.create_stubs({cinder: ('volume_get',)})
def test_detail_view_with_exception(self):
volume = self.cinder_volumes.first()
server = self.servers.first()
volume.attachments = [{"server_id": server.id}]
cinder.volume_get(IsA(http.HttpRequest), volume.id).\
AndRaise(self.exceptions.cinder)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:detail',
args=[volume.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
@test.create_stubs({cinder: ('volume_update',
'volume_set_bootable',
'volume_get',)})
def test_update_volume(self):
volume = self.cinder_volumes.get(name="my_volume")
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_update(IsA(http.HttpRequest),
volume.id,
volume.name,
volume.description)
cinder.volume_set_bootable(IsA(http.HttpRequest),
volume.id,
False)
self.mox.ReplayAll()
formData = {'method': 'UpdateForm',
'name': volume.name,
'description': volume.description,
'bootable': False}
url = reverse('horizon:project:volumes:volumes:update',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
@test.create_stubs({cinder: ('volume_update',
'volume_set_bootable',
'volume_get',)})
def test_update_volume_without_name(self):
volume = self.cinder_volumes.get(name="my_volume")
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_update(IsA(http.HttpRequest),
volume.id,
'',
volume.description)
cinder.volume_set_bootable(IsA(http.HttpRequest),
volume.id,
False)
self.mox.ReplayAll()
formData = {'method': 'UpdateForm',
'name': '',
'description': volume.description,
'bootable': False}
url = reverse('horizon:project:volumes:volumes:update',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
@test.create_stubs({cinder: ('volume_update',
'volume_set_bootable',
'volume_get',)})
def test_update_volume_bootable_flag(self):
volume = self.cinder_bootable_volumes.get(name="my_volume")
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_update(IsA(http.HttpRequest),
volume.id,
volume.name,
'update bootable flag')
cinder.volume_set_bootable(IsA(http.HttpRequest),
volume.id,
True)
self.mox.ReplayAll()
formData = {'method': 'UpdateForm',
'name': volume.name,
'description': 'update bootable flag',
'bootable': True}
url = reverse('horizon:project:volumes:volumes:update',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
@test.create_stubs({cinder: ('volume_upload_to_image',
'volume_get')})
def test_upload_to_image(self):
volume = self.cinder_volumes.get(name='v2_volume')
loaded_resp = {'container_format': 'bare',
'disk_format': 'raw',
'id': '741fe2ac-aa2f-4cec-82a9-4994896b43fb',
'image_id': '2faa080b-dd56-4bf0-8f0a-0d4627d8f306',
'image_name': 'test',
'size': '2',
'status': 'uploading'}
form_data = {'id': volume.id,
'name': volume.name,
'image_name': 'testimage',
'force': True,
'container_format': 'bare',
'disk_format': 'raw'}
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_upload_to_image(
IsA(http.HttpRequest),
form_data['id'],
form_data['force'],
form_data['image_name'],
form_data['container_format'],
form_data['disk_format']).AndReturn(loaded_resp)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:upload_to_image',
args=[volume.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertMessageCount(info=1)
redirect_url = VOLUME_INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_get',
'volume_extend'),
quotas: ('tenant_limit_usages',)})
def test_extend_volume(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 120}
cinder.volume_get(IsA(http.HttpRequest), volume.id).\
AndReturn(self.cinder_volumes.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_extend(IsA(http.HttpRequest),
volume.id,
formData['new_size']).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
redirect_url = VOLUME_INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_get',),
quotas: ('tenant_limit_usages',)})
def test_extend_volume_with_wrong_size(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 10}
cinder.volume_get(IsA(http.HttpRequest), volume.id).\
AndReturn(self.cinder_volumes.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
self.assertFormError(res, 'form', None,
"New size must be greater than "
"current size.")
@test.create_stubs({cinder: ('volume_get',
'tenant_absolute_limits')})
def test_retype_volume_supported_action_item(self):
volume = self.cinder_volumes.get(name='v2_volume')
limits = self.cinder_limits['absolute']
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes('limits').AndReturn(limits)
self.mox.ReplayAll()
url = (VOLUME_INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(res.status_code, 200)
self.assertContains(res, 'Change Volume Type')
self.assertContains(res, 'retype')
@test.create_stubs({cinder: ('volume_get',
'volume_retype',
'volume_type_list')})
def test_retype_volume(self):
volume = self.cinder_volumes.get(name='my_volume2')
volume_type = self.cinder_volume_types.get(name='vol_type_1')
form_data = {'id': volume.id,
'name': volume.name,
'volume_type': volume_type.name,
'migration_policy': 'on-demand'}
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_type_list(
IsA(http.HttpRequest)).AndReturn(self.cinder_volume_types.list())
cinder.volume_retype(
IsA(http.HttpRequest),
volume.id,
form_data['volume_type'],
form_data['migration_policy']).AndReturn(True)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:retype',
args=[volume.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redirect_url = VOLUME_INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_get',
'volume_type_list')})
def test_retype_volume_same_type(self):
volume = self.cinder_volumes.get(name='my_volume2')
volume_type = self.cinder_volume_types.get(name='vol_type_2')
form_data = {'id': volume.id,
'name': volume.name,
'volume_type': volume_type.name,
'migration_policy': 'on-demand'}
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_type_list(
IsA(http.HttpRequest)).AndReturn(self.cinder_volume_types.list())
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:retype',
args=[volume.id])
res = self.client.post(url, form_data)
self.assertFormError(res,
'form',
'volume_type',
'New volume type must be different from the '
'original volume type "%s".' % volume_type.name)
def test_encryption_false(self):
self._test_encryption(False)
def test_encryption_true(self):
self._test_encryption(True)
@test.create_stubs({cinder: ('volume_list',
'volume_snapshot_list',
'volume_backup_supported',
'tenant_absolute_limits'),
api.nova: ('server_list',)})
def _test_encryption(self, encryption):
volumes = self.volumes.list()
for volume in volumes:
volume.encrypted = encryption
limits = self.cinder_limits['absolute']
cinder.volume_backup_supported(IsA(http.HttpRequest))\
.MultipleTimes('backup_supported').AndReturn(False)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn(self.volumes.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn(self.cinder_volume_snapshots.list())
api.nova.server_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes('limits').AndReturn(limits)
self.mox.ReplayAll()
res = self.client.get(VOLUME_INDEX_URL)
rows = res.context['volumes_table'].get_rows()
if encryption:
column_value = 'Yes'
else:
column_value = 'No'
for row in rows:
self.assertEqual(row.cells['encryption'].data, column_value)
@test.create_stubs({cinder: ('volume_get',),
quotas: ('tenant_limit_usages',)})
def test_extend_volume_with_size_out_of_quota(self):
volume = self.volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 1000}
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_get(IsA(http.HttpRequest), volume.id).\
AndReturn(self.volumes.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
self.assertFormError(res, "form", "new_size",
"Volume cannot be extended to 1000GB as you only "
"have 80GB of your quota available.")
@test.create_stubs({cinder: ('volume_backup_supported',
'volume_list',
'volume_snapshot_list',
'tenant_absolute_limits'),
api.nova: ('server_list',)})
def test_create_transfer_availability(self):
limits = self.cinder_limits['absolute']
cinder.volume_backup_supported(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(False)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn(self.volumes.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn([])
api.nova.server_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(limits)
self.mox.ReplayAll()
res = self.client.get(VOLUME_INDEX_URL)
table = res.context['volumes_table']
# Verify that the create transfer action is present if and only if
# the volume is available
for vol in table.data:
actions = [a.name for a in table.get_row_actions(vol)]
self.assertEqual('create_transfer' in actions,
vol.status == 'available')
@test.create_stubs({cinder: ('transfer_create',)})
def test_create_transfer(self):
volumes = self.volumes.list()
volToTransfer = [v for v in volumes if v.status == 'available'][0]
formData = {'volume_id': volToTransfer.id,
'name': u'any transfer name'}
cinder.transfer_create(IsA(http.HttpRequest),
formData['volume_id'],
formData['name']).AndReturn(
self.cinder_volume_transfers.first())
self.mox.ReplayAll()
# Create a transfer for the first available volume
url = reverse('horizon:project:volumes:volumes:create_transfer',
args=[volToTransfer.id])
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
@test.create_stubs({cinder: ('volume_backup_supported',
'volume_list',
'volume_snapshot_list',
'transfer_delete',
'tenant_absolute_limits'),
api.nova: ('server_list',)})
def test_delete_transfer(self):
transfer = self.cinder_volume_transfers.first()
volumes = []
# Attach the volume transfer to the relevant volume
for v in self.cinder_volumes.list():
if v.id == transfer.volume_id:
v.status = 'awaiting-transfer'
v.transfer = transfer
volumes.append(v)
formData = {'action':
'volumes__delete_transfer__%s' % transfer.volume_id}
cinder.volume_backup_supported(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(False)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn(volumes)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn([])
cinder.transfer_delete(IsA(http.HttpRequest), transfer.id)
api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest)).MultipleTimes().\
AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
url = VOLUME_INDEX_URL
res = self.client.post(url, formData, follow=True)
self.assertNoFormErrors(res)
self.assertIn('Successfully deleted volume transfer "test transfer"',
[m.message for m in res.context['messages']])
@test.create_stubs({cinder: ('transfer_accept',)})
def test_accept_transfer(self):
transfer = self.cinder_volume_transfers.first()
cinder.transfer_accept(IsA(http.HttpRequest), transfer.id,
transfer.auth_key)
self.mox.ReplayAll()
formData = {'transfer_id': transfer.id, 'auth_key': transfer.auth_key}
url = reverse('horizon:project:volumes:volumes:accept_transfer')
res = self.client.post(url, formData, follow=True)
self.assertNoFormErrors(res)
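# Note on the pattern used throughout these tests (illustrative comments
# only): each test records expectations on the stubbed API calls, switches
# mox into replay mode, exercises the view, and relies on the test harness
# to verify the recorded expectations on teardown:
#
#   cinder.transfer_create(IsA(http.HttpRequest), ...).AndReturn(...)  # record
#   self.mox.ReplayAll()                                               # replay
#   res = self.client.post(url, formData)                              # exercise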
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import re
import time
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.utils.translation import ugettext as _
from desktop.lib.i18n import smart_str
from desktop.lib.view_util import format_duration_in_millis
from filebrowser.views import location_to_url
from jobbrowser.views import job_single_logs
from liboozie.oozie_api import get_oozie
from oozie.models import Workflow, Pig
from oozie.views.editor import _submit_workflow
LOG = logging.getLogger(__name__)
def get(fs, jt, user):
return OozieApi(fs, jt, user)
class OozieApi:
"""
Oozie submission.
"""
WORKFLOW_NAME = 'pig-app-hue-script'
RE_LOG_END = re.compile('(<<< Invocation of Pig command completed <<<|<<< Invocation of Main class completed <<<)')
RE_LOG_START_RUNNING = re.compile('>>> Invoking Pig command line now >>>(.+?)(<<< Invocation of Pig command completed <<<|<<< Invocation of Main class completed)', re.M | re.DOTALL)
RE_LOG_START_FINISHED = re.compile('(>>> Invoking Pig command line now >>>)', re.M | re.DOTALL)
MAX_DASHBOARD_JOBS = 100
def __init__(self, fs, jt, user):
self.fs = fs
self.jt = jt
self.user = user
def submit(self, pig_script, params):
workflow = None
try:
workflow = self._create_workflow(pig_script, params)
mapping = dict([(param['name'], param['value']) for param in workflow.get_parameters()])
oozie_wf = _submit_workflow(self.user, self.fs, self.jt, workflow, mapping)
finally:
if workflow:
workflow.delete(skip_trash=True)
return oozie_wf
def _create_workflow(self, pig_script, params):
workflow = Workflow.objects.new_workflow(self.user)
workflow.name = OozieApi.WORKFLOW_NAME
workflow.is_history = True
if pig_script.use_hcatalog:
workflow.add_parameter("oozie.action.sharelib.for.pig", "pig,hcatalog")
workflow.save()
Workflow.objects.initialize(workflow, self.fs)
script_path = workflow.deployment_dir + '/script.pig'
if self.fs: # For testing, difficult to mock
self.fs.do_as_user(self.user.username, self.fs.create, script_path, data=smart_str(pig_script.dict['script']))
files = []
archives = []
popup_params = json.loads(params)
popup_params_names = [param['name'] for param in popup_params]
pig_params = self._build_parameters(popup_params)
script_params = [param for param in pig_script.dict['parameters'] if param['name'] not in popup_params_names]
pig_params += self._build_parameters(script_params)
job_properties = [{"name": prop['name'], "value": prop['value']} for prop in pig_script.dict['hadoopProperties']]
for resource in pig_script.dict['resources']:
if resource['type'] == 'file':
files.append(resource['value'])
if resource['type'] == 'archive':
archives.append({"dummy": "", "name": resource['value']})
action = Pig.objects.create(
name='pig',
script_path=script_path,
workflow=workflow,
node_type='pig',
params=json.dumps(pig_params),
files=json.dumps(files),
archives=json.dumps(archives),
job_properties=json.dumps(job_properties),
)
action.add_node(workflow.end)
start_link = workflow.start.get_link()
start_link.child = action
start_link.save()
return workflow
def _build_parameters(self, params):
pig_params = []
for param in params:
if param['name'].startswith('-'):
pig_params.append({"type": "argument", "value": "%(name)s" % param})
if param['value']:
pig_params.append({"type": "argument", "value": "%(value)s" % param})
else:
# Simpler way and backward compatibility for parameters
pig_params.append({"type": "argument", "value": "-param"})
pig_params.append({"type": "argument", "value": "%(name)s=%(value)s" % param})
return pig_params
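        # Illustration of the mapping above (comments only): popup params
        #   [{'name': 'input', 'value': '/data/in'},
        #    {'name': '-verbose', 'value': ''}]
        # become Pig arguments equivalent to
        #   -param input=/data/in    (plain parameter)
        #   -verbose                 (dash-prefixed passthrough argument)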
def stop(self, job_id):
return get_oozie(self.user).job_control(job_id, 'kill')
def get_jobs(self):
kwargs = {'cnt': OozieApi.MAX_DASHBOARD_JOBS,}
kwargs['filters'] = [
('user', self.user.username),
('name', OozieApi.WORKFLOW_NAME)
]
return get_oozie(self.user).get_workflows(**kwargs).jobs
def get_log(self, request, oozie_workflow):
logs = {}
is_really_done = False
for action in oozie_workflow.get_working_actions():
try:
if action.externalId:
data = job_single_logs(request, **{'job': action.externalId})
if data:
matched_logs = self._match_logs(data)
logs[action.name] = self._make_links(matched_logs)
is_really_done = OozieApi.RE_LOG_END.search(data['logs'][1]) is not None
except Exception, e:
                LOG.error('An error happened while watching the job running: %(error)s' % {'error': e})
is_really_done = True
workflow_actions = []
# Only one Pig action
for action in oozie_workflow.get_working_actions():
progress = get_progress(oozie_workflow, logs.get(action.name, ''))
appendable = {
'name': action.name,
'status': action.status,
'logs': logs.get(action.name, ''),
'isReallyDone': is_really_done,
'progress': progress,
'progressPercent': '%d%%' % progress,
'absoluteUrl': oozie_workflow.get_absolute_url(),
}
workflow_actions.append(appendable)
return logs, workflow_actions, is_really_done
def _match_logs(self, data):
"""Difficult to match multi lines of text"""
logs = data['logs'][1]
if OozieApi.RE_LOG_END.search(logs):
return re.search(OozieApi.RE_LOG_START_RUNNING, logs).group(1).strip()
else:
group = re.search(OozieApi.RE_LOG_START_FINISHED, logs)
i = logs.index(group.group(1)) + len(group.group(1))
return logs[i:].strip()
@classmethod
def _make_links(cls, log):
escaped_logs = escape(log)
hdfs_links = re.sub('((?<= |;)/|hdfs://)[^ <&\t;,\n]+', OozieApi._make_hdfs_link, escaped_logs)
return re.sub('(job_[0-9_]+(/|\.)?)', OozieApi._make_mr_link, hdfs_links)
    @classmethod
    def _make_hdfs_link(cls, match):
        try:
            return '<a href="%s" target="_blank">%s</a>' % (location_to_url(match.group(0), strict=False), match.group(0))
        except Exception:
            return match.group(0)
    @classmethod
    def _make_mr_link(cls, match):
        try:
            return '<a href="%s" target="_blank">%s</a>' % (reverse('jobbrowser.views.single_job', kwargs={'job': match.group(0)}), match.group(0))
        except Exception:
            return match.group(0)
def massaged_jobs_for_json(self, request, oozie_jobs, hue_jobs):
jobs = []
hue_jobs = dict([(script.dict.get('job_id'), script) for script in hue_jobs if script.dict.get('job_id')])
for job in oozie_jobs:
if job.is_running():
job = get_oozie(self.user).get_job(job.id)
get_copy = request.GET.copy() # Hacky, would need to refactor JobBrowser get logs
get_copy['format'] = 'python'
request.GET = get_copy
try:
logs, workflow_action, is_really_done = self.get_log(request, job)
progress = workflow_action[0]['progress']
            except Exception:
progress = 0
else:
progress = 100
            hue_pig = hue_jobs.get(job.id)  # .get() already returns None on a miss
massaged_job = {
'id': job.id,
'lastModTime': hasattr(job, 'lastModTime') and job.lastModTime and format_time(job.lastModTime) or None,
'kickoffTime': hasattr(job, 'kickoffTime') and job.kickoffTime or None,
'timeOut': hasattr(job, 'timeOut') and job.timeOut or None,
'endTime': job.endTime and format_time(job.endTime) or None,
'status': job.status,
'isRunning': job.is_running(),
'duration': job.endTime and job.startTime and format_duration_in_millis(( time.mktime(job.endTime) - time.mktime(job.startTime) ) * 1000) or None,
'appName': hue_pig and hue_pig.dict['name'] or _('Unsaved script'),
'scriptId': hue_pig and hue_pig.id or -1,
'scriptContent': hue_pig and hue_pig.dict['script'] or '',
'progress': progress,
'progressPercent': '%d%%' % progress,
'user': job.user,
'absoluteUrl': job.get_absolute_url(),
'canEdit': has_job_edition_permission(job, self.user),
'killUrl': reverse('oozie:manage_oozie_jobs', kwargs={'job_id':job.id, 'action':'kill'}),
'watchUrl': reverse('pig:watch', kwargs={'job_id': job.id}) + '?format=python',
                'created': hasattr(job, 'createdTime') and job.createdTime and ((job.type == 'Bundle' and job.createdTime) or format_time(job.createdTime)),
'startTime': hasattr(job, 'startTime') and format_time(job.startTime) or None,
'run': hasattr(job, 'run') and job.run or 0,
'frequency': hasattr(job, 'frequency') and job.frequency or None,
'timeUnit': hasattr(job, 'timeUnit') and job.timeUnit or None,
}
jobs.append(massaged_job)
return jobs
def get_progress(job, log):
if job.status in ('SUCCEEDED', 'KILLED', 'FAILED'):
return 100
else:
try:
return int(re.findall("MapReduceLauncher - (1?\d?\d)% complete", log)[-1])
        except IndexError:  # no progress line in the log yet
return 0
def format_time(st_time):
if st_time is None:
return '-'
else:
return time.strftime("%a, %d %b %Y %H:%M:%S", st_time)
def has_job_edition_permission(oozie_job, user):
return user.is_superuser or oozie_job.user == user.username
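# A minimal, self-contained sketch (not part of the original module) showing
# how get_progress() reads MapReduceLauncher lines; the job class and log
# text below are fabricated for illustration.
class _FakeOozieJob(object):
    status = 'RUNNING'

def _demo_get_progress():
    log = '... MapReduceLauncher - 42% complete'
    assert get_progress(_FakeOozieJob(), log) == 42  # parsed from the log
    _FakeOozieJob.status = 'SUCCEEDED'
    assert get_progress(_FakeOozieJob(), '') == 100  # finished jobs are 100%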
|
|
"""
SoftLayer.tests.CLI.modules.dns_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import json
import os.path
import mock
from SoftLayer.CLI.dns import zone_import
from SoftLayer.CLI import exceptions
from SoftLayer import testing
class DnsTests(testing.TestCase):
def test_zone_print(self):
result = self.run_command(['dns', 'zone-print', '1234'])
self.assert_no_fail(result)
self.assertEqual(json.loads(result.output), "lots of text")
def test_create_zone(self):
result = self.run_command(['dns', 'zone-create', 'example.com'])
self.assert_no_fail(result)
self.assertEqual(result.output, "")
@mock.patch('SoftLayer.CLI.formatting.no_going_back')
def test_delete_zone(self, no_going_back_mock):
no_going_back_mock.return_value = True
result = self.run_command(['dns', 'zone-delete', '1234'])
self.assert_no_fail(result)
self.assertEqual(result.output, "")
no_going_back_mock.return_value = False
result = self.run_command(['--really', 'dns', 'zone-delete', '1234'])
self.assert_no_fail(result)
self.assertEqual(result.output, "")
@mock.patch('SoftLayer.CLI.formatting.no_going_back')
def test_delete_zone_abort(self, no_going_back_mock):
no_going_back_mock.return_value = False
result = self.run_command(['dns', 'zone-delete', '1234'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.CLIAbort)
def test_list_zones(self):
result = self.run_command(['dns', 'zone-list'])
self.assert_no_fail(result)
self.assertEqual(json.loads(result.output),
[{'serial': 2014030728,
'updated': '2014-03-07T13:52:31-06:00',
'id': 12345,
'zone': 'example.com'}])
def test_list_records(self):
result = self.run_command(['dns', 'record-list', '1234'])
self.assert_no_fail(result)
self.assertEqual(json.loads(result.output)[0],
{'record': 'a',
'type': 'CNAME',
'id': 1,
'data': 'd',
'ttl': 7200})
def test_add_record(self):
result = self.run_command(['dns', 'record-add', 'hostname', 'A',
'data', '--zone=1234', '--ttl=100'])
self.assert_no_fail(result)
self.assertEqual(str(result.output), 'A record added successfully\n')
def test_add_record_mx(self):
result = self.run_command(['dns', 'record-add', 'hostname', 'MX',
'data', '--zone=1234', '--ttl=100', '--priority=25'])
self.assert_no_fail(result)
self.assertEqual(str(result.output), 'MX record added successfully\n')
def test_add_record_srv(self):
result = self.run_command(['dns', 'record-add', 'hostname', 'SRV',
'data', '--zone=1234', '--protocol=udp',
'--port=88', '--ttl=100', '--weight=5'])
self.assert_no_fail(result)
self.assertEqual(str(result.output), 'SRV record added successfully\n')
def test_add_record_ptr(self):
result = self.run_command(['dns', 'record-add', '192.168.1.1', 'PTR',
'hostname', '--ttl=100'])
self.assert_no_fail(result)
self.assertEqual(str(result.output), 'PTR record added successfully\n')
def test_add_record_abort(self):
result = self.run_command(['dns', 'record-add', 'hostname', 'A',
'data', '--ttl=100'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.CLIAbort)
self.assertEqual(result.exception.message, "A isn't a valid record type or zone is missing")
@mock.patch('SoftLayer.CLI.formatting.no_going_back')
def test_delete_record(self, no_going_back_mock):
no_going_back_mock.return_value = True
result = self.run_command(['dns', 'record-remove', '1234'])
self.assert_no_fail(result)
self.assertEqual(result.output, "")
@mock.patch('SoftLayer.CLI.formatting.no_going_back')
def test_delete_record_abort(self, no_going_back_mock):
no_going_back_mock.return_value = False
result = self.run_command(['dns', 'record-remove', '1234'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.CLIAbort)
def test_parse_zone_file(self):
zone_file = """$ORIGIN realtest.com.
$TTL 86400
@ IN SOA ns1.softlayer.com. support.softlayer.com. (
2014052300 ; Serial
7200 ; Refresh
600 ; Retry
1728000 ; Expire
43200) ; Minimum
@ 86400 IN NS ns1.softlayer.com.
@ 86400 IN NS ns2.softlayer.com.
IN MX 10 test.realtest.com.
testing 86400 IN A 127.0.0.1
testing1 86400 IN A 12.12.0.1
server2 IN A 1.0.3.4
ftp IN CNAME server2
dev.realtest.com IN TXT "This is just a test of the txt record"
IN AAAA 2001:db8:10::1
spf IN TXT "v=spf1 ip4:192.0.2.0/24 ip4:198.51.100.123 a"
*.testing 86400 IN A 127.0.0.2
* 86400 IN A 127.0.0.3
"""
expected = [{'data': 'ns1.softlayer.com.',
'record': '@',
'type': 'NS',
'ttl': '86400'},
{'data': 'ns2.softlayer.com.',
'record': '@',
'type': 'NS',
'ttl': '86400'},
{'data': '127.0.0.1',
'record': 'testing',
'type': 'A',
'ttl': '86400'},
{'data': '12.12.0.1',
'record': 'testing1',
'type': 'A',
'ttl': '86400'},
{'data': '1.0.3.4',
'record': 'server2',
'type': 'A',
'ttl': None},
{'data': 'server2',
'record': 'ftp',
'type': 'CNAME',
'ttl': None},
{'data': '"This is just a test of the txt record"',
'record': 'dev.realtest.com',
'type': 'TXT',
'ttl': None},
{'data': '"v=spf1 ip4:192.0.2.0/24 ip4:198.51.100.123 a"',
'record': 'spf',
'type': 'TXT',
'ttl': None},
{'data': '127.0.0.2',
'record': '*.testing',
'type': 'A',
'ttl': '86400'},
{'data': '127.0.0.3',
'record': '*',
'type': 'A',
'ttl': '86400'}]
zone, records, bad_lines = zone_import.parse_zone_details(zone_file)
self.assertEqual(zone, 'realtest.com')
self.assertEqual(records, expected)
self.assertEqual(len(bad_lines), 13)
def test_import_zone_dry_run(self):
path = os.path.join(testing.FIXTURE_PATH, 'realtest.com')
result = self.run_command(['dns', 'import', path, '--dry-run'])
self.assertIn("Parsed: zone=realtest.com", result.output)
self.assertIn(
"Parsed: type=NS, record=@, data=ns1.softlayer.com., ttl=86400",
result.output)
self.assertIn("Unparsed: $TTL 86400", result.output)
def test_import_zone(self):
path = os.path.join(testing.FIXTURE_PATH, 'realtest.com')
result = self.run_command(['dns', 'import', path])
self.assertEqual(self.calls('SoftLayer_Dns_Domain', 'createObject'),
[])
calls = self.calls('SoftLayer_Dns_Domain_ResourceRecord',
'createObject')
expected_calls = [{'data': 'ns1.softlayer.com.',
'host': '@',
'domainId': 12345,
'type': 'NS',
'ttl': '86400'},
{'data': 'ns2.softlayer.com.',
'host': '@',
'domainId': 12345,
'type': 'NS',
'ttl': '86400'},
{'data': '127.0.0.1',
'host': 'testing',
'domainId': 12345,
'type': 'A',
'ttl': '86400'},
{'data': '12.12.0.1',
'host': 'testing1',
'domainId': 12345,
'type': 'A',
'ttl': '86400'},
{'data': '1.0.3.4',
'host': 'server2',
'domainId': 12345,
'type': 'A',
'ttl': None},
{'data': 'server2',
'host': 'ftp',
'domainId': 12345,
'type': 'CNAME',
'ttl': None},
{'data':
'"This is just a test of the txt record"',
'host': 'dev.realtest.com',
'domainId': 12345,
'type': 'TXT',
'ttl': None},
{'data': '"v=spf1 ip4:192.0.2.0/24 '
'ip4:198.51.100.123 a -all"',
'host': 'spf',
'domainId': 12345,
'type': 'TXT',
'ttl': None}]
self.assertEqual(len(calls), len(expected_calls))
for call, expected_call in zip(calls, expected_calls):
self.assertEqual(call.args[0], expected_call)
self.assertIn("Finished", result.output)
|
|
# -*- coding: utf-8 -*-
"""
The heart and soul of Trigger, NetDevices is an abstract interface to network
device metadata and ACL associations.
Parses :setting:`NETDEVICES_SOURCE` and makes available a dictionary of
`~trigger.netdevices.NetDevice` objects, which is keyed by the FQDN of every
network device.
Other interfaces are non-public.
Example::
>>> from trigger.netdevices import NetDevices
>>> nd = NetDevices()
>>> dev = nd['test1-abc.net.aol.com']
>>> dev.vendor, dev.make
(<Vendor: Juniper>, 'MX960-BASE-AC')
>>> dev.bounce.next_ok('green')
datetime.datetime(2010, 4, 9, 9, 0, tzinfo=<UTC>)
"""
__author__ = 'Jathan McCollum, Eileen Tschetter, Mark Thomas, Michael Shields'
__maintainer__ = 'Jathan McCollum'
__email__ = 'jathan@gmail.com'
__copyright__ = 'Copyright 2006-2013, AOL Inc.; 2013 Salesforce.com'
__version__ = '2.3.2'
# Imports
import copy
import itertools
import os
import re
import sys
import time
from twisted.python import log
from trigger.conf import settings
from trigger.utils import network, parse_node_port
from trigger.utils.url import parse_url
from trigger import changemgmt, exceptions, rancid
from UserDict import DictMixin
import xml.etree.cElementTree as ET
from . import loader
try:
from trigger.acl.db import AclsDB
except ImportError:
log.msg("ACLs database could not be loaded; Loading without ACL support")
settings.WITH_ACLS = False
# Constants
JUNIPER_COMMIT = ET.Element('commit-configuration')
JUNIPER_COMMIT_FULL = copy.copy(JUNIPER_COMMIT)
ET.SubElement(JUNIPER_COMMIT_FULL, 'full')
# Exports
__all__ = ['device_match', 'NetDevice', 'NetDevices', 'Vendor']
# Functions
def _munge_source_data(data_source=settings.NETDEVICES_SOURCE):
"""
    Read the source data in the specified format, parse it, and return the
    parsed device metadata.
:param data_source:
Absolute path to source data file
"""
log.msg('LOADING FROM: ', data_source)
kwargs = parse_url(data_source)
path = kwargs.pop('path')
return loader.load_metadata(path, **kwargs)
def _populate(netdevices, data_source, production_only, with_acls):
"""
    Populates the passed ``netdevices`` dictionary with NetDevice objects.
Abstracted from within NetDevices to prevent accidental repopulation of NetDevice
objects.
"""
#start = time.time()
device_data = _munge_source_data(data_source=data_source)
# Populate AclsDB if `with_acls` is set
if with_acls:
log.msg("NetDevices ACL associations: ENABLED")
aclsdb = AclsDB()
else:
log.msg("NetDevices ACL associations: DISABLED")
aclsdb = None
# Populate `netdevices` dictionary with `NetDevice` objects!
for obj in device_data:
dev = NetDevice(data=obj, with_acls=aclsdb)
# Only return devices with adminStatus of 'PRODUCTION' unless
# `production_only` is True
if dev.adminStatus.upper() != 'PRODUCTION' and production_only:
log.msg(
'[%s] Skipping: adminStatus not PRODUCTION' % dev.nodeName
)
continue
# These checks should be done on generation of netdevices.xml.
# Skip empty nodenames
if dev.nodeName is None:
continue
# Add to dict
netdevices[dev.nodeName] = dev
#end = time.time()
#print 'Took %f seconds' % (end - start)
def device_match(name, production_only=True):
"""
Return a matching :class:`~trigger.netdevices.NetDevice` object based on
    partial name. Return `None` if there is no match or if the choice
    among multiple matches is cancelled::
>>> device_match('test')
2 possible matches found for 'test':
[ 1] test1-abc.net.aol.com
[ 2] test2-abc.net.aol.com
[ 0] Exit
Enter a device number: 2
<NetDevice: test2-abc.net.aol.com>
If there is only a single match, that device object is returned without
a prompt::
>>> device_match('fw')
Matched 'fw1-xyz.net.aol.com'.
<NetDevice: fw1-xyz.net.aol.com>
"""
match = None
nd = NetDevices(production_only)
try:
match = nd.find(name)
except KeyError:
matches = nd.search(name)
if matches:
if len(matches) == 1:
single = matches[0]
print "Matched '%s'." % single
return single
print "%d possible matches found for '%s':" % (len(matches), name)
matches.sort()
for num, shortname in enumerate(matches):
print ' [%s] %s' % (str(num+1).rjust(2), shortname)
print ' [ 0] Exit\n'
choice = input('Enter a device number: ') - 1
match = None if choice < 0 else matches[choice]
log.msg('Choice: %s' % choice)
log.msg('You chose: %s' % match)
else:
print "No matches for '%s'." % name
return match
# Classes
class NetDevice(object):
"""
An object that represents a distinct network device and its metadata.
Almost all of the attributes are populated by
`~trigger.netdevices._populate()` and are mostly dependent upon the source
data. This is prone to implementation problems and should be revisited in
the long-run as there are certain fields that are baked into the core
functionality of Trigger.
Users usually won't create these objects directly! Rely instead upon
    `~trigger.netdevices.NetDevices` to do this for you.
"""
def __init__(self, data=None, with_acls=None):
# Here comes all of the bare minimum set of attributes a NetDevice
# object needs for basic functionality within the existing suite.
# Hostname
self.nodeName = None
self.nodePort = None
# Hardware Info
self.deviceType = None
self.make = None
self.manufacturer = settings.FALLBACK_MANUFACTURER
self.vendor = None
self.model = None
self.serialNumber = None
# Administrivia
self.adminStatus = settings.DEFAULT_ADMIN_STATUS
self.assetID = None
self.budgetCode = None
self.budgetName = None
self.enablePW = None
self.owningTeam = None
self.owner = None
self.onCallName = None
self.operationStatus = None
self.lastUpdate = None
self.lifecycleStatus = None
self.projectName = None
# Location
self.site = None
self.room = None
self.coordinate = None
# If `data` has been passed, use it to update our attributes
if data is not None:
self._populate_data(data)
# Set node remote port based on "hostname:port" as nodeName
self._set_node_port()
# Cleanup the attributes (strip whitespace, lowercase values, etc.)
self._cleanup_attributes()
# Map the manufacturer name to a Vendor object that has extra sauce
if self.manufacturer is not None:
self.vendor = vendor_factory(self.manufacturer)
# Use the vendor to populate the deviceType if it's not set already
if self.deviceType is None:
self._populate_deviceType()
# ACLs (defaults to empty sets)
        # Use distinct set objects; a chained assignment here would alias
        # one shared set across all four attributes.
        self.explicit_acls = set()
        self.implicit_acls = set()
        self.acls = set()
        self.bulk_acls = set()
if with_acls:
log.msg('[%s] Populating ACLs' % self.nodeName)
self._populate_acls(aclsdb=with_acls)
# Bind the correct execute/connect methods based on deviceType
self._bind_dynamic_methods()
# Set the correct command(s) to run on startup based on deviceType
self.startup_commands = self._set_startup_commands()
# Assign the configuration commit commands (e.g. 'write memory')
self.commit_commands = self._set_commit_commands()
# Determine whether we require an async pty SSH channel
self.requires_async_pty = self._set_requires_async_pty()
# Set the correct line-ending per vendor
self.delimiter = self._set_delimiter()
def _populate_data(self, data):
"""
Populate the custom attribute data
:param data:
An iterable of key/value pairs
"""
self.__dict__.update(data) # Better hope this is a dict!
def _cleanup_attributes(self):
"""Perform various cleanup actions. Abstracted for customization."""
# Lowercase the nodeName for completeness.
if self.nodeName is not None:
self.nodeName = self.nodeName.lower()
if self.deviceType is not None:
self.deviceType = self.deviceType.upper()
# Make sure the password is bytes not unicode
if self.enablePW is not None:
self.enablePW = str(self.enablePW)
# Cleanup whitespace from owning team
if self.owningTeam is not None:
self.owningTeam = self.owningTeam.strip()
# Map deviceStatus to adminStatus when data source is RANCID
if hasattr(self, 'deviceStatus'):
STATUS_MAP = {
'up': 'PRODUCTION',
'down': 'NON-PRODUCTION',
}
self.adminStatus = STATUS_MAP.get(self.deviceStatus, STATUS_MAP['up'])
def _set_node_port(self):
"""Set the freakin' TCP port"""
# If nodename is set, try to parse out a nodePort
if self.nodeName is not None:
nodeport_info = parse_node_port(self.nodeName)
nodeName, nodePort = nodeport_info
# If the nodeName differs, use it to replace the one we parsed
if nodeName != self.nodeName:
self.nodeName = nodeName
# If the port isn't set, set it
if nodePort is not None:
self.nodePort = nodePort
return None
# Make sure the port is an integer if it's not None
if self.nodePort is not None and isinstance(self.nodePort, basestring):
self.nodePort = int(self.nodePort)
def _populate_deviceType(self):
"""Try to make a guess what the device type is"""
self.deviceType = settings.DEFAULT_TYPES.get(self.vendor.name,
settings.FALLBACK_TYPE)
def _set_requires_async_pty(self):
"""
Set whether a device requires an async pty (see:
`~trigger.twister.TriggerSSHAsyncPtyChannel`).
"""
RULES = (
self.vendor in ('a10', 'arista', 'aruba', 'cisco', 'force10'),
self.is_brocade_vdx(),
)
return any(RULES)
def _set_delimiter(self):
"""
Set the delimiter to use for line-endings.
"""
default = '\n'
delimiter_map = {
'force10': '\r\n',
}
delimiter = delimiter_map.get(self.vendor.name, default)
return delimiter
def _set_startup_commands(self):
"""
Set the commands to run at startup. For now they are just ones to
disable pagination.
"""
def disable_paging_brocade():
"""Brocade commands differ by platform."""
if self.is_brocade_vdx():
return ['terminal length 0']
else:
return ['skip-page-display']
def disable_paging_cisco():
"""Cisco ASA commands differ from IOS"""
if self.is_cisco_asa():
return ['terminal pager 0']
else:
return default
# Commands used to disable paging.
default = ['terminal length 0']
paging_map = {
'a10': default,
'arista': default,
            'aruba': ['no paging'],  # not necessary as of v6.2.x
'brocade': disable_paging_brocade(), # See comments above
'cisco': disable_paging_cisco(),
'citrix': ['set cli mode page off'],
'dell': ['terminal datadump'],
'f5': ['modify cli preference pager disabled'],
'force10': default,
'foundry': ['skip-page-display'],
'juniper': ['set cli screen-length 0'],
'mrv': ['no pause'],
'netscreen': ['set console page 0'],
'paloalto': ['set cli scripting-mode on', 'set cli pager off'],
}
cmds = paging_map.get(self.vendor.name)
if self.is_netscreen():
cmds = paging_map['netscreen']
if cmds is not None:
return cmds
return []
def _set_commit_commands(self):
"""
Return the proper "commit" command. (e.g. write mem, etc.)
"""
if self.is_ioslike():
return self._ioslike_commit()
elif self.is_netscaler() or self.is_netscreen():
return ['save config']
elif self.vendor == 'juniper':
return self._juniper_commit()
elif self.vendor == 'paloalto':
return ['commit']
elif self.vendor == 'pica8':
return ['commit']
elif self.vendor == 'mrv':
return ['save configuration flash']
elif self.vendor == 'f5':
return ['save sys config']
else:
return []
def _ioslike_commit(self):
"""
Return proper 'write memory' command for IOS-like devices.
"""
if self.is_brocade_vdx() or self.vendor == 'dell':
return ['copy running-config startup-config', 'y']
elif self.is_cisco_nexus():
return ['copy running-config startup-config']
else:
return ['write memory']
def _juniper_commit(self, fields=settings.JUNIPER_FULL_COMMIT_FIELDS):
"""
Return proper ``commit-configuration`` element for a Juniper
device.
"""
default = [JUNIPER_COMMIT]
if not fields:
return default
# Either it's a normal "commit-configuration"
for attr, val in fields.iteritems():
if not getattr(self, attr) == val:
return default
# Or it's a "commit-configuration full"
return [JUNIPER_COMMIT_FULL]
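        # Illustration of the logic above (hypothetical settings value):
        # with JUNIPER_FULL_COMMIT_FIELDS = {'deviceType': 'SWITCH'}, a
        # Juniper device whose deviceType is SWITCH gets the
        # "commit-configuration full" element; any attribute mismatch falls
        # back to the plain "commit-configuration" element.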
def _bind_dynamic_methods(self):
"""
Bind dynamic methods to the instance. Currently does these:
        + Dynamically bind `~trigger.twister.execute` to .execute()
        + Dynamically bind `~trigger.twister.connect` to .connect()
Note that these both rely on the value of the ``vendor`` attribute.
"""
from trigger import twister
self.execute = twister.execute.__get__(self, self.__class__)
self.connect = twister.connect.__get__(self, self.__class__)
def _populate_acls(self, aclsdb=None):
"""
Populate the associated ACLs for this device.
:param aclsdb:
An `~trigger.acl.db.AclsDB` object.
"""
if not aclsdb:
return None
acls_dict = aclsdb.get_acl_dict(self)
self.explicit_acls = acls_dict['explicit']
self.implicit_acls = acls_dict['implicit']
self.acls = acls_dict['all']
def __str__(self):
return self.nodeName
def __repr__(self):
return "<NetDevice: %s>" % self.nodeName
def __cmp__(self, other):
if self.nodeName > other.nodeName:
return 1
elif self.nodeName < other.nodeName:
return -1
else:
return 0
@property
def bounce(self):
return changemgmt.bounce(self)
@property
def shortName(self):
return self.nodeName.split('.', 1)[0]
@property
def os(self):
vendor_mapping = settings.TEXTFSM_VENDOR_MAPPINGS
try:
oss = vendor_mapping[self.vendor]
if self.operatingSystem.lower() in oss:
return "{0}_{1}".format(self.vendor, self.operatingSystem.lower())
        except (KeyError, AttributeError):
log.msg("""Unable to find template for given device.
Check to see if your netdevices object has the 'platform' key.
Otherwise template does not exist.""")
return None
def allowable(self, action, when=None):
"""
Return whether it's okay to perform the specified ``action``.
False means a bounce window conflict. For now ``'load-acl'`` is the
only valid action and moratorium status is not checked.
:param action:
The action to check.
:param when:
A datetime object.
"""
assert action == 'load-acl'
return self.bounce.status(when) == changemgmt.BounceStatus('green')
def next_ok(self, action, when=None):
"""
Return the next time at or after the specified time (default now)
that it will be ok to perform the specified action.
:param action:
The action to check.
:param when:
A datetime object.
"""
assert action == 'load-acl'
return self.bounce.next_ok(changemgmt.BounceStatus('green'), when)
def is_router(self):
"""Am I a router?"""
return self.deviceType == 'ROUTER'
def is_switch(self):
"""Am I a switch?"""
return self.deviceType == 'SWITCH'
def is_firewall(self):
"""Am I a firewall?"""
return self.deviceType == 'FIREWALL'
def is_netscaler(self):
"""Am I a NetScaler?"""
return all([self.is_switch(), self.vendor=='citrix'])
def is_pica8(self):
"""Am I a Pica8?"""
## This is only really needed because pica8
## doesn't have a global command to disable paging
## so we need to do some special magic.
return all([self.vendor=='pica8'])
def is_netscreen(self):
"""Am I a NetScreen running ScreenOS?"""
# Are we even a firewall?
if not self.is_firewall():
return False
# If vendor or make is netscreen, automatically True
make_netscreen = self.make is not None and self.make.lower() == 'netscreen'
if self.vendor == 'netscreen' or make_netscreen:
return True
# Final check: Are we made by Juniper and an SSG? This requires that
# make or model is populated and has the word 'ssg' in it. This still
# fails if it's an SSG running JunOS, but this is not an edge case we
# can easily support at this time.
is_ssg = (
(self.model is not None and 'ssg' in self.model.lower()) or
(self.make is not None and 'ssg' in self.make.lower())
)
return self.vendor == 'juniper' and is_ssg
def is_ioslike(self):
"""
Am I an IOS-like device (as determined by :setting:`IOSLIKE_VENDORS`)?
"""
return self.vendor in settings.IOSLIKE_VENDORS
def is_brocade_vdx(self):
"""
Am I a Brocade VDX switch?
This is used to account for the disparity between the Brocade FCX
switches (which behave like Foundry devices) and the Brocade VDX
switches (which behave differently from classic Foundry devices).
"""
if hasattr(self, '_is_brocade_vdx'):
return self._is_brocade_vdx
if not (self.vendor == 'brocade' and self.is_switch()):
self._is_brocade_vdx = False
return False
        # Default to False when make is unknown so the cached attribute is
        # always set before it is returned.
        self._is_brocade_vdx = (self.make is not None and
                                'vdx' in self.make.lower())
        return self._is_brocade_vdx
def is_cisco_asa(self):
"""
Am I a Cisco ASA Firewall?
This is used to account for slight differences in the commands that
may be used between Cisco's ASA and IOS platforms. Cisco ASA is still
very IOS-like, but there are still several gotcha's between the
platforms.
Will return True if vendor is Cisco and platform is Firewall. This
is to allow operability if using .csv NetDevices and pretty safe to
assume considering ASA (was PIX) are Cisco's flagship(if not only)
Firewalls.
"""
if hasattr(self, '_is_cisco_asa'):
return self._is_cisco_asa
if not (self.vendor == 'cisco' and self.is_firewall()):
self._is_cisco_asa = False
return False
        # We already know vendor == 'cisco' and is_firewall() is True here,
        # so per the docstring any remaining Cisco firewall is treated as an
        # ASA. (A make-based check used to sit here, but it was dead code,
        # unconditionally overwritten before the return.)
        self._is_cisco_asa = True
        return self._is_cisco_asa
def is_cisco_nexus(self):
"""
Am I a Cisco Nexus device?
"""
words = (self.make, self.model)
patterns = ('n.k', 'nexus') # Patterns to match
pairs = itertools.product(patterns, words)
for pat, word in pairs:
if word and re.search(pat, word.lower()):
return True
return False
def _ssh_enabled(self, disabled_mapping):
"""Check whether vendor/type is enabled against the given mapping."""
disabled_types = disabled_mapping.get(self.vendor.name, [])
return self.deviceType not in disabled_types
def has_ssh(self):
"""Am I even listening on SSH?"""
return network.test_ssh(self.nodeName)
def _can_ssh(self, method):
"""
Am I enabled to use SSH for the given method in Trigger settings, and
if so do I even have SSH?
:param method: One of ('pty', 'async')
"""
METHOD_MAP = {
'pty': settings.SSH_PTY_DISABLED,
'async': settings.SSH_ASYNC_DISABLED,
}
assert method in METHOD_MAP
method_enabled = self._ssh_enabled(METHOD_MAP[method])
return method_enabled and self.has_ssh()
def can_ssh_async(self):
"""Am I enabled to use SSH async?"""
return self._can_ssh('async')
def can_ssh_pty(self):
"""Am I enabled to use SSH pty?"""
return self._can_ssh('pty')
def is_reachable(self):
"""Do I respond to a ping?"""
return network.ping(self.nodeName)
def dump(self):
"""Prints details for a device."""
dev = self
print
print '\tHostname: ', dev.nodeName
print '\tOwning Org.: ', dev.owner
print '\tOwning Team: ', dev.owningTeam
print '\tOnCall Team: ', dev.onCallName
print
print '\tVendor: ', '%s (%s)' % (dev.vendor.title, dev.manufacturer)
#print '\tManufacturer: ', dev.manufacturer
print '\tMake: ', dev.make
print '\tModel: ', dev.model
print '\tType: ', dev.deviceType
print '\tLocation: ', dev.site, dev.room, dev.coordinate
print
print '\tProject: ', dev.projectName
print '\tSerial: ', dev.serialNumber
print '\tAsset Tag: ', dev.assetID
print '\tBudget Code: ', '%s (%s)' % (dev.budgetCode, dev.budgetName)
print
print '\tAdmin Status: ', dev.adminStatus
print '\tLifecycle Status: ', dev.lifecycleStatus
print '\tOperation Status: ', dev.operationStatus
print '\tLast Updated: ', dev.lastUpdate
print
class Vendor(object):
"""
Map a manufacturer name to Trigger's canonical name.
Given a manufacturer name like 'CISCO SYSTEMS', this will attempt to map it
to the canonical vendor name specified in ``settings.VENDOR_MAP``. If this
can't be done, attempt to split the name up ('CISCO, 'SYSTEMS') and see if
any of the words map. An exception is raised as a last resort.
This exposes a normalized name that can be used in the event of a
multi-word canonical name.
"""
def __init__(self, manufacturer=None):
"""
:param manufacturer:
The literal or "internal" name for a vendor that is to be mapped to
its canonical name.
"""
if manufacturer is None:
raise SyntaxError('You must specify a `manufacturer` name')
self.manufacturer = manufacturer
self.name = self.determine_vendor(manufacturer)
self.title = self.name.title()
self.prompt_pattern = self._get_prompt_pattern(self.name)
def determine_vendor(self, manufacturer):
"""Try to turn the provided vendor name into the cname."""
vendor = settings.VENDOR_MAP.get(manufacturer)
if vendor is None:
mparts = [w for w in manufacturer.lower().split()]
for word in mparts:
if word in settings.SUPPORTED_VENDORS:
vendor = word
break
else:
# Safe fallback to first word
vendor = mparts[0]
return vendor
def _get_prompt_pattern(self, vendor, prompt_patterns=None):
"""
Map the vendor name to the appropriate ``prompt_pattern`` defined in
:setting:`PROMPT_PATTERNS`.
"""
if prompt_patterns is None:
prompt_patterns = settings.PROMPT_PATTERNS
# Try to get it by vendor
pat = prompt_patterns.get(vendor)
if pat is not None:
return pat
# Try to map it by IOS-like vendors...
if vendor in settings.IOSLIKE_VENDORS:
return settings.IOSLIKE_PROMPT_PAT
# Or fall back to the default
return settings.DEFAULT_PROMPT_PAT
@property
def normalized(self):
"""Return the normalized name for the vendor."""
return self.name.replace(' ', '_').lower()
def __str__(self):
return self.name
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.title)
def __eq__(self, other):
return self.name.__eq__(Vendor(str(other)).name)
def __contains__(self, other):
return self.name.__contains__(Vendor(str(other)).name)
def __hash__(self):
return hash(self.name)
def lower(self):
return self.normalized
_vendor_registry = {}
def vendor_factory(vendor_name):
"""
Given a full name of a vendor, retrieve or create the canonical
`~trigger.netdevices.Vendor` object.
Vendor instances are cached to improve startup speed.
:param vendor_name:
The vendor's full manufacturer name (e.g. 'CISCO SYSTEMS')
"""
return _vendor_registry.setdefault(vendor_name, Vendor(vendor_name))
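# A quick sketch of the factory cache and Vendor string comparisons. It
# assumes 'CISCO SYSTEMS' resolves to 'cisco' via settings.VENDOR_MAP or
# the word-split fallback described above.
def _demo_vendor_factory():
    v1 = vendor_factory('CISCO SYSTEMS')
    v2 = vendor_factory('CISCO SYSTEMS')
    assert v1 is v2       # cached: the same Vendor instance both times
    assert v1 == 'cisco'  # Vendor.__eq__ coerces strings for comparison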
class NetDevices(DictMixin):
"""
Returns an immutable Singleton dictionary of
`~trigger.netdevices.NetDevice` objects.
By default it will only return devices for which
``adminStatus=='PRODUCTION'``.
There are hardly any use cases where ``NON-PRODUCTION`` devices are needed,
and it can cause real bugs of two sorts:
1. trying to contact unreachable devices and reporting spurious failures,
2. hot spares with the same ``nodeName``.
You may override this by passing ``production_only=False``.
"""
_Singleton = None
class _actual(object):
"""
This is the real class that stays active upon instantiation. All
attributes are inherited by NetDevices from this object. This means you
do NOT reference ``_actual`` itself, and instead call the methods from
the parent object.
Right::
>>> nd = NetDevices()
>>> nd.search('fw')
[<NetDevice: fw1-xyz.net.aol.com>]
Wrong::
>>> nd._actual.search('fw')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
            TypeError: unbound method search() must be called with _actual
                instance as first argument (got str instance instead)
"""
def __init__(self, production_only, with_acls):
self._dict = {}
_populate(netdevices=self._dict,
data_source=settings.NETDEVICES_SOURCE,
production_only=production_only, with_acls=with_acls)
def __getitem__(self, key):
return self._dict[key]
def __contains__(self, item):
return item in self._dict
def keys(self):
return self._dict.keys()
def values(self):
return self._dict.values()
def find(self, key):
"""
Return either the exact nodename, or a unique dot-delimited
prefix. For example, if there is a node 'test1-abc.net.aol.com',
then any of find('test1-abc') or find('test1-abc.net') or
find('test1-abc.net.aol.com') will match, but not find('test1').
:param string key: Hostname prefix to find.
:returns: NetDevice object
"""
key = key.lower()
if key in self:
return self[key]
matches = [x for x in self.keys() if x.startswith(key + '.')]
if matches:
return self[matches[0]]
raise KeyError(key)
def all(self):
"""Returns all NetDevice objects."""
return self.values()
def search(self, token, field='nodeName'):
"""
        Returns a list of NetDevice objects where ``token`` is found in
        the value of ``field`` (``nodeName`` by default). The getattr call
        in the search will allow an ``AttributeError`` from a bogus field
        lookup so that you don't get an empty list thinking you performed
        a legit query.
For example, this::
>>> field = 'bacon'
>>> [x for x in nd.all() if 'ash' in getattr(x, field)]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'NetDevice' object has no attribute 'bacon'
Is better than this::
>>> [x for x in nd.all() if 'ash' in getattr(x, field, '')]
[]
Because then you know that 'bacon' isn't a field you can search on.
:param string token: Token to search match on in @field
:param string field: The field to match on when searching
:returns: List of NetDevice objects
"""
# We could actually just make this call match() to make this
# case-insensitive as well. But we won't yet because of possible
# implications in outside dependencies.
#return self.match(**{field:token})
return [x for x in self.all() if token in getattr(x, field)]
def match(self, **kwargs):
"""
Attempt to match values to all keys in @kwargs by dynamically
building a list comprehension. Will throw errors if the keys don't
match legit NetDevice attributes.
        Keys and values are case IN-sensitive. Matches against non-string
values will FAIL.
Example by reference::
>>> nd = NetDevices()
>>> myargs = {'onCallName':'Data Center', 'model':'FCSLB'}
            >>> mydevices = nd.match(**myargs)
        Example by keyword arguments::
            >>> mydevices = nd.match(oncallname='data center', model='fcslb')
:returns: List of NetDevice objects
"""
all_field_names = getattr(self, '_all_field_names', {})
# Cache the field names the first time .match() is called.
if not all_field_names:
# Merge in field_names from every NetDevice
for dev in self.all():
dev_fields = ((f.lower(), f) for f in dev.__dict__)
all_field_names.update(dev_fields)
self._all_field_names = all_field_names
        # An iterator so we can filter the devices functionally
devices = iter(self.all())
def map_attr(attr):
"""Helper function for lower-to-regular attribute mapping."""
return self._all_field_names[attr.lower()]
# Use list comp. to keep filtering out the devices.
for attr, val in kwargs.iteritems():
attr = map_attr(attr)
val = str(val).lower()
devices = [
d for d in devices if (
val in str(getattr(d, attr, '')).lower()
)
]
return devices
def get_devices_by_type(self, devtype):
"""
Returns a list of NetDevice objects with deviceType matching type.
Known deviceTypes: ['FIREWALL', 'ROUTER', 'SWITCH']
"""
return [x for x in self._dict.values() if x.deviceType == devtype]
def list_switches(self):
"""Returns a list of NetDevice objects with deviceType of SWITCH"""
return self.get_devices_by_type('SWITCH')
def list_routers(self):
"""Returns a list of NetDevice objects with deviceType of ROUTER"""
return self.get_devices_by_type('ROUTER')
def list_firewalls(self):
"""Returns a list of NetDevice objects with deviceType of FIREWALL"""
return self.get_devices_by_type('FIREWALL')
def __init__(self, production_only=True, with_acls=None):
"""
:param production_only:
Whether to require devices to have ``adminStatus=='PRODUCTION'``.
:param with_acls:
Whether to load ACL associations (requires Redis). Defaults to whatever
is specified in settings.WITH_ACLS
"""
if with_acls is None:
with_acls = settings.WITH_ACLS
classobj = self.__class__
if classobj._Singleton is None:
classobj._Singleton = classobj._actual(production_only=production_only,
with_acls=with_acls)
def __getattr__(self, attr):
return getattr(self.__class__._Singleton, attr)
def __setattr__(self, attr, value):
return setattr(self.__class__._Singleton, attr, value)
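# Usage sketch, mirroring the doctest examples above (the hostname is the
# illustrative one from the module docstring, not a real device):
#
#     nd = NetDevices()                  # Singleton; loads on first use
#     dev = nd.find('test1-abc')         # exact name or unique prefix
#     if dev.is_router() and dev.can_ssh_pty():
#         dev.dump()                     # print the device's metadata
#     switches = nd.list_switches()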
|
|
#!/usr/bin/env python
#
# restrict_long_contigs.py
#
# USAGE: restrict_long_contigs.py [options] <input_directory> \
# <output_directory>
#
# Options:
# -h, --help show this help message and exit
# -l MINLEN, --minlen=MINLEN
# Minimum length of sequence
# -s SUFFIX, --filesuffix=SUFFIX
# Suffix to indicate the file was processed
# -v, --verbose Give verbose output
#
# Non-PSL dependencies: Biopython (www.biopython.org)
#
# A short script that takes as input a directory containing (many) FASTA files
# describing biological sequences, and writes to a new, named directory
# multiple FASTA files containing the same sequences, but restricted only to
# those sequences whose length is greater than a passed value.
#
# Example usage: You have a directory with many sets of contigs from different
# assemblies. This script will produce a new directory of the same data where
# the contig lengths are restricted to being greater than a specified length.
#
# Copyright (C) 2013 The James Hutton Institute
# Author: Leighton Pritchard
#
# Contact:
# leighton.pritchard@hutton.ac.uk
#
# Leighton Pritchard,
# Information and Computing Sciences,
# James Hutton Institute,
# Errol Road,
# Invergowrie,
# Dundee,
# DD6 9LH,
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2010-2014 The James Hutton Institute
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
# IMPORTS
from Bio import SeqIO
from optparse import OptionParser
import logging
import logging.handlers
import os
import re
import sys
###
# GLOBALS
# File extensions that indicate FASTA content
fasta_ext = ['.fa', '.fas', '.fasta']
###
# FUNCTIONS
# Parse cmd-line
def parse_cmdline(args):
""" Parse command-line arguments. Note that the input and output
directories are positional arguments
"""
usage = "usage: %prog [options] <input_directory> <output_directory>"
parser = OptionParser(usage)
parser.add_option("-l", "--minlen", dest="minlen",
action="store", default=1000,
help="Minimum length of sequence")
parser.add_option("-s", "--filesuffix", dest="suffix",
action="store", default="_restricted",
help="Suffix to indicate the file was processed")
parser.add_option("-v", "--verbose", dest="verbose",
action="store_true", default=False,
help="Give verbose output")
return parser.parse_args()
# Get list of FASTA files from a directory
def get_fasta_filenames(indir, extensions=fasta_ext):
""" Identifies files in the passed directory whose extensions indicate
that they may be FASTA files. Returns the path to the file,
including the parent directory.
"""
filelist = [f for f in os.listdir(indir) if
os.path.splitext(f)[-1].lower() in extensions]
logger.info("Identified %d FASTA files in %s:" % (len(filelist),
indir))
if not len(filelist): # We want there to be at least one file
logger.error("No FASTA files found in %s" % indir)
sys.exit(1)
return filelist
# Restrict sequence length in a named FASTA file, writing it to
# the named location
def restrict_seq_length(infile, outfile, minlen):
""" Takes an input FASTA file as infile, and writes out a corresponding
file to outfile, where sequences shorter than minlen are not included
"""
logger.info("Restricting lengths of %s to >=%d;" % (infile, minlen) +
" writing to %s" % outfile)
SeqIO.write([s for s in SeqIO.parse(infile, 'fasta')
if not len(s) < minlen],
outfile, 'fasta')
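# Example call (hypothetical paths): keep only contigs of 1 kbp or longer
# from a single assembly file:
#
#   restrict_seq_length('assemblies/contigs.fasta',
#                       'filtered/contigs_restricted.fasta', 1000)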
# Process FASTA files in the directory
def process_files(indir, outdir, minlen, suffix):
""" Takes an input directory that contains FASTA files, and writes
to the output directory corresponding files (with the suffix appended)
that contain only sequences of length greater than minlen.
"""
for filename in get_fasta_filenames(indir):
filestem, ext = os.path.splitext(filename)
infilename = os.path.join(indir, filename)
outfilename = os.path.join(outdir, ''.join([filestem, suffix, ext]))
restrict_seq_length(infilename, outfilename, minlen)
###
# SCRIPT
if __name__ == '__main__':
# Parse command-line
# options are options, arguments are the .sff files
options, args = parse_cmdline(sys.argv)
# We set up logging, and modify loglevel according to whether we need
# verbosity or not
logger = logging.getLogger('restrict_long_contigs.py')
logger.setLevel(logging.DEBUG)
err_handler = logging.StreamHandler(sys.stderr)
err_formatter = logging.Formatter('%(levelname)s: %(message)s')
err_handler.setFormatter(err_formatter)
if options.verbose:
err_handler.setLevel(logging.INFO)
else:
err_handler.setLevel(logging.WARNING)
logger.addHandler(err_handler)
# Report arguments, if verbose
logger.info(options)
logger.info(args)
# If there are not two positional arguments, throw an error
if len(args) != 2:
logger.error("Not enough arguments: script requires input and " +
"output directory")
sys.exit(1)
indir, outdir = tuple(args)
# Make sure that the input directory exists
if not os.path.isdir(indir):
logger.error("Input directory %s does not exist" % indir)
sys.exit(1)
# If output directory does not exist, create it. If it does exist,
# issue a warning that contents may be overwritten
if os.path.isdir(outdir):
logger.warning("Contents of %s may be overwritten" % outdir)
else:
logger.warning("Output directory %s does not exist: creating it" %
outdir)
os.mkdir(outdir)
# Check that the passed suffix is a valid string: escape dodgy characters
#try:
# suffix = re.escape(options.suffix)
#except:
# logger.error("Could not escape suffix string: %s" % options.suffix)
# sys.exit(1)
    # Make sure that the minimum length is an integer, and positive
    try:
        minlen = int(options.minlen)
    except ValueError:
        logger.error("Minimum length must be an integer, got %s" %
                     options.minlen)
        sys.exit(1)
    if not minlen > 0:
        logger.error("Minimum length must be a positive integer, got %s" %
                     options.minlen)
        sys.exit(1)
    # Restrict sequence lengths
    process_files(indir, outdir, minlen, options.suffix)
|
|
from abc import ABC, abstractmethod
from typing import *
from datetime import timedelta
from enum import Enum
from durationpy import from_str
from couchbase.options import QueryBaseOptions, enum_value
from couchbase_core.mapper import identity
from .n1ql import *
from couchbase_core.n1ql import N1QLRequest
from couchbase_core.analytics import AnalyticsQuery, AnalyticsRequest
from couchbase_core import iterable_wrapper, mk_formstr
from couchbase.exceptions import InvalidArgumentException
class AnalyticsIndex(dict):
def __init__(self, **kwargs):
#print("creating index from {}".format(kwargs))
super(AnalyticsIndex, self).__init__(**kwargs['Index'])
@property
def name(self):
return self.get("IndexName", None)
@property
def dataset_name(self):
return self.get("DatasetName", None)
@property
def dataverse_name(self):
return self.get("DataverseName", None)
@property
def is_primary(self):
return self.get("IsPrimary", None)
class AnalyticsDataType(Enum):
STRING = 'string'
INT64 = 'int64'
DOUBLE = 'double'
class AnalyticsLinkType(Enum):
S3External = 's3'
AzureBlobExternal = 'azureblob'
CouchbaseRemote = 'couchbase'
class AnalyticsEncryptionLevel(Enum):
NONE = 'none'
HALF = 'half'
FULL = 'full'
class AnalyticsDataset(dict):
def __init__(self, **kwargs):
super(AnalyticsDataset, self).__init__(**kwargs)
@property
def dataset_name(self):
return self.get("DatasetName", None)
@property
def dataverse_name(self):
return self.get('DataverseName', None)
@property
def link_name(self):
return self.get('LinkName', None)
@property
def bucket_name(self):
return self.get('BucketName', None)
class AnalyticsLink(ABC):
"""AnalytcsLinks are only available on Couchbase Server 7.0+
"""
@abstractmethod
def name(
self, # type: "AnalyticsLink"
) -> str:
"""Returns the name of the :class:`couchbase.analytics.AnalyticsLink`
:return: The name of the :class:`couchbase.analytics.AnalyticsLink`
"""
pass
@abstractmethod
def dataverse_name(
self, # type: "AnalyticsLink"
) -> str:
"""Returns the name of the dataverse the :class:`couchbase.analytics.AnalyticsLink` belongs to
:return: The name of the dataverse
"""
pass
@abstractmethod
def form_encode(
self, # type: "AnalyticsLink"
) -> bytes:
"""Encodes the :class:`couchbase.analytics.AnalyticsLink` into a form data representation,
to send as the body of a :func:`couchbase.management.analytics.CreateLink` or
:func:`couchbase.management.analytics.ReplaceLink`
:return: A form encoded :class:`couchbase.analytics.AnalyticsLink`
"""
pass
@abstractmethod
def validate(
self, # type: "AnalyticsLink"
):
"""Ensures the :class:`couchbase.analytics.AnalyticsLink` is valid. Raises a :class:`couchbase.exceptions.InvalidArgumentException` if link is invalid.
:return: None
:raises: :class:`couchbase.exceptions.InvalidArgumentException`
"""
pass
@abstractmethod
def link_type(
self, # type: "AnalyticsLink"
) -> AnalyticsLinkType:
"""Returns the :class:`couchbase.analytics.AnalyticsLinkType` of the :class:`couchbase.analytics.AnalyticsLink`
:return: The corresponding :class:`couchbase.analytics.AnalyticsLinkType` of the :class:`couchbase.analytics.AnalyticsLink`
"""
pass
class CouchbaseAnalyticsEncryptionSettings(object):
"""The settings available for setting encryption level on a link.
:param encryption_level: The level of encryption to apply, defaults to :class:`couchbase.analytics.AnalyticsEncryptionLevel`.NONE
:type encryption_level: :class:`couchbase.analytics.AnalyticsEncryptionLevel`
:param certificate: The certificate to use when encryption level is set to full. Must be set if encryption level is set to full. Defaults to None.
:type certificate: bytes | bytearray
:param client_certificate: The client certificate to use when encryption level is set to full. Cannot be used if username and password are also used. Defaults to None
:type client_certificate: bytes | bytearray
:param client_key: The client key to use when encryption level is set to full. Cannot be used if username and password are also used. Defaults to None
:type client_key: bytes | bytearray
"""
def __init__(
self, # type: "CouchbaseAnalyticsEncryptionSettings"
encryption_level=None, # type: AnalyticsEncryptionLevel
certificate=None, # type: Union[bytes, bytearray]
client_certificate=None, # type: Union[bytes, bytearray]
client_key=None, # type: Union[bytes, bytearray]
):
self._encryption_level = encryption_level
if self._encryption_level is None:
self._encryption_level = AnalyticsEncryptionLevel.NONE
self._certificate = certificate
self._client_certificate = client_certificate
self._client_key = client_key
@property
def encryption_level(self):
return self._encryption_level
@encryption_level.setter
def encryption_level(self, value):
self._encryption_level = value
@property
def certificate(self):
return self._certificate
@certificate.setter
def certificate(self, value):
self._certificate = value
@property
def client_certificate(self):
return self._client_certificate
@client_certificate.setter
def client_certificate(self, value):
self._client_certificate = value
@property
def client_key(self):
return self._client_key
@client_key.setter
def client_key(self, value):
self._client_key = value
@classmethod
def from_server_json(
cls, # type: "CouchbaseAnalyticsEncryptionSettings"
raw_data # type: dict
) -> "CouchbaseAnalyticsEncryptionSettings":
encryption_settings = CouchbaseAnalyticsEncryptionSettings()
if raw_data["encryption"] == AnalyticsEncryptionLevel.NONE.value:
encryption_settings.encryption_level = AnalyticsEncryptionLevel.NONE
elif raw_data["encryption"] == AnalyticsEncryptionLevel.HALF.value:
encryption_settings.encryption_level = AnalyticsEncryptionLevel.HALF
elif raw_data["encryption"] == AnalyticsEncryptionLevel.FULL.value:
encryption_settings.encryption_level = AnalyticsEncryptionLevel.FULL
if "certificate" in raw_data and raw_data["certificate"] and raw_data["certificate"].split(
):
encryption_settings.certificate = bytes(
raw_data["certificate"], "utf-8")
if "clientCertificate" in raw_data and raw_data["clientCertificate"] and raw_data["clientCertificate"].split(
):
            # Assign to client_certificate here (the original code clobbered
            # the server certificate parsed just above).
            encryption_settings.client_certificate = bytes(
                raw_data["clientCertificate"], "utf-8")
return encryption_settings
def is_null_or_empty(
value # type: str
) -> bool:
return not (value and value.split())
class CouchbaseRemoteAnalyticsLink(AnalyticsLink):
def __init__(
self, # type: "CouchbaseRemoteAnalyticsLink"
dataverse, # type: str
link_name, # type: str
hostname, # type: str
encryption, # type: CouchbaseAnalyticsEncryptionSettings
username=None, # type: str
password=None, # type: str
):
super().__init__()
self._dataverse = dataverse
self._link_name = link_name
self._hostname = hostname
self._encryption = encryption
self._username = username
self._password = password
def name(
self, # type: "CouchbaseRemoteAnalyticsLink"
) -> str:
return self._link_name
def dataverse_name(
self, # type: "CouchbaseRemoteAnalyticsLink"
) -> str:
return self._dataverse
def form_encode(self: "CouchbaseRemoteAnalyticsLink") -> bytes:
params = {}
if "/" not in self._dataverse:
params["dataverse"] = self._dataverse
params["name"] = self._link_name
params["hostname"] = self._hostname
params["type"] = AnalyticsLinkType.CouchbaseRemote.value
params["encryption"] = self._encryption.encryption_level.value
if not is_null_or_empty(self._username):
params["username"] = self._username
if not is_null_or_empty(self._password):
params["password"] = self._password
if self._encryption.certificate and len(
self._encryption.certificate) > 0:
params["certificate"] = self._encryption.certificate.decode(
"utf-8")
if self._encryption.client_certificate and len(
self._encryption.client_certificate) > 0:
params["clientCertificate"] = self._encryption.client_certificate.decode(
"utf-8")
if self._encryption.client_key and len(
self._encryption.client_key) > 0:
params["clientKey"] = self._encryption.client_key.decode("utf-8")
return mk_formstr(params).encode()
def validate(self: "CouchbaseRemoteAnalyticsLink"):
if is_null_or_empty(self._dataverse):
raise InvalidArgumentException(
"Dataverse must be set for couchbase analytics links.")
if is_null_or_empty(self._link_name):
raise InvalidArgumentException(
"Link name must be set for couchbase analytics links.")
if is_null_or_empty(self._hostname):
raise InvalidArgumentException(
"Hostname must be set for couchbase analytics links.")
if self._encryption.encryption_level in [
AnalyticsEncryptionLevel.NONE, AnalyticsEncryptionLevel.HALF]:
if is_null_or_empty(
self._username) or is_null_or_empty(self._password):
raise InvalidArgumentException(
"When encryption level is half or none, username and password must be set for couchbase analytics links.")
elif self._encryption.encryption_level == AnalyticsEncryptionLevel.FULL:
if not (self._encryption.certificate and len(
self._encryption.certificate) > 0):
raise InvalidArgumentException(
"When encryption level is full a certificate must be set for couchbase analytics links.")
if not ((self._encryption.client_certificate and len(self._encryption.client_certificate) > 0)
and (self._encryption.client_key and len(self._encryption.client_key) > 0)):
raise InvalidArgumentException(
"When encryption level is full the client certificate and key must be set for couchbase analytics links.")
def link_type(self: "CouchbaseRemoteAnalyticsLink") -> AnalyticsLinkType:
return AnalyticsLinkType.CouchbaseRemote
@classmethod
def link_from_server_json(
cls, # type: "CouchbaseRemoteAnalyticsLink"
raw_data, # type: dict
) -> "CouchbaseRemoteAnalyticsLink":
dataverse = raw_data["dataverse"] if "dataverse" in raw_data else raw_data["scope"]
link_name = raw_data["name"]
hostname = raw_data["activeHostname"]
encryption = CouchbaseAnalyticsEncryptionSettings.from_server_json(
raw_data)
username = raw_data.get("username", None)
return CouchbaseRemoteAnalyticsLink(
dataverse, link_name, hostname, encryption, username)
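# A hedged sketch of building and validating a remote link with HALF
# encryption, which requires a username and password; the hostname and
# credentials are placeholder values.
def _example_remote_link():
    link = CouchbaseRemoteAnalyticsLink(
        dataverse="my_dataverse",
        link_name="remote_link",
        hostname="remote.example.com:8091",
        encryption=CouchbaseAnalyticsEncryptionSettings(
            encryption_level=AnalyticsEncryptionLevel.HALF),
        username="Administrator",
        password="password")
    link.validate()  # raises InvalidArgumentException on bad combinations
    return link.form_encode()  # the form body sent to the management API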
class S3ExternalAnalyticsLink(AnalyticsLink):
def __init__(
self, # type: "S3ExternalAnalyticsLink"
dataverse, # type: str
link_name, # type: str
access_key_id, # type: str
region, # type: str
secret_access_key=None, # type: str
session_token=None, # type: str
service_endpoint=None, # type: str
):
super().__init__()
self._dataverse = dataverse
self._link_name = link_name
self._access_key_id = access_key_id
self._region = region
self._secret_access_key = secret_access_key
self._session_token = session_token
self._service_endpoint = service_endpoint
def name(
self, # type: "S3ExternalAnalyticsLink"
) -> str:
return self._link_name
def dataverse_name(
self, # type: "S3ExternalAnalyticsLink"
) -> str:
return self._dataverse
def form_encode(self: "S3ExternalAnalyticsLink") -> bytes:
params = {}
if "/" not in self._dataverse:
params["dataverse"] = self._dataverse
params["name"] = self._link_name
params["type"] = AnalyticsLinkType.S3External.value
params["accessKeyId"] = self._access_key_id
params["secretAccessKey"] = self._secret_access_key
params["region"] = self._region
if not is_null_or_empty(self._session_token):
params["sessionToken"] = self._session_token
if not is_null_or_empty(self._service_endpoint):
params["serviceEndpoint"] = self._service_endpoint
return mk_formstr(params).encode()
def validate(self: "S3ExternalAnalyticsLink"):
if is_null_or_empty(self._dataverse):
raise InvalidArgumentException(
"Dataverse must be set for S3 external analytics links.")
if is_null_or_empty(self._link_name):
raise InvalidArgumentException(
"Link name must be set for S3 external analytics links.")
if is_null_or_empty(self._access_key_id):
raise InvalidArgumentException(
"Access key id must be set for S3 external analytics links.")
if is_null_or_empty(self._secret_access_key):
raise InvalidArgumentException(
"Secret access key must be set for S3 external analytics links.")
if is_null_or_empty(self._region):
raise InvalidArgumentException(
"Region must be set for S3 external analytics links.")
def link_type(self: "S3ExternalAnalyticsLink") -> AnalyticsLinkType:
return AnalyticsLinkType.S3External
@classmethod
def link_from_server_json(
cls, # type: "S3ExternalAnalyticsLink"
raw_data, # type: dict
) -> "S3ExternalAnalyticsLink":
dataverse = raw_data["dataverse"] if "dataverse" in raw_data else raw_data["scope"]
link_name = raw_data["name"]
access_key_id = raw_data["accessKeyId"]
region = raw_data["region"]
service_endpoint = raw_data.get("serviceEndpoint", None)
return S3ExternalAnalyticsLink(
dataverse, link_name, access_key_id, region, service_endpoint=service_endpoint)
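# A hedged usage sketch for an S3 link; the credentials are placeholders.
# Note that validate() requires the secret access key even though the
# constructor lets it default to None.
def _example_s3_link():
    link = S3ExternalAnalyticsLink(
        dataverse="my_dataverse",
        link_name="s3_link",
        access_key_id="AKIA...",
        region="us-east-1",
        secret_access_key="...")
    link.validate()
    return link.form_encode()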
class AzureBlobExternalAnalyticsLink(AnalyticsLink):
def __init__(
self, # type: "AzureBlobExternalAnalyticsLink"
dataverse, # type: str
link_name, # type: str
connection_string=None, # type: str
account_name=None, # type: str
account_key=None, # type: str
shared_access_signature=None, # type: str
blob_endpoint=None, # type: str
endpoint_suffix=None, # type: str
):
super().__init__()
self._dataverse = dataverse
self._link_name = link_name
self._connection_string = connection_string
self._account_name = account_name
self._account_key = account_key
self._shared_access_signature = shared_access_signature
self._blob_endpoint = blob_endpoint
self._endpoint_suffix = endpoint_suffix
def name(
self, # type: "AzureBlobExternalAnalyticsLink"
) -> str:
return self._link_name
def dataverse_name(
self, # type: "AzureBlobExternalAnalyticsLink"
) -> str:
return self._dataverse
def form_encode(self: "AzureBlobExternalAnalyticsLink") -> bytes:
params = {}
if "/" not in self._dataverse:
params["dataverse"] = self._dataverse
params["name"] = self._link_name
params["type"] = AnalyticsLinkType.AzureBlobExternal.value
if not is_null_or_empty(self._connection_string):
params["connectionString"] = self._connection_string
if not is_null_or_empty(self._account_name):
params["accountName"] = self._account_name
if not is_null_or_empty(self._account_key):
params["accountKey"] = self._account_key
if not is_null_or_empty(self._shared_access_signature):
params["sharedAccessSignature"] = self._shared_access_signature
if not is_null_or_empty(self._blob_endpoint):
params["blobEndpoint"] = self._blob_endpoint
if not is_null_or_empty(self._endpoint_suffix):
    params["endpointSuffix"] = self._endpoint_suffix
return mk_formstr(params).encode()
def validate(self: "AzureBlobExternalAnalyticsLink"):
if is_null_or_empty(self._dataverse):
raise InvalidArgumentException(
"Dataverse must be set for Azure blob external analytics links.")
if is_null_or_empty(self._link_name):
raise InvalidArgumentException(
"Link name must be set for Azure blob external analytics links.")
if is_null_or_empty(self._connection_string):
acct_name_and_key = not (is_null_or_empty(self._account_name)
or is_null_or_empty(self._account_key))
acct_name_and_sas = not (is_null_or_empty(self._account_name)
or is_null_or_empty(self._shared_access_signature))
if not (acct_name_and_key or acct_name_and_sas):
raise InvalidArgumentException(
    "If a connection string is not provided, an account name and "
    "either an account key or a shared access signature must be set "
    "for Azure blob external analytics links.")
def link_type(self: "AzureBlobExternalAnalyticsLink") -> AnalyticsLinkType:
return AnalyticsLinkType.AzureBlobExternal
@classmethod
def link_from_server_json(
cls, # type: "AzureBlobExternalAnalyticsLink"
raw_data, # type: dict
) -> "AzureBlobExternalAnalyticsLink":
dataverse = raw_data["dataverse"] if "dataverse" in raw_data else raw_data["scope"]
link_name = raw_data["name"]
account_name = raw_data.get("accountName", None)
blob_endpoint = raw_data.get("blobEndpoint", None)
endpoint_suffix = raw_data.get("endpointSuffix", None)
return AzureBlobExternalAnalyticsLink(dataverse,
link_name,
account_name=account_name,
blob_endpoint=blob_endpoint,
endpoint_suffix=endpoint_suffix)
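# A hedged usage sketch of the connection-string form of an Azure blob link;
# the dataverse, link name, and connection string are placeholder values.
# validate() alternatively accepts an account name plus either an account
# key or a shared access signature.
def _example_azure_link():
    link = AzureBlobExternalAnalyticsLink(
        dataverse="my_dataverse",
        link_name="azure_link",
        connection_string="DefaultEndpointsProtocol=https;AccountName=...")
    link.validate()
    return link.form_encode()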
class AnalyticsResult(iterable_wrapper(AnalyticsRequest)):
def __init__(self,
*args, **kwargs # type: N1QLRequest
):
super(AnalyticsResult, self).__init__(*args, **kwargs)
def metadata(self # type: AnalyticsResult
):
# type: (...) -> AnalyticsMetaData
return AnalyticsMetaData(self)
class AnalyticsScanConsistency(enum.Enum):
NOT_BOUNDED = "not_bounded"
REQUEST_PLUS = "request_plus"
class AnalyticsOptions(QueryBaseOptions):
VALID_OPTS = {'timeout': {'timeout': timedelta.seconds},
'read_only': {'readonly': identity},
'scan_consistency': {'consistency': enum_value},
'client_context_id': {'client_context_id': identity},
'priority': {'priority': identity},
'positional_parameters': {},
'named_parameters': {},
'query_context': {'query_context': identity},
'raw': {}}
TARGET_CLASS = AnalyticsQuery
@overload
def __init__(self,
timeout=None, # type: timedelta
read_only=None, # type: bool
scan_consistency=None, # type: AnalyticsScanConsistency
client_context_id=None, # type: str
priority=None, # type: bool
positional_parameters=None, # type: Iterable[str]
named_parameters=None, # type: Dict[str, str]
query_context=None, # type: str
raw=None, # type: Dict[str,Any]
):
"""
:param timedelta timeout:
:param bool read_only:
:param AnalyticsScanConsistency scan_consistency:
:param str client_context_id:
:param bool priority:
:param Iterable[JSON] positional_parameters:
:param dict[str,JSON] named_parameters:
:param str query_context:
:param dict[str,JSON] raw:
"""
pass
def __init__(self,
**kwargs
):
super(AnalyticsOptions, self).__init__(**kwargs)
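# A short illustrative sketch of constructing AnalyticsOptions; the values
# below are placeholders, not defaults:
def _example_analytics_options():
    return AnalyticsOptions(
        timeout=timedelta(seconds=75),
        scan_consistency=AnalyticsScanConsistency.REQUEST_PLUS,
        named_parameters={'type': 'airline'},
        client_context_id='my-analytics-query-1')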
class AnalyticsStatus(enum.Enum):
RUNNING = ()
SUCCESS = ()
ERRORS = ()
COMPLETED = ()
STOPPED = ()
TIMEOUT = ()
CLOSED = ()
FATAL = ()
ABORTED = ()
UNKNOWN = ()
class AnalyticsWarning(object):
def __init__(self, raw_warning):
self._raw_warning = raw_warning
def code(self):
# type: (...) -> int
return self._raw_warning.get('code')
def message(self):
# type: (...) -> str
return self._raw_warning.get('msg')
class AnalyticsMetrics(object):
def __init__(self,
parent # type: AnalyticsResult
):
self._parentquery = parent
@property
def _raw_metrics(self):
return self._parentquery.metrics
def _as_timedelta(self, time_str):
return from_str(self._raw_metrics.get(time_str))
def elapsed_time(self):
# type: (...) -> timedelta
return self._as_timedelta('elapsedTime')
def execution_time(self):
# type: (...) -> timedelta
return self._as_timedelta('executionTime')
def result_count(self):
# type: (...) -> UnsignedInt64
return UnsignedInt64(self._raw_metrics.get('resultCount', 0))
def result_size(self):
# type: (...) -> UnsignedInt64
return UnsignedInt64(self._raw_metrics.get('resultSize', 0))
def error_count(self):
# type: (...) -> UnsignedInt64
return UnsignedInt64(self._raw_metrics.get('errorCount', 0))
def processed_objects(self):
# type: (...) -> UnsignedInt64
return UnsignedInt64(self._raw_metrics.get('processedObjects', 0))
def warning_count(self):
# type: (...) -> UnsignedInt64
return UnsignedInt64(self._raw_metrics.get('warningCount', 0))
class AnalyticsMetaData(object):
def __init__(self,
parent # type: AnalyticsResult
):
self._parentquery_for_metadata = parent
def request_id(self):
# type: (...) -> str
return self._parentquery_for_metadata.meta.get('requestID')
def client_context_id(self):
# type: (...) -> str
return self._parentquery_for_metadata.meta.get('clientContextID')
def signature(self):
# type: (...) -> Optional[JSON]
return self._parentquery_for_metadata.meta.get('signature')
def status(self):
# type: (...) -> AnalyticsStatus
return AnalyticsStatus[self._parentquery_for_metadata.meta.get(
'status').upper()]
def warnings(self):
# type: (...) -> List[AnalyticsWarning]
return list(
map(AnalyticsWarning, self._parentquery_for_metadata.meta.get('warnings', [])))
def metrics(self):
# type: (...) -> Optional[AnalyticsMetrics]
return AnalyticsMetrics(self._parentquery_for_metadata)
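# A hedged sketch of consuming an AnalyticsResult: rows come from iterating
# the result, and the metadata (status, warnings, metrics) is read once
# iteration has finished. The `result` argument stands in for the object
# returned by executing an analytics query (not shown in this module).
def _example_read_result(result):
    rows = [row for row in result]
    meta = result.metadata()
    print(meta.status(), meta.metrics().elapsed_time())
    return rows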
|
|
"""Tests for go.vumitools.middleware"""
import time
from twisted.internet.defer import inlineCallbacks, returnValue
from zope.interface import implements
from vumi.transports.failures import FailureMessage
from vumi.message import TransportUserMessage
from vumi.middleware.tagger import TaggingMiddleware
from vumi.tests.helpers import VumiTestCase, generate_proxies, IHelper
from vumi.worker import BaseWorker
from go.vumitools.app_worker import GoWorkerMixin, GoWorkerConfigMixin
from go.vumitools.middleware import (
NormalizeMsisdnMiddleware, OptOutMiddleware, MetricsMiddleware,
ConversationStoringMiddleware, RouterStoringMiddleware,
ConversationMetricsMiddleware)
from go.vumitools.tests.helpers import VumiApiHelper, GoMessageHelper
class ToyWorkerConfig(BaseWorker.CONFIG_CLASS, GoWorkerConfigMixin):
pass
class ToyWorker(BaseWorker, GoWorkerMixin):
CONFIG_CLASS = ToyWorkerConfig
def setup_worker(self):
return self._go_setup_worker()
def teardown_worker(self):
return self._go_teardown_worker()
def setup_connectors(self):
pass
class MiddlewareHelper(object):
implements(IHelper)
def __init__(self, middleware_class):
self._vumi_helper = VumiApiHelper()
self._msg_helper = GoMessageHelper()
self.middleware_class = middleware_class
self._middlewares = []
generate_proxies(self, self._vumi_helper)
generate_proxies(self, self._msg_helper)
def setup(self):
return self._vumi_helper.setup(setup_vumi_api=False)
@inlineCallbacks
def cleanup(self):
for mw in self._middlewares:
yield mw.teardown_middleware()
yield self._vumi_helper.cleanup()
@inlineCallbacks
def create_middleware(self, config=None, middleware_class=None,
name='dummy_middleware'):
worker_helper = self._vumi_helper.get_worker_helper()
dummy_worker = yield worker_helper.get_worker(
ToyWorker, self.mk_config({}))
config = self.mk_config(config or {})
if middleware_class is None:
middleware_class = self.middleware_class
mw = middleware_class(name, config, dummy_worker)
self._middlewares.append(mw)
yield mw.setup_middleware()
returnValue(mw)
class TestNormalizeMsisdnMiddleware(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.mw_helper = self.add_helper(
MiddlewareHelper(NormalizeMsisdnMiddleware))
self.mw = yield self.mw_helper.create_middleware({
'country_code': '256',
})
def test_inbound_normalization(self):
msg = self.mw_helper.make_inbound(
"foo", to_addr='8007', from_addr='256123456789')
msg = self.mw.handle_inbound(msg, 'dummy_endpoint')
self.assertEqual(msg['from_addr'], '+256123456789')
def test_inbound_normalization_of_null_from_addr(self):
msg = self.mw_helper.make_inbound(
"foo", to_addr='8007', from_addr=None)
msg = self.mw.handle_inbound(msg, 'dummy_endpoint')
self.assertEqual(msg['from_addr'], None)
@inlineCallbacks
def test_inbound_normalization_ignores_strip_plus(self):
mw = yield self.mw_helper.create_middleware({
'country_code': '256',
'strip_plus': True,
})
msg = self.mw_helper.make_inbound(
"foo", to_addr='8007', from_addr='+256123456789')
msg = mw.handle_inbound(msg, 'dummy_endpoint')
self.assertEqual(msg['from_addr'], '+256123456789')
def test_outbound_normalization(self):
msg = self.mw_helper.make_outbound(
"foo", to_addr='0123456789', from_addr='8007')
msg = self.mw.handle_outbound(msg, 'dummy_endpoint')
self.assertEqual(msg['to_addr'], '+256123456789')
def test_outbound_normalization_of_null_to_addr(self):
msg = self.mw_helper.make_outbound(
"foo", to_addr=None, from_addr='8007')
msg = self.mw.handle_outbound(msg, 'dummy_endpoint')
self.assertEqual(msg['to_addr'], None)
@inlineCallbacks
def test_outbound_normalization_applies_strip_plus(self):
mw = yield self.mw_helper.create_middleware({
'country_code': '256',
'strip_plus': True,
})
msg = self.mw_helper.make_outbound(
"foo", to_addr='0123456789', from_addr='8007')
msg = mw.handle_outbound(msg, 'dummy_endpoint')
self.assertEqual(msg['to_addr'], '256123456789')
class TestOptOutMiddleware(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.mw_helper = self.add_helper(MiddlewareHelper(OptOutMiddleware))
yield self.mw_helper.setup_vumi_api()
self.config = {
'optout_keywords': ['STOP', 'HALT', 'QUIT']
}
@inlineCallbacks
def get_middleware(self, extra_config={}, extra_tagpool_metadata={}):
config = self.config.copy()
config.update(extra_config)
mw = yield self.mw_helper.create_middleware(config)
tagpool_metadata = {
"transport_type": "other",
"msg_options": {"transport_name": "other_transport"},
}
tagpool_metadata.update(extra_tagpool_metadata)
yield self.mw_helper.setup_tagpool(
"pool", ["tag1"], metadata=tagpool_metadata)
returnValue(mw)
@inlineCallbacks
def send_keyword(self, mw, word, expected_response):
msg = self.mw_helper.make_inbound(
word, to_addr='to@domain.org', from_addr='from@domain.org')
TaggingMiddleware.add_tag_to_msg(msg, ("pool", "tag1"))
yield mw.handle_inbound(msg, 'dummy_endpoint')
expected_response = dict(expected_response,
tag={'tag': ['pool', 'tag1']})
# MessageMetadataHelper can add 'go' metadata and we want to ignore it.
if 'go' in msg['helper_metadata']:
expected_response['go'] = msg['helper_metadata']['go']
self.assertEqual(msg['helper_metadata'], expected_response)
@inlineCallbacks
def test_optout_flag(self):
mw = yield self.get_middleware()
for keyword in self.config['optout_keywords']:
yield self.send_keyword(mw, keyword, {
'optout': {
'optout': True,
'optout_keyword': keyword.lower(),
}
})
@inlineCallbacks
def test_non_optout_keywords(self):
mw = yield self.get_middleware()
for keyword in ['THESE', 'DO', 'NOT', 'OPT', 'OUT']:
yield self.send_keyword(mw, keyword, {
'optout': {'optout': False},
})
@inlineCallbacks
def test_disabled_by_tagpool(self):
mw = yield self.get_middleware(extra_tagpool_metadata={
"disable_global_opt_out": True,
})
yield self.send_keyword(mw, 'STOP', {
'optout': {'optout': False},
})
@inlineCallbacks
def test_case_sensitivity(self):
mw = yield self.get_middleware({'case_sensitive': True})
yield self.send_keyword(mw, 'STOP', {
'optout': {
'optout': True,
'optout_keyword': 'STOP',
}
})
yield self.send_keyword(mw, 'stop', {
'optout': {
'optout': False,
}
})
class TestMetricsMiddleware(VumiTestCase):
def setUp(self):
self.mw_helper = self.add_helper(MiddlewareHelper(MetricsMiddleware))
def get_middleware(self, config):
default_config = {
'manager_name': 'metrics_manager',
'count_suffix': 'counter',
'response_time_suffix': 'timer',
}
default_config.update(config or {})
return self.mw_helper.create_middleware(default_config)
def assert_metrics(self, mw, metrics):
for metric_name, expected in metrics.items():
metric = mw.metric_manager[metric_name]
metric_values = [m[1] for m in metric.poll()]
if not isinstance(expected, dict):
expected = {'values': expected}
expected_values = expected.get('values')
if callable(expected_values):
self.assertTrue(all(
expected_values(v) for v in metric_values))
else:
self.assertEqual(metric_values, expected_values)
expected_aggs = expected.get('aggs', ['sum'])
self.assertEqual(set(metric.aggs), set(expected_aggs))
def assert_metrics_absent(self, mw, metrics):
for metric_name in metrics:
self.assertFalse(metric_name in mw.metric_manager)
@inlineCallbacks
def assert_redis_timestamp_exists(self, mw, key_parts, ttl=None):
key = mw.key(*key_parts)
timestamp = yield mw.redis.get(key)
self.assertTrue(timestamp, "Expected timestamp %r in Redis." % (key,))
if ttl is not None:
actual_ttl = yield mw.redis.ttl(key)
self.assertTrue(
0 <= actual_ttl <= ttl,
"Expected ttl of %r to be less than %f, but got: %f" % (
key, ttl, actual_ttl))
@inlineCallbacks
def assert_no_redis_timestamp(self, mw, key_parts):
key = mw.key(*key_parts)
timestamp = yield mw.redis.get(key)
self.assertEqual(
timestamp, None, "Timestamp %r in Redis, expected none." % (key,))
def assert_msg_timestamp_exists(self, mw, msg, transport_name):
timestamp = mw._message_metadata(msg).get(transport_name)
self.assertNotEqual(
timestamp, None, "Expected timestamp in message metadata.")
@inlineCallbacks
def set_redis_timestamp(self, mw, dt, key_parts):
key = mw.key(*key_parts)
timestamp = time.time() + dt
yield mw.redis.set(key, repr(timestamp))
def set_msg_timestamp(self, mw, msg, dt, transport_name):
timestamp = time.time() + dt
mw._message_metadata(msg)[transport_name] = repr(timestamp)
@inlineCallbacks
def test_active_inbound_counters(self):
mw = yield self.get_middleware({'op_mode': 'active'})
msg1 = self.mw_helper.make_inbound("foo", transport_name='endpoint_0')
msg2 = self.mw_helper.make_inbound("foo", transport_name='endpoint_1')
msg3 = self.mw_helper.make_inbound("foo", transport_name='endpoint_1')
# The middleware inspects the message's transport_name value, not
# the dispatcher endpoint it was received on.
yield mw.handle_inbound(msg1, 'dummy_endpoint')
yield mw.handle_inbound(msg2, 'dummy_endpoint')
yield mw.handle_inbound(msg3, 'dummy_endpoint')
self.assert_metrics(mw, {
'endpoint_0.inbound.counter': [1],
'endpoint_1.inbound.counter': [1, 1],
})
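# The two op modes differ only in which name keys the metric: 'active' reads
# msg['transport_name'] (endpoint_0/endpoint_1 above), while 'passive' uses
# the connector name the middleware was invoked with ('dummy_endpoint').
# A hedged sketch of wiring this middleware into a vumi worker config
# ('metrics_mw' is a placeholder name; real deployments also need the
# redis/carbon plumbing configured for the worker):
#
#   middleware:
#     - metrics_mw: go.vumitools.middleware.MetricsMiddleware
#   metrics_mw:
#     manager_name: metrics_manager
#     count_suffix: counter
#     response_time_suffix: timer
#     op_mode: passive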
@inlineCallbacks
def test_passive_inbound_counters(self):
mw = yield self.get_middleware({'op_mode': 'passive'})
msg1 = self.mw_helper.make_inbound("foo", transport_name='endpoint_0')
yield mw.handle_inbound(msg1, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.inbound.counter': [1],
})
@inlineCallbacks
def test_active_outbound_counters(self):
mw = yield self.get_middleware({'op_mode': 'active'})
msg1 = self.mw_helper.make_outbound("x", transport_name='endpoint_0')
msg2 = self.mw_helper.make_outbound("x", transport_name='endpoint_1')
msg3 = self.mw_helper.make_outbound("x", transport_name='endpoint_1')
# The middleware inspects the message's transport_name value, not
# the dispatcher endpoint it was received on.
yield mw.handle_outbound(msg1, 'dummy_endpoint')
yield mw.handle_outbound(msg2, 'dummy_endpoint')
yield mw.handle_outbound(msg3, 'dummy_endpoint')
self.assert_metrics(mw, {
'endpoint_0.outbound.counter': [1],
'endpoint_1.outbound.counter': [1, 1],
})
@inlineCallbacks
def test_passive_outbound_counters(self):
mw = yield self.get_middleware({'op_mode': 'passive'})
msg1 = self.mw_helper.make_outbound("x", transport_name='endpoint_0')
yield mw.handle_outbound(msg1, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.outbound.counter': [1],
})
@inlineCallbacks
def test_active_response_time_inbound(self):
mw = yield self.get_middleware({'op_mode': 'active'})
msg = self.mw_helper.make_inbound("foo", transport_name='endpoint_0')
yield mw.handle_inbound(msg, 'dummy_endpoint')
self.assert_msg_timestamp_exists(mw, msg, 'endpoint_0')
yield self.assert_no_redis_timestamp(
mw, ['endpoint_0', msg['message_id']])
@inlineCallbacks
def test_passive_response_time_inbound(self):
mw = yield self.get_middleware({'op_mode': 'passive'})
msg = self.mw_helper.make_inbound("foo", transport_name='endpoint_0')
yield mw.handle_inbound(msg, 'dummy_endpoint')
self.assert_msg_timestamp_exists(mw, msg, 'dummy_endpoint')
yield self.assert_no_redis_timestamp(
mw, ['dummy_endpoint', msg['message_id']])
@inlineCallbacks
def test_active_response_time_comparison_on_outbound(self):
mw = yield self.get_middleware({'op_mode': 'active'})
inbound_msg = self.mw_helper.make_inbound(
"foo", transport_name='endpoint_0')
self.set_msg_timestamp(mw, inbound_msg, -10, 'endpoint_0')
outbound_msg = inbound_msg.reply("bar")
yield mw.handle_outbound(outbound_msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'endpoint_0.timer': {
'values': (lambda v: v > 10),
'aggs': ['avg', 'sum'],
},
})
@inlineCallbacks
def test_passive_response_time_comparison_on_outbound(self):
mw = yield self.get_middleware({'op_mode': 'passive'})
inbound_msg = self.mw_helper.make_inbound(
"foo", transport_name='endpoint_0')
self.set_msg_timestamp(mw, inbound_msg, -10, 'dummy_endpoint')
outbound_msg = inbound_msg.reply("bar")
yield mw.handle_outbound(outbound_msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.timer': {
'values': (lambda v: v > 10),
'aggs': ['avg', 'sum'],
},
})
@inlineCallbacks
def test_sessions_started_on_inbound(self):
mw = yield self.get_middleware({'op_mode': 'passive'})
msg = self.mw_helper.make_inbound(
"foo", session_event=TransportUserMessage.SESSION_NEW)
yield mw.handle_inbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.sessions_started.counter': [1],
})
@inlineCallbacks
def test_saving_session_start_timestamp_on_inbound(self):
mw = yield self.get_middleware({'op_mode': 'passive'})
msg = self.mw_helper.make_inbound(
"foo", session_event=TransportUserMessage.SESSION_NEW)
yield mw.handle_inbound(msg, 'dummy_endpoint')
yield self.assert_redis_timestamp_exists(
mw, ['dummy_endpoint', msg['to_addr']], ttl=600)
@inlineCallbacks
def test_session_close_on_inbound(self):
mw = yield self.get_middleware({'op_mode': 'passive'})
msg = self.mw_helper.make_inbound(
"foo", session_event=TransportUserMessage.SESSION_CLOSE)
yield self.set_redis_timestamp(
mw, -10, ['dummy_endpoint', msg['to_addr']])
yield mw.handle_inbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.session_time': {
'values': (lambda v: v > 10),
'aggs': ['avg', 'sum'],
},
})
@inlineCallbacks
def test_session_close_on_inbound_with_billing_unit(self):
mw = yield self.get_middleware({
'op_mode': 'passive',
'session_billing_unit': 50,
})
msg = self.mw_helper.make_inbound(
"foo", session_event=TransportUserMessage.SESSION_CLOSE)
yield self.set_redis_timestamp(
mw, -10, ['dummy_endpoint', msg['to_addr']])
yield mw.handle_inbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.session_time': {
'values': (lambda v: v > 10),
'aggs': ['avg', 'sum'],
},
'dummy_endpoint.rounded.50s.session_time': {
'values': (lambda v: v >= 50),
'aggs': ['avg', 'sum'],
},
})
@inlineCallbacks
def test_sessions_started_on_outbound(self):
mw = yield self.get_middleware({'op_mode': 'passive'})
msg = self.mw_helper.make_outbound(
"foo", session_event=TransportUserMessage.SESSION_NEW)
yield mw.handle_outbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.sessions_started.counter': [1],
})
@inlineCallbacks
def test_saving_session_start_timestamp_on_outbound(self):
mw = yield self.get_middleware({'op_mode': 'passive'})
msg = self.mw_helper.make_outbound(
"foo", session_event=TransportUserMessage.SESSION_NEW)
yield mw.handle_outbound(msg, 'dummy_endpoint')
yield self.assert_redis_timestamp_exists(
mw, ['dummy_endpoint', msg['from_addr']], ttl=600)
@inlineCallbacks
def test_session_close_on_outbound(self):
mw = yield self.get_middleware({'op_mode': 'passive'})
msg = self.mw_helper.make_outbound(
"foo", session_event=TransportUserMessage.SESSION_CLOSE)
yield self.set_redis_timestamp(
mw, -10, ['dummy_endpoint', msg['from_addr']])
yield mw.handle_outbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.session_time': {
'values': (lambda v: v > 10),
'aggs': ['avg', 'sum'],
},
})
@inlineCallbacks
def test_session_close_on_outbound_with_billing_unit(self):
mw = yield self.get_middleware({
'op_mode': 'passive',
'session_billing_unit': 50,
})
msg = self.mw_helper.make_outbound(
"foo", session_event=TransportUserMessage.SESSION_CLOSE)
yield self.set_redis_timestamp(
mw, -10, ['dummy_endpoint', msg['from_addr']])
yield mw.handle_outbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.session_time': {
'values': (lambda v: v > 10),
'aggs': ['avg', 'sum'],
},
'dummy_endpoint.rounded.50s.session_time': {
'values': (lambda v: v >= 50),
'aggs': ['avg', 'sum'],
},
})
@inlineCallbacks
def test_provider_metrics_on_inbound(self):
mw = yield self.get_middleware({
'op_mode': 'passive',
'provider_metrics': True,
})
msg = self.mw_helper.make_inbound("foo", provider="MYMNO")
yield mw.handle_inbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.provider.mymno.inbound.counter': [1],
'dummy_endpoint.inbound.counter': [1],
})
@inlineCallbacks
def test_unknown_provider_metrics_on_inbound(self):
mw = yield self.get_middleware({
'op_mode': 'passive',
'provider_metrics': True,
})
msg = self.mw_helper.make_inbound("foo")
yield mw.handle_inbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.provider.unknown.inbound.counter': [1],
'dummy_endpoint.inbound.counter': [1],
})
@inlineCallbacks
def test_provider_metrics_on_outbound(self):
mw = yield self.get_middleware({
'op_mode': 'passive',
'provider_metrics': True,
})
msg = self.mw_helper.make_outbound("foo", provider="MYMNO")
yield mw.handle_outbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.provider.mymno.outbound.counter': [1],
'dummy_endpoint.outbound.counter': [1],
})
@inlineCallbacks
def test_unknown_provider_metrics_on_outbound(self):
mw = yield self.get_middleware({
'op_mode': 'passive',
'provider_metrics': True,
})
msg = self.mw_helper.make_outbound("foo")
yield mw.handle_outbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.provider.unknown.outbound.counter': [1],
'dummy_endpoint.outbound.counter': [1],
})
@inlineCallbacks
def test_tagpool_metrics_on_inbound(self):
mw = yield self.get_middleware({
'op_mode': 'passive',
'tagpools': {
'mypool': {'track_pool': True},
},
})
msg = self.mw_helper.make_inbound("foo", provider="MYMNO")
TaggingMiddleware.add_tag_to_msg(msg, ("mypool", "tagA"))
yield mw.handle_inbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.tagpool.mypool.inbound.counter': [1],
'dummy_endpoint.inbound.counter': [1],
})
@inlineCallbacks
def test_tagpool_metrics_on_outbound(self):
mw = yield self.get_middleware({
'op_mode': 'passive',
'tagpools': {
'mypool': {'track_pool': True},
},
})
msg = self.mw_helper.make_outbound("foo")
TaggingMiddleware.add_tag_to_msg(msg, ("mypool", "tagA"))
yield mw.handle_outbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.tagpool.mypool.outbound.counter': [1],
'dummy_endpoint.outbound.counter': [1],
})
@inlineCallbacks
def test_track_all_tags_metrics_on_inbound(self):
mw = yield self.get_middleware({
'op_mode': 'passive',
'tagpools': {
'mypool': {'track_all_tags': True},
},
})
msg = self.mw_helper.make_inbound("foo", provider="MYMNO")
TaggingMiddleware.add_tag_to_msg(msg, ("mypool", "tagA"))
yield mw.handle_inbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.tag.mypool.taga.inbound.counter': [1],
'dummy_endpoint.inbound.counter': [1],
})
@inlineCallbacks
def test_track_all_tags_metrics_on_outbound(self):
mw = yield self.get_middleware({
'op_mode': 'passive',
'tagpools': {
'mypool': {'track_all_tags': True},
},
})
msg = self.mw_helper.make_outbound("foo")
TaggingMiddleware.add_tag_to_msg(msg, ("mypool", "tagA"))
yield mw.handle_outbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.tag.mypool.taga.outbound.counter': [1],
'dummy_endpoint.outbound.counter': [1],
})
@inlineCallbacks
def test_track_specific_tag_on_inbound(self):
mw = yield self.get_middleware({
'op_mode': 'passive',
'tagpools': {
'mypool': {'tags': ['tagC', 'tagD']},
},
})
msg = self.mw_helper.make_inbound("foo", provider="MYMNO")
TaggingMiddleware.add_tag_to_msg(msg, ("mypool", "tagC"))
yield mw.handle_inbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.tag.mypool.tagc.inbound.counter': [1],
'dummy_endpoint.inbound.counter': [1],
})
@inlineCallbacks
def test_track_specific_tag_on_outbound(self):
mw = yield self.get_middleware({
'op_mode': 'passive',
'tagpools': {
'mypool': {'tags': ['tagC', 'tagD']},
},
})
msg = self.mw_helper.make_outbound("foo")
TaggingMiddleware.add_tag_to_msg(msg, ("mypool", "tagC"))
yield mw.handle_outbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.tag.mypool.tagc.outbound.counter': [1],
'dummy_endpoint.outbound.counter': [1],
})
@inlineCallbacks
def test_slugify_tagname(self):
mw = yield self.get_middleware({})
self.assertEqual(mw.slugify_tagname("*123"), "123")
self.assertEqual(mw.slugify_tagname("*#123"), "123")
self.assertEqual(mw.slugify_tagname("123!"), "123")
self.assertEqual(mw.slugify_tagname("123!+"), "123")
self.assertEqual(mw.slugify_tagname("1*23"), "1.23")
self.assertEqual(mw.slugify_tagname("1*!23"), "1.23")
self.assertEqual(mw.slugify_tagname("*12*3#"), "12.3")
self.assertEqual(
mw.slugify_tagname("foo@example.com"), "foo.example.com")
@inlineCallbacks
def test_slugify_tag_on_inbound(self):
mw = yield self.get_middleware({
'op_mode': 'passive',
'tagpools': {
'mypool': {'tags': ['*123*456#']},
},
})
msg = self.mw_helper.make_inbound("foo", provider="MYMNO")
TaggingMiddleware.add_tag_to_msg(msg, ("mypool", "*123*456#"))
yield mw.handle_inbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.tag.mypool.123.456.inbound.counter': [1],
'dummy_endpoint.inbound.counter': [1],
})
@inlineCallbacks
def test_slugify_tag_on_outbound(self):
mw = yield self.get_middleware({
'op_mode': 'passive',
'tagpools': {
'mypool': {'tags': ['*123*567#']},
},
})
msg = self.mw_helper.make_outbound("foo")
TaggingMiddleware.add_tag_to_msg(msg, ("mypool", "*123*567#"))
yield mw.handle_outbound(msg, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.tag.mypool.123.567.outbound.counter': [1],
'dummy_endpoint.outbound.counter': [1],
})
@inlineCallbacks
def test_ack_event(self):
mw = yield self.get_middleware({'op_mode': 'passive'})
event = self.mw_helper.make_ack()
mw.handle_event(event, 'dummy_endpoint')
self.assert_metrics(mw, {
'dummy_endpoint.event.ack.counter': [1],
})
@inlineCallbacks
def test_delivery_report_event(self):
mw = yield self.get_middleware({'op_mode': 'passive'})
for status in ['delivered', 'failed']:
dr = self.mw_helper.make_delivery_report(delivery_status=status)
mw.handle_event(dr, 'dummy_endpoint')
def metric_name(status):
return 'dummy_endpoint.event.delivery_report.%s.counter' % (
status,)
self.assert_metrics(mw, {
metric_name('delivered'): [1],
metric_name('failed'): [1],
})
@inlineCallbacks
def test_failure(self):
mw = yield self.get_middleware({'op_mode': 'passive'})
for failure in ['permanent', 'temporary', None]:
fail_msg = FailureMessage(message='foo', failure_code=failure,
reason='bar')
mw.handle_failure(fail_msg, 'dummy_endpoint')
def metric_name(status):
return 'dummy_endpoint.failure.%s.counter' % (status,)
self.assert_metrics(mw, {
metric_name('permanent'): [1],
metric_name('temporary'): [1],
metric_name('unspecified'): [1],
})
@inlineCallbacks
def test_session_max_lifetime(self):
mw = yield self.get_middleware({'max_session_time': 10})
msg1 = self.mw_helper.make_inbound(
'foo', session_event=TransportUserMessage.SESSION_NEW)
yield mw.handle_inbound(msg1, 'dummy_endpoint')
yield self.assert_redis_timestamp_exists(
mw, ['dummy_endpoint', msg1['to_addr']], ttl=10)
@inlineCallbacks
def test_metric_connectors_inbound_metrics_fired(self):
mw = yield self.get_middleware({'metric_connectors': ['conn_1']})
msg1 = self.mw_helper.make_inbound("foo")
msg2 = yield mw.handle_inbound(msg1, 'conn_1')
self.assert_metrics(mw, {
'conn_1.inbound.counter': [1],
})
self.assertEqual(msg1, msg2)
@inlineCallbacks
def test_metric_connectors_inbound_metrics_not_fired(self):
mw = yield self.get_middleware({'metric_connectors': ['conn_1']})
msg1 = self.mw_helper.make_inbound("foo")
msg2 = yield mw.handle_inbound(msg1, 'conn_2')
self.assert_metrics_absent(mw, [
'conn_2.inbound.counter',
])
self.assertEqual(msg1, msg2)
@inlineCallbacks
def test_metric_connectors_outbound_fired(self):
mw = yield self.get_middleware({'metric_connectors': ['conn_1']})
msg1 = self.mw_helper.make_outbound("foo")
msg2 = yield mw.handle_outbound(msg1, 'conn_1')
self.assert_metrics(mw, {
'conn_1.outbound.counter': [1],
})
self.assertEqual(msg1, msg2)
@inlineCallbacks
def test_metric_connectors_outbound_not_fired(self):
mw = yield self.get_middleware({'metric_connectors': ['conn_1']})
msg1 = self.mw_helper.make_outbound("foo")
msg2 = yield mw.handle_outbound(msg1, 'conn_2')
self.assert_metrics_absent(mw, [
'conn_2.outbound.counter',
])
self.assertEqual(msg1, msg2)
@inlineCallbacks
def test_metric_connectors_event_fired(self):
mw = yield self.get_middleware({'metric_connectors': ['conn_1']})
event1 = self.mw_helper.make_ack()
event2 = mw.handle_event(event1, 'conn_1')
self.assert_metrics(mw, {
'conn_1.event.ack.counter': [1],
})
self.assertEqual(event1, event2)
@inlineCallbacks
def test_metric_connectors_event_not_fired(self):
mw = yield self.get_middleware({'metric_connectors': ['conn_1']})
event1 = self.mw_helper.make_ack()
event2 = mw.handle_event(event1, 'conn_2')
self.assert_metrics_absent(mw, [
'conn_2.event.ack.counter',
])
self.assertEqual(event1, event2)
@inlineCallbacks
def test_metric_connectors_failure_fired(self):
mw = yield self.get_middleware({'metric_connectors': ['conn_1']})
fail1 = FailureMessage(
message='foo', failure_code='permanent', reason='bar')
fail2 = mw.handle_failure(fail1, 'conn_1')
self.assert_metrics(mw, {
'conn_1.failure.permanent.counter': [1],
})
self.assertEqual(fail1, fail2)
@inlineCallbacks
def test_metric_connectors_failure_not_fired(self):
mw = yield self.get_middleware({'metric_connectors': ['conn_1']})
fail1 = FailureMessage(
message='foo', failure_code='permanent', reason='bar')
fail2 = mw.handle_failure(fail1, 'conn_2')
self.assert_metrics_absent(mw, [
'conn_2.failure.permanent.counter',
])
self.assertEqual(fail1, fail2)
def collect_all_results(index_page, results=None):
if results is None:
results = []
if index_page is None:
return results
results.extend(index_page)
return index_page.next_page().addCallback(collect_all_results, results)
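# collect_all_results walks a paginated index result: each next_page() call
# returns a Deferred that fires with the next page (or None at the end), so
# the recursion composes into one Deferred firing with the flat list of
# results. A minimal sketch of driving it from @inlineCallbacks code, as the
# tests below do (mdb and batch_key stand in for a message store and key):
@inlineCallbacks
def _example_collect_batch_inbound_keys(mdb, batch_key):
    index_page = yield mdb.batch_inbound_keys_page(batch_key)
    keys = yield collect_all_results(index_page)
    returnValue(keys)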
class TestConversationStoringMiddleware(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.mw_helper = self.add_helper(
MiddlewareHelper(ConversationStoringMiddleware))
yield self.mw_helper.setup_vumi_api()
self.user_helper = yield self.mw_helper.make_user(u'user')
self.conv = yield self.user_helper.create_conversation(u'dummy_conv')
@inlineCallbacks
def assert_stored_inbound(self, msgs):
mdb = self.mw_helper.get_vumi_api().mdb
index_page = yield mdb.batch_inbound_keys_page(self.conv.batch.key)
ids = yield collect_all_results(index_page)
self.assertEqual(sorted(ids), sorted(m['message_id'] for m in msgs))
@inlineCallbacks
def assert_stored_outbound(self, msgs):
mdb = self.mw_helper.get_vumi_api().mdb
index_page = yield mdb.batch_outbound_keys_page(self.conv.batch.key)
ids = yield collect_all_results(index_page)
self.assertEqual(sorted(ids), sorted(m['message_id'] for m in msgs))
@inlineCallbacks
def test_conversation_cache_ttl_config(self):
"""
The conversation_cache_ttl config option is passed to the cache.
"""
# When the config isn't provided, we use the default.
mw = yield self.mw_helper.create_middleware()
self.assertEqual(mw._conversation_cache._ttl, 5)
mw2 = yield self.mw_helper.create_middleware(
{"conversation_cache_ttl": 0})
self.assertEqual(mw2._conversation_cache._ttl, 0)
@inlineCallbacks
def test_inbound_message(self):
mw = yield self.mw_helper.create_middleware()
msg1 = self.mw_helper.make_inbound("inbound", conv=self.conv)
yield mw.handle_consume_inbound(msg1, 'default')
yield self.assert_stored_inbound([msg1])
msg2 = self.mw_helper.make_inbound("inbound", conv=self.conv)
yield mw.handle_publish_inbound(msg2, 'default')
yield self.assert_stored_inbound([msg1, msg2])
@inlineCallbacks
def test_inbound_message_no_consume_store(self):
mw = yield self.mw_helper.create_middleware({
'store_on_consume': False,
})
msg1 = self.mw_helper.make_inbound("inbound", conv=self.conv)
yield mw.handle_consume_inbound(msg1, 'default')
yield self.assert_stored_inbound([])
msg2 = self.mw_helper.make_inbound("inbound", conv=self.conv)
yield mw.handle_publish_inbound(msg2, 'default')
yield self.assert_stored_inbound([msg2])
@inlineCallbacks
def test_outbound_message(self):
mw = yield self.mw_helper.create_middleware()
msg1 = self.mw_helper.make_outbound("outbound", conv=self.conv)
yield mw.handle_consume_outbound(msg1, 'default')
yield self.assert_stored_outbound([msg1])
msg2 = self.mw_helper.make_outbound("outbound", conv=self.conv)
yield mw.handle_publish_outbound(msg2, 'default')
yield self.assert_stored_outbound([msg1, msg2])
@inlineCallbacks
def test_outbound_message_no_consume_store(self):
mw = yield self.mw_helper.create_middleware({
'store_on_consume': False,
})
msg1 = self.mw_helper.make_outbound("outbound", conv=self.conv)
yield mw.handle_consume_outbound(msg1, 'default')
yield self.assert_stored_outbound([])
msg2 = self.mw_helper.make_outbound("outbound", conv=self.conv)
yield mw.handle_publish_outbound(msg2, 'default')
yield self.assert_stored_outbound([msg2])
@inlineCallbacks
def test_conversation_cached_for_inbound_message(self):
"""
When we process an inbound message, the conversation lookup is cached.
"""
mw = yield self.mw_helper.create_middleware()
cache = mw._conversation_cache
self.assertEqual(cache._models.keys(), [])
msg1 = self.mw_helper.make_inbound("inbound", conv=self.conv)
yield mw.handle_consume_inbound(msg1, 'default')
self.assertEqual(cache._models.keys(), [self.conv.key])
@inlineCallbacks
def test_conversation_cached_for_outbound_message(self):
"""
When we process an outbound message, the conversation lookup is cached.
"""
mw = yield self.mw_helper.create_middleware()
cache = mw._conversation_cache
self.assertEqual(cache._models.keys(), [])
msg1 = self.mw_helper.make_outbound("outbound", conv=self.conv)
yield mw.handle_consume_outbound(msg1, 'default')
self.assertEqual(cache._models.keys(), [self.conv.key])
class TestRouterStoringMiddleware(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.mw_helper = self.add_helper(
MiddlewareHelper(RouterStoringMiddleware))
yield self.mw_helper.setup_vumi_api()
self.user_helper = yield self.mw_helper.make_user(u'user')
self.router = yield self.user_helper.create_router(u'dummy_conv')
@inlineCallbacks
def assert_stored_inbound(self, msgs):
mdb = self.mw_helper.get_vumi_api().mdb
index_page = yield mdb.batch_inbound_keys_page(self.router.batch.key)
ids = yield collect_all_results(index_page)
self.assertEqual(sorted(ids), sorted(m['message_id'] for m in msgs))
@inlineCallbacks
def assert_stored_outbound(self, msgs):
mdb = self.mw_helper.get_vumi_api().mdb
index_page = yield mdb.batch_outbound_keys_page(self.router.batch.key)
ids = yield collect_all_results(index_page)
self.assertEqual(sorted(ids), sorted(m['message_id'] for m in msgs))
@inlineCallbacks
def test_inbound_message(self):
mw = yield self.mw_helper.create_middleware()
msg1 = self.mw_helper.make_inbound("inbound", router=self.router)
yield mw.handle_consume_inbound(msg1, 'default')
yield self.assert_stored_inbound([msg1])
msg2 = self.mw_helper.make_inbound("inbound", router=self.router)
yield mw.handle_publish_inbound(msg2, 'default')
yield self.assert_stored_inbound([msg1, msg2])
@inlineCallbacks
def test_inbound_message_no_consume_store(self):
mw = yield self.mw_helper.create_middleware({
'store_on_consume': False,
})
msg1 = self.mw_helper.make_inbound("inbound", router=self.router)
yield mw.handle_consume_inbound(msg1, 'default')
yield self.assert_stored_inbound([])
msg2 = self.mw_helper.make_inbound("inbound", router=self.router)
yield mw.handle_publish_inbound(msg2, 'default')
yield self.assert_stored_inbound([msg2])
@inlineCallbacks
def test_outbound_message(self):
mw = yield self.mw_helper.create_middleware()
msg1 = self.mw_helper.make_outbound("outbound", router=self.router)
yield mw.handle_consume_outbound(msg1, 'default')
yield self.assert_stored_outbound([msg1])
msg2 = self.mw_helper.make_outbound("outbound", router=self.router)
yield mw.handle_publish_outbound(msg2, 'default')
yield self.assert_stored_outbound([msg1, msg2])
@inlineCallbacks
def test_outbound_message_no_consume_store(self):
mw = yield self.mw_helper.create_middleware({
'store_on_consume': False,
})
msg1 = self.mw_helper.make_outbound("outbound", router=self.router)
yield mw.handle_consume_outbound(msg1, 'default')
yield self.assert_stored_outbound([])
msg2 = self.mw_helper.make_outbound("outbound", router=self.router)
yield mw.handle_publish_outbound(msg2, 'default')
yield self.assert_stored_outbound([msg2])
class TestConversationMetricsMiddleware(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.mw_helper = self.add_helper(
MiddlewareHelper(ConversationMetricsMiddleware))
yield self.mw_helper.setup_vumi_api()
self.user_helper = yield self.mw_helper.make_user(u'user')
self.conv = yield self.user_helper.create_conversation(
u'bulk_message', name=u'Test Conversation', started=True)
@inlineCallbacks
def assert_conv_key_stored(self, mw, msg):
value = yield mw.redis.smembers(
ConversationMetricsMiddleware.RECENT_CONV_KEY)
conv_details = '{"account_key": "%s","conv_key": "%s"}' % \
(self.conv.user_account.key, self.conv.key)
self.assertTrue(conv_details in value)
self.assertEqual(len(value), 1)
self.assertIn(conv_details, mw.local_recent_convs)
@inlineCallbacks
def assert_conv_key_not_stored(self, mw):
value = yield mw.redis.smembers(
ConversationMetricsMiddleware.RECENT_CONV_KEY)
self.assertSetEqual(value, set([]))
self.assertSetEqual(mw.local_recent_convs, set([]))
@inlineCallbacks
def test_inbound_message(self):
mw = yield self.mw_helper.create_middleware()
msg_helper = GoMessageHelper(vumi_helper=self.mw_helper)
yield self.assert_conv_key_not_stored(mw)
[msg] = yield msg_helper.add_inbound_to_conv(self.conv, 1)
yield mw.handle_inbound(msg, "conn_1")
yield self.assert_conv_key_stored(mw, msg)
@inlineCallbacks
def test_outbound_message(self):
mw = yield self.mw_helper.create_middleware()
msg_helper = GoMessageHelper(vumi_helper=self.mw_helper)
yield self.assert_conv_key_not_stored(mw)
[msg] = yield msg_helper.add_outbound_to_conv(self.conv, 1)
yield mw.handle_outbound(msg, "conn_1")
yield self.assert_conv_key_stored(mw, msg)
@inlineCallbacks
def test_local_recent_convs_shields_redis(self):
mw = yield self.mw_helper.create_middleware()
msg_helper = GoMessageHelper(vumi_helper=self.mw_helper)
conv_details = '{"account_key": "%s","conv_key": "%s"}' % \
(self.conv.user_account.key, self.conv.key)
yield self.assert_conv_key_not_stored(mw)
[msg] = yield msg_helper.add_inbound_to_conv(self.conv, 1)
mw.local_recent_convs.add(conv_details)
yield mw.record_conv_seen(msg)
value = yield mw.redis.smembers(
ConversationMetricsMiddleware.RECENT_CONV_KEY)
self.assertSetEqual(value, set([]))
@inlineCallbacks
def test_reset_local_recent_convs(self):
mw = yield self.mw_helper.create_middleware()
mw.local_recent_convs.update(["conv1", "conv2"])
mw.reset_local_recent_convs()
self.assertEqual(mw.local_recent_convs, set([]))
|
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for basic_session_run_hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import threading
import time
import tensorflow as tf
from tensorflow.contrib import testing
from tensorflow.python.framework import meta_graph
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import monitored_session
class SecondOrStepTimerTest(tf.test.TestCase):
def test_raise_in_both_secs_and_steps(self):
with self.assertRaises(ValueError):
basic_session_run_hooks._SecondOrStepTimer(every_secs=2.0, every_steps=10)
def test_raise_in_none_secs_and_steps(self):
with self.assertRaises(ValueError):
basic_session_run_hooks._SecondOrStepTimer()
def test_every_secs(self):
timer = basic_session_run_hooks._SecondOrStepTimer(every_secs=1.0)
self.assertTrue(timer.should_trigger_for_step(1))
timer.update_last_triggered_step(1)
self.assertFalse(timer.should_trigger_for_step(1))
self.assertFalse(timer.should_trigger_for_step(2))
time.sleep(1.0)
self.assertFalse(timer.should_trigger_for_step(1))
self.assertTrue(timer.should_trigger_for_step(2))
def test_every_steps(self):
timer = basic_session_run_hooks._SecondOrStepTimer(every_steps=3)
self.assertTrue(timer.should_trigger_for_step(1))
timer.update_last_triggered_step(1)
self.assertFalse(timer.should_trigger_for_step(1))
self.assertFalse(timer.should_trigger_for_step(2))
self.assertFalse(timer.should_trigger_for_step(3))
self.assertTrue(timer.should_trigger_for_step(4))
def test_update_last_triggered_step(self):
timer = basic_session_run_hooks._SecondOrStepTimer(every_steps=1)
elapsed_secs, elapsed_steps = timer.update_last_triggered_step(1)
self.assertEqual(None, elapsed_secs)
self.assertEqual(None, elapsed_steps)
elapsed_secs, elapsed_steps = timer.update_last_triggered_step(5)
self.assertLess(0, elapsed_secs)
self.assertEqual(4, elapsed_steps)
elapsed_secs, elapsed_steps = timer.update_last_triggered_step(7)
self.assertLess(0, elapsed_secs)
self.assertEqual(2, elapsed_steps)
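# _SecondOrStepTimer is an internal helper, but the tests above pin down its
# contract: should_trigger_for_step() asks whether enough seconds/steps have
# elapsed since the last recorded trigger, and update_last_triggered_step()
# records one. A minimal illustrative polling loop (not TensorFlow API):
def _example_timer_loop(num_steps=10):
    timer = basic_session_run_hooks._SecondOrStepTimer(every_steps=3)
    triggered = []
    for step in range(1, num_steps + 1):
        if timer.should_trigger_for_step(step):
            timer.update_last_triggered_step(step)
            triggered.append(step)
    return triggered  # [1, 4, 7, 10] for every_steps=3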
class StopAtStepTest(tf.test.TestCase):
def test_raise_in_both_last_step_and_num_steps(self):
with self.assertRaises(ValueError):
tf.train.StopAtStepHook(num_steps=10, last_step=20)
def test_stop_based_on_last_step(self):
h = tf.train.StopAtStepHook(last_step=10)
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
no_op = tf.no_op()
h.begin()
with tf.Session() as sess:
mon_sess = monitored_session._HookedSession(sess, [h])
sess.run(tf.assign(global_step, 5))
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
sess.run(tf.assign(global_step, 9))
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
sess.run(tf.assign(global_step, 10))
mon_sess.run(no_op)
self.assertTrue(mon_sess.should_stop())
sess.run(tf.assign(global_step, 11))
mon_sess._should_stop = False
mon_sess.run(no_op)
self.assertTrue(mon_sess.should_stop())
def test_stop_based_on_num_step(self):
h = tf.train.StopAtStepHook(num_steps=10)
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
no_op = tf.no_op()
h.begin()
with tf.Session() as sess:
mon_sess = monitored_session._HookedSession(sess, [h])
sess.run(tf.assign(global_step, 5))
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
sess.run(tf.assign(global_step, 13))
mon_sess.run(no_op)
self.assertFalse(mon_sess.should_stop())
sess.run(tf.assign(global_step, 14))
mon_sess.run(no_op)
self.assertTrue(mon_sess.should_stop())
sess.run(tf.assign(global_step, 15))
mon_sess._should_stop = False
mon_sess.run(no_op)
self.assertTrue(mon_sess.should_stop())
class LoggingTensorHookTest(tf.test.TestCase):
def setUp(self):
# Mock out logging calls so we can verify whether correct tensors are being
# monitored.
self._actual_log = tf.logging.info
self.logged_message = None
def mock_log(*args, **kwargs):
self.logged_message = args
self._actual_log(*args, **kwargs)
tf.logging.info = mock_log
def tearDown(self):
tf.logging.info = self._actual_log
def test_illegal_args(self):
with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'):
tf.train.LoggingTensorHook(tensors=['t'], every_n_iter=0)
with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'):
tf.train.LoggingTensorHook(tensors=['t'], every_n_iter=-10)
with self.assertRaisesRegexp(ValueError, 'xactly one of'):
tf.train.LoggingTensorHook(tensors=['t'], every_n_iter=5, every_n_secs=5)
with self.assertRaisesRegexp(ValueError, 'xactly one of'):
tf.train.LoggingTensorHook(tensors=['t'])
def test_print_every_n_steps(self):
with tf.Graph().as_default(), tf.Session() as sess:
t = tf.constant(42.0, name='foo')
train_op = tf.constant(3)
hook = tf.train.LoggingTensorHook(tensors=[t.name], every_n_iter=10)
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook])
sess.run(tf.global_variables_initializer())
mon_sess.run(train_op)
self.assertRegexpMatches(str(self.logged_message), t.name)
for _ in range(3):
self.logged_message = ''
for _ in range(9):
mon_sess.run(train_op)
# assertNotRegexpMatches is not supported by python 3.1 and later
self.assertEqual(str(self.logged_message).find(t.name), -1)
mon_sess.run(train_op)
self.assertRegexpMatches(str(self.logged_message), t.name)
def test_print_every_n_secs(self):
with tf.Graph().as_default(), tf.Session() as sess:
t = tf.constant(42.0, name='foo')
train_op = tf.constant(3)
hook = tf.train.LoggingTensorHook(tensors=[t.name], every_n_secs=1.0)
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook])
sess.run(tf.global_variables_initializer())
mon_sess.run(train_op)
self.assertRegexpMatches(str(self.logged_message), t.name)
# assertNotRegexpMatches is not supported by python 3.1 and later
self.logged_message = ''
mon_sess.run(train_op)
self.assertEqual(str(self.logged_message).find(t.name), -1)
time.sleep(1.0)
self.logged_message = ''
mon_sess.run(train_op)
self.assertRegexpMatches(str(self.logged_message), t.name)
class CheckpointSaverHookTest(tf.test.TestCase):
def setUp(self):
self.model_dir = tempfile.mkdtemp()
self.graph = tf.Graph()
with self.graph.as_default():
self.scaffold = monitored_session.Scaffold()
self.global_step = tf.contrib.framework.get_or_create_global_step()
self.train_op = tf.assign_add(self.global_step, 1)
def tearDown(self):
shutil.rmtree(self.model_dir, ignore_errors=True)
def test_raise_when_saver_and_scaffold_both_missing(self):
with self.assertRaises(ValueError):
tf.train.CheckpointSaverHook(self.model_dir)
def test_raise_when_saver_and_scaffold_both_present(self):
with self.assertRaises(ValueError):
tf.train.CheckpointSaverHook(
self.model_dir, saver=self.scaffold.saver, scaffold=self.scaffold)
def test_raise_in_both_secs_and_steps(self):
with self.assertRaises(ValueError):
tf.train.CheckpointSaverHook(self.model_dir, save_secs=10, save_steps=20)
def test_raise_in_none_secs_and_steps(self):
with self.assertRaises(ValueError):
tf.train.CheckpointSaverHook(self.model_dir)
def test_save_secs_saves_in_first_step(self):
with self.graph.as_default():
hook = tf.train.CheckpointSaverHook(
self.model_dir, save_secs=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with tf.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
self.assertEqual(1, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
def test_save_secs_saves_periodically(self):
with self.graph.as_default():
hook = tf.train.CheckpointSaverHook(
self.model_dir, save_secs=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with tf.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
mon_sess.run(self.train_op)
# Not saved
self.assertEqual(1, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
time.sleep(2.5)
mon_sess.run(self.train_op)
# saved
self.assertEqual(3, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
mon_sess.run(self.train_op)
mon_sess.run(self.train_op)
# Not saved
self.assertEqual(3, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
time.sleep(2.5)
mon_sess.run(self.train_op)
# saved
self.assertEqual(6, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
def test_save_steps_saves_in_first_step(self):
with self.graph.as_default():
hook = tf.train.CheckpointSaverHook(
self.model_dir, save_steps=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with tf.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
self.assertEqual(1, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
def test_save_steps_saves_periodically(self):
with self.graph.as_default():
hook = tf.train.CheckpointSaverHook(
self.model_dir, save_steps=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with tf.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
mon_sess.run(self.train_op)
# Not saved
self.assertEqual(1, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
mon_sess.run(self.train_op)
# saved
self.assertEqual(3, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
mon_sess.run(self.train_op)
# Not saved
self.assertEqual(3, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
mon_sess.run(self.train_op)
        # Saved
self.assertEqual(5, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
def test_save_saves_at_end(self):
with self.graph.as_default():
hook = tf.train.CheckpointSaverHook(
self.model_dir, save_secs=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with tf.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
mon_sess.run(self.train_op)
hook.end(sess)
self.assertEqual(2, tf.contrib.framework.load_variable(
self.model_dir, self.global_step.name))
def test_summary_writer_defs(self):
testing.FakeSummaryWriter.install()
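    # FakeSummaryWriter.install() patches the summary writer machinery so the
    # hook's writes can be asserted on; uninstall() at the end restores it.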
tf.train.SummaryWriterCache.clear()
summary_writer = tf.train.SummaryWriterCache.get(self.model_dir)
with self.graph.as_default():
hook = tf.train.CheckpointSaverHook(
self.model_dir, save_steps=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with tf.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.model_dir,
expected_added_meta_graphs=[meta_graph.create_meta_graph_def(
graph_def=self.graph.as_graph_def(add_shapes=True),
saver_def=self.scaffold.saver.saver_def)])
testing.FakeSummaryWriter.uninstall()
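
# Tests for tf.train.StepCounterHook, which writes a 'global_step/sec'
# summary either every `every_n_steps` steps or every `every_n_secs` seconds.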
class StepCounterHookTest(tf.test.TestCase):
def setUp(self):
self.log_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.log_dir, ignore_errors=True)
def test_step_counter_every_n_steps(self):
with tf.Graph().as_default() as g, tf.Session() as sess:
global_step = tf.contrib.framework.get_or_create_global_step()
train_op = tf.assign_add(global_step, 1)
summary_writer = testing.FakeSummaryWriter(self.log_dir, g)
hook = tf.train.StepCounterHook(
summary_writer=summary_writer, every_n_steps=10)
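      # With every_n_steps=10, the hook emits a 'global_step/sec' summary at
      # steps 11 and 21 over the 30 runs below.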
hook.begin()
sess.run(tf.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
for _ in range(30):
time.sleep(0.01)
mon_sess.run(train_op)
hook.end(sess)
summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_graph=g,
expected_summaries={})
self.assertItemsEqual([11, 21], summary_writer.summaries.keys())
for step in [11, 21]:
summary_value = summary_writer.summaries[step][0].value[0]
self.assertEqual('global_step/sec', summary_value.tag)
self.assertGreater(summary_value.simple_value, 0)
def test_step_counter_every_n_secs(self):
with tf.Graph().as_default() as g, tf.Session() as sess:
global_step = tf.contrib.framework.get_or_create_global_step()
train_op = tf.assign_add(global_step, 1)
summary_writer = testing.FakeSummaryWriter(self.log_dir, g)
hook = tf.train.StepCounterHook(
summary_writer=summary_writer, every_n_steps=None, every_n_secs=0.1)
hook.begin()
sess.run(tf.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(train_op)
time.sleep(0.2)
mon_sess.run(train_op)
time.sleep(0.2)
mon_sess.run(train_op)
hook.end(sess)
summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_graph=g,
expected_summaries={})
self.assertTrue(summary_writer.summaries, 'No summaries were created.')
self.assertItemsEqual([2, 3], summary_writer.summaries.keys())
for summary in summary_writer.summaries.values():
summary_value = summary[0].value[0]
self.assertEqual('global_step/sec', summary_value.tag)
self.assertGreater(summary_value.simple_value, 0)
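
# Tests for tf.train.SummarySaverHook, which evaluates the configured summary
# op(s) on a secs/steps schedule and writes the results to the summary writer.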
class SummarySaverHookTest(tf.test.TestCase):
def setUp(self):
tf.test.TestCase.setUp(self)
self.log_dir = 'log/dir'
self.summary_writer = testing.FakeSummaryWriter(self.log_dir)
var = tf.Variable(0.0)
tensor = tf.assign_add(var, 1.0)
tensor2 = tensor * 2
self.summary_op = tf.summary.scalar('my_summary', tensor)
self.summary_op2 = tf.summary.scalar('my_summary2', tensor2)
global_step = tf.contrib.framework.get_or_create_global_step()
self.train_op = tf.assign_add(global_step, 1)
def test_raise_when_scaffold_and_summary_op_both_missing(self):
with self.assertRaises(ValueError):
tf.train.SummarySaverHook()
def test_raise_when_scaffold_and_summary_op_both_present(self):
with self.assertRaises(ValueError):
tf.train.SummarySaverHook(scaffold=tf.train.Scaffold(),
summary_op=self.summary_op)
def test_raise_in_both_secs_and_steps(self):
with self.assertRaises(ValueError):
tf.train.SummarySaverHook(
save_secs=10,
save_steps=20,
summary_writer=self.summary_writer)
def test_raise_in_none_secs_and_steps(self):
with self.assertRaises(ValueError):
tf.train.SummarySaverHook(
save_secs=None,
save_steps=None,
summary_writer=self.summary_writer)
def test_save_steps(self):
hook = tf.train.SummarySaverHook(
save_steps=8,
summary_writer=self.summary_writer,
summary_op=self.summary_op)
with self.test_session() as sess:
hook.begin()
sess.run(tf.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
for _ in range(30):
mon_sess.run(self.train_op)
hook.end(sess)
self.summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_summaries={
1: {'my_summary': 1.0},
9: {'my_summary': 2.0},
17: {'my_summary': 3.0},
25: {'my_summary': 4.0},
})
def test_multiple_summaries(self):
hook = tf.train.SummarySaverHook(
save_steps=8,
summary_writer=self.summary_writer,
summary_op=[self.summary_op, self.summary_op2])
with self.test_session() as sess:
hook.begin()
sess.run(tf.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
for _ in range(10):
mon_sess.run(self.train_op)
hook.end(sess)
self.summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_summaries={
1: {
'my_summary': 1.0,
'my_summary2': 2.0
},
9: {
'my_summary': 2.0,
'my_summary2': 4.0
},
})
def test_save_secs_saving_once_every_step(self):
hook = tf.train.SummarySaverHook(
save_secs=0.5,
summary_writer=self.summary_writer,
summary_op=self.summary_op)
with self.test_session() as sess:
hook.begin()
sess.run(tf.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
for _ in range(4):
mon_sess.run(self.train_op)
time.sleep(0.5)
hook.end(sess)
self.summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_summaries={
1: {'my_summary': 1.0},
2: {'my_summary': 2.0},
3: {'my_summary': 3.0},
4: {'my_summary': 4.0},
})
def test_save_secs_saving_once_every_three_steps(self):
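    # The elapsed-time check runs once per step, so with save_secs=0.9 and
    # ~0.3s per step a summary is saved on every third step: 1, 4, 7.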
hook = tf.train.SummarySaverHook(
save_secs=0.9,
summary_writer=self.summary_writer,
summary_op=self.summary_op)
with self.test_session() as sess:
hook.begin()
sess.run(tf.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
for _ in range(8):
mon_sess.run(self.train_op)
time.sleep(0.3)
hook.end(sess)
self.summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_summaries={
1: {'my_summary': 1.0},
4: {'my_summary': 2.0},
7: {'my_summary': 3.0},
})
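
# Tests for tf.train.GlobalStepWaiterHook, whose before_run blocks until the
# global step reaches `wait_until_step`.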
class GlobalStepWaiterHookTest(tf.test.TestCase):
def test_not_wait_for_step_zero(self):
with tf.Graph().as_default():
tf.contrib.framework.get_or_create_global_step()
hook = tf.train.GlobalStepWaiterHook(wait_until_step=0)
hook.begin()
with tf.Session() as sess:
# Before run should return without waiting gstep increment.
hook.before_run(
tf.train.SessionRunContext(
original_args=None, session=sess))
def test_wait_for_step(self):
with tf.Graph().as_default():
gstep = tf.contrib.framework.get_or_create_global_step()
hook = tf.train.GlobalStepWaiterHook(wait_until_step=1000)
hook.begin()
with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
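        # Run before_run on a daemon thread; it should block until the global
        # step reaches wait_until_step=1000.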
waiter = threading.Thread(
target=hook.before_run,
args=(tf.train.SessionRunContext(
original_args=None, session=sess),))
waiter.daemon = True
waiter.start()
time.sleep(1.0)
self.assertTrue(waiter.is_alive())
sess.run(tf.assign(gstep, 500))
time.sleep(1.0)
self.assertTrue(waiter.is_alive())
sess.run(tf.assign(gstep, 1100))
time.sleep(1.2)
self.assertFalse(waiter.is_alive())
if __name__ == '__main__':
tf.test.main()