gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
#
# MythBox for XBMC - http://mythbox.googlecode.com
# Copyright (C) 2011 analogue@yahoo.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import os
import sys
import traceback
import xbmc
import xbmcgui
from mythbox.bus import EventBus
class BootStrapper(object):
    """Staged startup for the MythBox XBMC addon.

    Each bootstrap*() method initializes one subsystem (logging, platform,
    event bus, caches, settings, ...). run() executes them in dependency
    order, reports the failing stage to the user on error, and always
    closes the splash screen when finished.
    """

    def __init__(self, splash):
        # :param splash: splash window shown during startup; closed by run()
        self.log = None              # assigned by bootstrapLogger()
        self.platform = None         # assigned by bootstrapPlatform()
        self.stage = 'Initializing'  # human-readable label used in error dialogs
        self.shell = None            # optional debug shell (bootstrapDebugShell)
        self.splash = splash
        self.failSilent = False      # when True, suppress the error dialog

    def run(self):
        """Drive all bootstrap stages in order; report failures unless failSilent."""
        try:
            try:
                self.bootstrapLogger()
                self.bootstrapPlatform()
                self.bootstrapEventBus()
                self.bootstrapCaches()
                self.bootstrapSettings()
                self.bootstrapUpdater()
                self.bootstrapFeeds()
                # TODO: Re-enable when twisted not loaded from dist-packages
                #self.bootstrapDebugShell()
                self.bootstrapHomeScreen()
            except Exception, ex:
                if not self.failSilent:
                    self.handleFailure(ex)
        finally:
            # always dismiss the splash screen, even on failure
            self.splash.close()

    def handleFailure(self, cause):
        """Log the failure and show the user which bootstrap stage failed.

        :param cause: the exception that aborted bootstrapping
        """
        msg = 'MythBox:%s - Error: %s' % (self.stage, cause)
        xbmc.log(msg)
        # NOTE(review): traceback.print_exc() writes to stderr and returns
        # None, so this 'print' only emits 'None' -- confirm intent
        print traceback.print_exc()
        if self.log:
            self.log.exception(str(cause))
        xbmcgui.Dialog().ok('MythBox Error', 'Stage: %s' % self.stage, 'Exception: %s' % str(cause))

    def updateProgress(self, msg):
        # Progress callback; currently just logs the message.
        self.log.info(msg)

    def bootstrapLogger(self):
        """Configure the 'mythbox.*' logger hierarchy from mythbox_log.ini."""
        import logging
        import logging.config
        self.stage = 'Initializing Logger'
        import xbmcaddon
        scriptDir = xbmcaddon.Addon('script.mythbox').getAddonInfo('path')
        # (historical per-platform log config selection kept for reference)
        # if 'win32' in sys.platform:
        #     loggerIniFile = os.path.join(scriptDir, 'mythbox_win32_log.ini')
        # elif 'darwin' in sys.platform:
        #     import StringIO, re
        #     loggerIniFile = os.path.join(scriptDir, 'mythbox_log.ini')
        #     logconfig = open(loggerIniFile, 'r').read()
        #     loggerIniFile = StringIO.StringIO(re.sub('mythbox\.log', os.path.expanduser(os.path.join('~', 'Library', 'Logs', 'mythbox.log')) , logconfig, 1))
        # else:
        loggerIniFile = os.path.join(scriptDir, 'mythbox_log.ini')

        # needs to be in local scope for fileConfig to find it
        from mythbox.log import XbmcLogHandler

        xbmc.log('MythBox: loggerIniFile = %s' % loggerIniFile)
        logging.config.fileConfig(loggerIniFile)
        self.log = logging.getLogger('mythbox.core')
        self.log.info('Mythbox Logger Initialized')

    def bootstrapPlatform(self):
        """Detect the platform, add bundled libs to sys.path, and ensure the
        cache directory exists."""
        self.stage = 'Initializing Platform'
        import mythbox.platform
        self.platform = mythbox.platform.getPlatform()
        self.platform.addLibsToSysPath()
        # interval 0 = consider a thread switch on every bytecode instruction
        sys.setcheckinterval(0)

        cacheDir = self.platform.getCacheDir()
        from mythbox.util import requireDir
        requireDir(cacheDir)
        self.log.info('MythBox %s Initialized' % self.platform.addonVersion())

    def bootstrapEventBus(self):
        # Central publish/subscribe bus shared by all other subsystems.
        self.bus = EventBus()

    def bootstrapCaches(self):
        """Create the file caches (thumbnails, channel icons, http) and the
        domain object cache."""
        self.stage = 'Initializing Caches'

        from mythbox.util import NativeTranslator
        from mythbox.filecache import FileCache, HttpResolver, MythThumbnailFileCache
        from mythbox.mythtv.resolver import MythChannelIconResolver, MythThumbnailResolver
        from os.path import join
        from mythbox.mythtv.cache import DomainCache

        self.domainCache = DomainCache(bus=self.bus)
        cacheDir = self.platform.getCacheDir()
        self.translator = NativeTranslator(self.platform.getScriptDir())
        self.mythThumbnailCache = MythThumbnailFileCache(join(cacheDir, 'thumbnail'), MythThumbnailResolver(), self.bus, self.domainCache)
        self.mythChannelIconCache = FileCache(join(cacheDir, 'channel'), MythChannelIconResolver())
        self.httpCache = FileCache(join(cacheDir, 'http'), HttpResolver())

        # lookup table so UI screens can grab caches by name
        self.cachesByName = {
            'mythThumbnailCache' : self.mythThumbnailCache,
            'mythChannelIconCache': self.mythChannelIconCache,
            'httpCache' : self.httpCache,
            'domainCache' : self.domainCache
        }

    def bootstrapSettings(self):
        """Load user settings, create the fanart provider, set the global
        socket timeout, and subscribe to setting-change events."""
        self.stage = 'Initializing Settings'
        from mythbox.settings import MythSettings
        self.settings = MythSettings(self.platform, self.translator, 'settings.xml', self.bus)

        #from fanart import FanArt
        #self.log.debug('Settings = \n %s' % self.settings)

        class DelayedInstantiationProxy(object):
            '''Could use a little introspection to sort this out but eh...'''
            # Lazily creates the real FanArt on first use. Currently unused --
            # see the commented-out assignment below.

            def __init__(self, *args, **kwargs):
                self.args = args
                self.kwargs = kwargs
                self.fanArt = None

            def requireDelegate(self):
                # instantiate the real FanArt on first access
                if self.fanArt is None:
                    from fanart import FanArt
                    self.fanArt = FanArt(*self.args, **self.kwargs)

            def getSeasonAndEpisode(self, program):
                self.requireDelegate()
                return self.fanArt.getSeasonAndEpisode(program)

            def getRandomPoster(self, program):
                self.requireDelegate()
                return self.fanArt.getRandomPoster(program)

            def getPosters(self, program):
                self.requireDelegate()
                return self.fanArt.getPosters(program)

            def hasPosters(self, program):
                self.requireDelegate()
                return self.fanArt.hasPosters(program)

            def clear(self):
                self.requireDelegate()
                self.fanArt.clear()

            def shutdown(self):
                self.requireDelegate()
                self.fanArt.shutdown()

            def configure(self, settings):
                self.requireDelegate()
                self.fanArt.configure(settings)

            def onEvent(self, event):
                self.requireDelegate()
                self.fanArt.onEvent(event)

        from fanart import FanArt
        self.fanArt = FanArt(self.platform, self.httpCache, self.settings, self.bus)
        #self.fanArt = DelayedInstantiationProxy(self.platform, self.httpCache, self.settings, self.bus)

        try:
            # socket timeout configurable via env var; defaults to 30 seconds
            import socket
            socket.setdefaulttimeout(float(os.getenv('MYTHBOX_TIMEOUT', '30')))
        except:
            self.log.exception('Error setting socket timeout')

        self.bus.register(self)
        # Generate fake event to reflect value in settings.xml instead of mythbox_log.ini
        from bus import Event
        self.onEvent({'id': Event.SETTING_CHANGED, 'tag':'logging_enabled', 'old':'DontCare', 'new':self.settings.get('logging_enabled')})

    def bootstrapUpdater(self):
        """Run the addon update check."""
        self.stage = 'Initializing Updater'
        from mythbox.updater import UpdateChecker
        UpdateChecker(self.platform).run()

    def bootstrapFeeds(self):
        # Feed aggregation used by the home screen.
        from mythbox.feeds import FeedHose
        self.feedHose = FeedHose(self.settings, self.bus)

    def bootstrapDebugShell(self):
        # debug shell only packaged with bin/package-debug-zip
        try:
            from mythbox.shell import DebugShell
            globals()['bootstrapper'] = self
            self.shell = DebugShell(self.bus, namespace=globals())
            self.shell.start()
        except ImportError:
            self.log.debug('Punting on debug shell -- not packaged')

    def bootstrapXbmcShutdownListener(self):
        """Start a watchdog thread that shuts the home screen down when XBMC
        aborts. Currently not invoked -- see bootstrapHomeScreen."""
        from threading import Thread

        class XbmcShutdownListener(Thread):

            def __init__(self, home, bus, log):
                Thread.__init__(self)
                self.home = home
                self.log = log
                self.shutdownReceived = False
                bus.register(self)

            def onEvent(self, event):
                # User-initiated shutdown from within MythBox: stop polling.
                from bus import Event
                if event['id'] == Event.SHUTDOWN:
                    self.shutdownReceived = True
                    # NOTE(review): presumably invoked from the bus thread,
                    # not this listener thread, so join() should not
                    # self-deadlock -- confirm
                    self.join()
                    xbmc.log('Joined shutdown listener')

            def run(self):
                import time
                xbmc.log('XbmcShutdownListener thread running..')
                cnt = 1
                # poll once per second for either an XBMC abort or a
                # MythBox-initiated shutdown
                while not xbmc.abortRequested and not self.shutdownReceived:
                    #xbmc.sleep(1000)
                    time.sleep(1)
                    xbmc.log('XbmcShutdownListner abort = %s user = %s tick %d ...' % (xbmc.abortRequested, self.shutdownReceived, cnt))
                    cnt += 1
                if xbmc.abortRequested:
                    xbmc.log('XBMC requested shutdown..')
                    self.home.shutdown()
                    xbmc.log('XBMC requested shutdown complete')
                xbmc.log('XbmcShutdownListener thread terminating')

        self.shutdownListener = XbmcShutdownListener(self.home, self.bus, self.log)
        self.shutdownListener.start()

    def bootstrapHomeScreen(self):
        """Create the home window and block in its modal loop until exit."""
        from mythbox.ui.home import HomeWindow
        self.home = HomeWindow(
            'mythbox_home.xml',
            self.platform.getScriptDir(),
            settings=self.settings,
            translator=self.translator,
            platform=self.platform,
            fanArt=self.fanArt,
            cachesByName=self.cachesByName,
            bus=self.bus,
            feedHose=self.feedHose)
        self.splash.close()
        #self.bootstrapXbmcShutdownListener()
        self.home.doModal()  # blocks until the home window closes

    def onEvent(self, event):
        """Bus callback.

        :param event: dict with an 'id' key identifying the event type
        """
        from bus import Event

        #
        # Apply changes to logger when user turns debug logging on/off
        #
        if event['id'] == Event.SETTING_CHANGED and event['tag'] == 'logging_enabled':
            import logging
            logging.root.debug('Setting changed: %s %s %s' % (event['tag'], event['old'], event['new']))

            if event['new'] == 'True':
                level = logging.DEBUG
            else:
                level = logging.WARN

            loggerNames = 'unittest mysql core method skin ui perf fanart settings cache event'.split()  # wire inject'.split()
            for name in loggerNames:
                logger = logging.getLogger('mythbox.%s' % name)
                logger.setLevel(level)

            # TODO: Adjust xbmc loglevel
            #savedXbmcLogLevel = xbmc.executehttpapi("GetLogLevel").replace("<li>", "")
            #xbmc.executehttpapi('SetLogLevel(3)')
| |
from cStringIO import StringIO
import contextlib
import copy
import logging
import time
import os
import requests
import subprocess
from teuthology.config import config as teuth_config
from teuthology import misc as teuthology
from teuthology import contextutil, packaging
from teuthology.exceptions import VersionNotFoundError
from teuthology.parallel import parallel
from ..orchestra import run
log = logging.getLogger(__name__)
# Should the RELEASE value get extracted from somewhere?
RELEASE = "1-0"

# This is intended to be a complete listing of ceph packages. If we're going
# to hardcode this stuff, I don't want to do it in more than once place.
PACKAGES = {
    'ceph': {
        'deb': [
            'ceph',
            'ceph-dbg',
            'ceph-mds',
            'ceph-mds-dbg',
            'ceph-common',
            'ceph-common-dbg',
            'ceph-fuse',
            'ceph-fuse-dbg',
            'ceph-test',
            'ceph-test-dbg',
            'radosgw',
            'radosgw-dbg',
            'python-ceph',
            'libcephfs1',
            'libcephfs1-dbg',
            'libcephfs-java',
            'libcephfs-jni',
            'librados2',
            'librados2-dbg',
            'librbd1',
            'librbd1-dbg',
            'rbd-fuse',
        ],
        'rpm': [
            'ceph-debuginfo',
            'ceph-radosgw',
            'ceph-test',
            'ceph-devel',
            'ceph',
            'ceph-fuse',
            'cephfs-java',
            'rest-bench',
            'libcephfs_jni1',
            'libcephfs1',
            'librados2',
            'librbd1',
            'python-ceph',
            'rbd-fuse',
        ],
    },
}
def _get_config_value_for_remote(ctx, remote, config, key):
"""
Look through config, and attempt to determine the "best" value to use for a
given key. For example, given:
config = {
'all':
{'branch': 'master'},
'branch': 'next'
}
_get_config_value_for_remote(ctx, remote, config, 'branch')
would return 'master'.
:param ctx: the argparse.Namespace object
:param remote: the teuthology.orchestra.remote.Remote object
:param config: the config dict
:param key: the name of the value to retrieve
"""
roles = ctx.cluster.remotes[remote]
if 'all' in config:
return config['all'].get(key)
elif roles:
for role in roles:
if role in config and key in config[role]:
return config[role].get(key)
return config.get(key)
def _get_uri(tag, branch, sha1):
"""
Set the uri -- common code used by both install and debian upgrade
"""
uri = None
if tag:
uri = 'ref/' + tag
elif branch:
uri = 'ref/' + branch
elif sha1:
uri = 'sha1/' + sha1
else:
# FIXME: Should master be the default?
log.debug("defaulting to master branch")
uri = 'ref/master'
return uri
def _get_baseurlinfo_and_dist(ctx, remote, config):
    """
    Through various commands executed on the remote, determines the
    distribution name and version in use, as well as the portion of the repo
    URI to use to specify which version of the project (normally ceph) to
    install. Example:

        {'arch': 'x86_64',
         'dist': 'raring',
         'dist_release': None,
         'distro': 'Ubuntu',
         'distro_release': None,
         'flavor': 'basic',
         'relval': '13.04',
         'uri': 'ref/master'}

    :param ctx: the argparse.Namespace object
    :param remote: the teuthology.orchestra.remote.Remote object
    :param config: the config dict
    :returns: dict -- the information you want.
    """
    retval = {}
    relval = None
    # machine architecture, e.g. 'x86_64'
    r = remote.run(
        args=['arch'],
        stdout=StringIO(),
    )
    retval['arch'] = r.stdout.getvalue().strip()
    # distributor id, e.g. 'Ubuntu', 'CentOS', 'Fedora'
    r = remote.run(
        args=['lsb_release', '-is'],
        stdout=StringIO(),
    )
    retval['distro'] = r.stdout.getvalue().strip()
    # release number, e.g. '13.04'
    r = remote.run(
        args=[
            'lsb_release', '-rs'], stdout=StringIO())
    retval['relval'] = r.stdout.getvalue().strip()
    dist_name = None
    if retval['distro'] in ('CentOS', 'RedHatEnterpriseServer'):
        # e.g. relval '6.4' -> major '6'; dist/distro_release 'centos6',
        # dist_release 'el6'
        relval = retval['relval']
        relval = relval[0:relval.find('.')]
        distri = 'centos'
        retval['distro_release'] = '%s%s' % (distri, relval)
        retval['dist'] = retval['distro_release']
        dist_name = 'el'
        retval['dist_release'] = '%s%s' % (dist_name, relval)
    elif retval['distro'] == 'Fedora':
        # e.g. 'fc18' for distro_release, dist and dist_release alike
        distri = retval['distro']
        dist_name = 'fc'
        retval['distro_release'] = '%s%s' % (dist_name, retval['relval'])
        retval['dist'] = retval['dist_release'] = retval['distro_release']
    else:
        # Debian-style: use the codename (e.g. 'raring') as 'dist'
        r = remote.run(
            args=['lsb_release', '-sc'],
            stdout=StringIO(),
        )
        retval['dist'] = r.stdout.getvalue().strip()
        retval['distro_release'] = None
        retval['dist_release'] = None

    # branch/tag/sha1 flavor
    retval['flavor'] = config.get('flavor', 'basic')

    log.info('config is %s', config)
    # tag/branch/sha1 may come from the 'all' section, a role section, or
    # the top level of the config -- see _get_config_value_for_remote
    tag = _get_config_value_for_remote(ctx, remote, config, 'tag')
    branch = _get_config_value_for_remote(ctx, remote, config, 'branch')
    sha1 = _get_config_value_for_remote(ctx, remote, config, 'sha1')
    uri = _get_uri(tag, branch, sha1)
    retval['uri'] = uri

    return retval
def _get_baseurl(ctx, remote, config):
    """
    Figures out which package repo base URL to use.

    Example:
        'http://gitbuilder.ceph.com/ceph-deb-raring-x86_64-basic/ref/master'

    :param ctx: the argparse.Namespace object
    :param remote: the teuthology.orchestra.remote.Remote object
    :param config: the config dict
    :returns: str -- the URL
    """
    # distro name, arch, flavor and uri fragment for this remote
    params = _get_baseurlinfo_and_dist(ctx, remote, config)
    template = 'http://{host}/{proj}-{pkg_type}-{dist}-{arch}-{flavor}/{uri}'
    return template.format(
        host=teuth_config.gitbuilder_host,
        proj=config.get('project', 'ceph'),
        pkg_type=remote.system_type,
        **params
    )
def _block_looking_for_package_version(remote, base_url, wait=False):
    """
    Look for, and parse, a file called 'version' in base_url.

    :param remote: the teuthology.orchestra.remote.Remote object
    :param wait: wait forever for the file to show up. (default False)
    :returns: str -- the version e.g. '0.67-240-g67a95b9-1raring'
    :raises: VersionNotFoundError
    """
    version_url = base_url + '/version'
    while True:
        resp = requests.get(version_url)
        if resp.ok:
            break
        if not wait:
            raise VersionNotFoundError(base_url)
        log.info(
            'Package not there yet (got HTTP code %s), waiting...',
            resp.status_code,
        )
        time.sleep(15)
    version = resp.text.strip()
    # FIXME: 'version' as retreived from the repo is actually the RPM version
    # PLUS *part* of the release. Example:
    # Right now, ceph master is given the following version in the repo file:
    # v0.67-rc3.164.gd5aa3a9 - whereas in reality the RPM version is 0.61.7
    # and the release is 37.g1243c97.el6 (for centos6).
    # Point being, I have to mangle a little here.
    if version[0] == 'v':
        version = version[1:]
    if '-' in version:
        version = version.split('-')[0]
    return version
def _get_local_dir(config, remote):
"""
Extract local directory name from the task lists.
Copy files over to the remote site.
"""
ldir = config.get('local', None)
if ldir:
remote.run(args=['sudo', 'mkdir', '-p', ldir,])
for fyle in os.listdir(ldir):
fname = "%s/%s" % (ldir, fyle)
teuthology.sudo_write_file(remote, fname, open(fname).read(), '644')
return ldir
def _update_deb_package_list_and_install(ctx, remote, debs, config):
    """
    Runs ``apt-get update`` first, then runs ``apt-get install``, installing
    the requested packages on the remote system.

    TODO: split this into at least two functions.

    :param ctx: the argparse.Namespace object
    :param remote: the teuthology.orchestra.remote.Remote object
    :param debs: list of packages names to install
    :param config: the config dict
    """

    # check for ceph release key
    r = remote.run(
        args=[
            'sudo', 'apt-key', 'list', run.Raw('|'), 'grep', 'Ceph',
        ],
        stdout=StringIO(),
        check_status=False,
    )
    if r.stdout.getvalue().find('Ceph automated package') == -1:
        # if it doesn't exist, add it
        remote.run(
            args=[
                'wget', '-q', '-O-',
                'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc',
                run.Raw('|'),
                'sudo', 'apt-key', 'add', '-',
            ],
            stdout=StringIO(),
        )

    baseparms = _get_baseurlinfo_and_dist(ctx, remote, config)
    log.info("Installing packages: {pkglist} on remote deb {arch}".format(
        pkglist=", ".join(debs), arch=baseparms['arch'])
    )
    # get baseurl
    base_url = _get_baseurl(ctx, remote, config)
    log.info('Pulling from %s', base_url)

    # get package version string
    # FIXME this is a terrible hack.
    while True:
        resp = requests.get(base_url + '/version')
        if not resp.ok:
            # optionally wait indefinitely for the gitbuilder package
            # to appear
            if config.get('wait_for_package'):
                log.info('Package not there yet, waiting...')
                time.sleep(15)
                continue
            # surface the HTTP failure detail in the log before raising
            # our own, more specific, error
            try:
                resp.raise_for_status()
            except Exception:
                log.exception("Error fetching package version")
            raise VersionNotFoundError("%s/version" % base_url)
        version = resp.text.strip()
        log.info('Package version is %s', version)
        break

    # point apt at the gitbuilder repo for this project
    remote.run(
        args=[
            'echo', 'deb', base_url, baseparms['dist'], 'main',
            run.Raw('|'),
            'sudo', 'tee', '/etc/apt/sources.list.d/{proj}.list'.format(
                proj=config.get('project', 'ceph')),
        ],
        stdout=StringIO(),
    )
    remote.run(args=['sudo', 'apt-get', 'update'], check_status=False)
    # install each package pinned to the exact repo version
    remote.run(
        args=[
            'sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-y',
            '--force-yes',
            '-o', run.Raw('Dpkg::Options::="--force-confdef"'), '-o', run.Raw(
                'Dpkg::Options::="--force-confold"'),
            'install',
        ] + ['%s=%s' % (d, version) for d in debs],
    )
    # install any locally supplied .debs on top
    ldir = _get_local_dir(config, remote)
    if ldir:
        for fyle in os.listdir(ldir):
            fname = "%s/%s" % (ldir, fyle)
            remote.run(args=['sudo', 'dpkg', '-i', fname],)
def _yum_fix_repo_priority(remote, project, uri):
    """
    On the remote, add 'priority=1' lines to each enabled repo in:

        /etc/yum.repos.d/{project}.repo

    :param remote: the teuthology.orchestra.remote.Remote object
    :param project: the project whose repos need modification
    :param uri: uri fragment (e.g. 'ref/master') substituted into repo paths
    """
    repo_path = '/etc/yum.repos.d/%s.repo' % project
    remote.run(
        args=[
            # guard: only edit the file if it actually exists
            'if', 'test', '-f', repo_path, run.Raw(';'), 'then',
            'sudo', 'sed', '-i', '-e',
            # ':a;N;$!ba' slurps the whole file so the multi-line pattern can
            # insert 'priority=1' between each 'enabled=1' and 'gpg' line
            run.Raw('\':a;N;$!ba;s/enabled=1\\ngpg/enabled=1\\npriority=1\\ngpg/g\''),
            '-e',
            # rewrite any 'ref/<branch>/' path component to the given uri
            run.Raw("'s;ref/[a-zA-Z0-9_-]*/;{uri}/;g'".format(uri=uri)),
            repo_path, run.Raw(';'), 'fi'
        ]
    )
def _yum_fix_repo_host(remote, project):
    """
    Update the hostname to reflect the gitbuilder_host setting.

    No-op when the configured gitbuilder host matches the built-in default.
    """
    default_host = teuth_config._defaults['gitbuilder_host']
    configured_host = teuth_config.gitbuilder_host
    if configured_host == default_host:
        return
    repo_path = '/etc/yum.repos.d/%s.repo' % project
    sed_expr = "'s/{0}/{1}/'".format(default_host, configured_host)
    # only edit the repo file if it exists
    remote.run(
        args=['if', 'test', '-f', repo_path, run.Raw(';'), 'then',
              'sudo', 'sed', '-i', '-e', run.Raw(sed_expr),
              repo_path, run.Raw(';'), 'fi']
    )
def _yum_set_check_obsoletes(remote):
    """
    Set check_obsoletes = 1 in /etc/yum/pluginconf.d/priorities.conf

    Creates a backup at /etc/yum/pluginconf.d/priorities.conf.orig so we can
    restore later.
    """
    conf_path = '/etc/yum/pluginconf.d/priorities.conf'
    backup_path = conf_path + '.orig'
    # take the backup first, then append the setting
    remote.run(args=['sudo', 'cp', '-af', conf_path, backup_path])
    remote.run(args=['echo', 'check_obsoletes = 1', run.Raw('|'),
                     'sudo', 'tee', '-a', conf_path])
def _yum_unset_check_obsoletes(remote):
    """
    Restore the /etc/yum/pluginconf.d/priorities.conf backup
    """
    conf_path = '/etc/yum/pluginconf.d/priorities.conf'
    backup_path = conf_path + '.orig'
    # check_status=False: the backup may never have been created
    remote.run(args=['sudo', 'mv', '-f', backup_path, conf_path],
               check_status=False)
def _update_rpm_package_list_and_install(ctx, remote, rpm, config):
    """
    Installs the ceph-release package for the relevant branch, then installs
    the requested packages on the remote system.

    TODO: split this into at least two functions.

    :param ctx: the argparse.Namespace object
    :param remote: the teuthology.orchestra.remote.Remote object
    :param rpm: list of packages names to install
    :param config: the config dict
    """
    baseparms = _get_baseurlinfo_and_dist(ctx, remote, config)
    log.info("Installing packages: {pkglist} on remote rpm {arch}".format(
        pkglist=", ".join(rpm), arch=baseparms['arch']))
    host = teuth_config.gitbuilder_host
    dist_release = baseparms['dist_release']
    project = config.get('project', 'ceph')
    # build the URL of the <project>-release rpm that installs the yum repo
    start_of_url = 'http://{host}/{proj}-rpm-{distro_release}-{arch}-{flavor}/{uri}'.format(
        proj=project, host=host, **baseparms)
    proj_release = '{proj}-release-{release}.{dist_release}.noarch'.format(
        proj=project, release=RELEASE, dist_release=dist_release)
    rpm_name = "{rpm_nm}.rpm".format(rpm_nm=proj_release)
    base_url = "{start_of_url}/noarch/{rpm_name}".format(
        start_of_url=start_of_url, rpm_name=rpm_name)

    # When this was one command with a pipe, it would sometimes
    # fail with the message 'rpm: no packages given for install'
    remote.run(args=['wget', base_url, ],)
    remote.run(args=['sudo', 'yum', '-y', 'localinstall', rpm_name])
    remote.run(args=['rm', '-f', rpm_name])

    # tweak the freshly installed repo file
    uri = baseparms['uri']
    _yum_fix_repo_priority(remote, project, uri)
    _yum_fix_repo_host(remote, project)
    _yum_set_check_obsoletes(remote)

    remote.run(
        args=[
            'sudo', 'yum', 'clean', 'all',
        ])
    ldir = _get_local_dir(config, remote)
    for cpack in rpm:
        pkg = None
        if ldir:
            pkg = "{ldir}/{cpack}".format(
                ldir=ldir,
                cpack=cpack,
            )
            # when a local copy of the rpm exists, remove any installed
            # version first and install the local one in its place
            remote.run(
                args = ['if', 'test', '-e',
                        run.Raw(pkg), run.Raw(';'), 'then',
                        'sudo', 'yum', 'remove', pkg, '-y', run.Raw(';'),
                        'sudo', 'yum', 'install', pkg, '-y',
                        run.Raw(';'), 'fi']
            )
        if pkg is None:
            # no local dir configured: install straight from the repo
            remote.run(args=['sudo', 'yum', 'install', cpack, '-y'])
        else:
            # local dir configured but this rpm was not present in it:
            # fall back to the repo package
            remote.run(
                args = ['if', 'test', run.Raw('!'), '-e',
                        run.Raw(pkg), run.Raw(';'), 'then',
                        'sudo', 'yum', 'install', cpack, '-y',
                        run.Raw(';'), 'fi'])
def verify_package_version(ctx, config, remote):
    """
    Ensures that the version of package installed is what
    was asked for in the config.

    For most cases this is for ceph, but we also install samba
    for example.

    :returns: True when verification is skipped
    :raises: RuntimeError when the installed version does not match
    """
    # Do not verify the version if the ceph-deploy task is being used to
    # install ceph. Verifying the ceph installed by ceph-deploy should work,
    # but the qa suites will need reorganized first to run ceph-deploy
    # before the install task.
    # see: http://tracker.ceph.com/issues/11248
    if config.get("extras"):
        log.info("Skipping version verification...")
        return True

    base_url = _get_baseurl(ctx, remote, config)
    expected = _block_looking_for_package_version(
        remote,
        base_url,
        config.get('wait_for_package', False)
    )
    pkg_to_check = config.get('project', 'ceph')
    installed_ver = packaging.get_package_version(remote, pkg_to_check)

    # guard clause: bail out loudly when the versions disagree
    if not (installed_ver and expected in installed_ver):
        raise RuntimeError(
            "{pkg} version {ver} was not installed, found {installed}.".format(
                ver=expected,
                installed=installed_ver,
                pkg=pkg_to_check
            )
        )
    log.info("The correct {pkg} version {ver} is installed.".format(
        ver=expected,
        pkg=pkg_to_check
    ))
def purge_data(ctx):
    """
    Purge /var/lib/ceph on every remote in ctx.

    :param ctx: the argparse.Namespace object
    """
    # fan out one purge task per remote
    with parallel() as workers:
        for rem in ctx.cluster.remotes.iterkeys():
            workers.spawn(_purge_data, rem)
def _purge_data(remote):
    """
    Purge /var/lib/ceph on remote.

    :param remote: the teuthology.orchestra.remote.Remote object
    """
    log.info('Purging /var/lib/ceph on %s', remote)
    remote.run(args=[
        # first attempt is best-effort; '|| true' keeps the overall command
        # from failing when some files cannot be removed yet
        'sudo',
        'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
        run.Raw('||'),
        'true',
        run.Raw(';'),
        # if the directory survived, unmount anything still mounted one or
        # two levels below it (e.g. osd data dirs) ...
        'test', '-d', '/var/lib/ceph',
        run.Raw('&&'),
        'sudo',
        'find', '/var/lib/ceph',
        '-mindepth', '1',
        '-maxdepth', '2',
        '-type', 'd',
        '-exec', 'umount', '{}', ';',
        run.Raw(';'),
        # ... then retry the removal
        'sudo',
        'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
    ])
def install_packages(ctx, pkgs, config):
    """
    Installs packages on each remote in ctx.

    :param ctx: the argparse.Namespace object
    :param pkgs: dict of package-name lists keyed by package type
                 ('deb' / 'rpm')
    :param config: the config dict
    """
    installers = {
        "deb": _update_deb_package_list_and_install,
        "rpm": _update_rpm_package_list_and_install,
    }
    # install on all remotes in parallel
    with parallel() as workers:
        for rem in ctx.cluster.remotes.iterkeys():
            pkg_type = teuthology.get_system_type(rem)
            workers.spawn(
                installers[pkg_type],
                ctx, rem, pkgs[pkg_type], config)
    # once everything finished, verify that the install worked as expected
    for rem in ctx.cluster.remotes.iterkeys():
        verify_package_version(ctx, config, rem)
def _remove_deb(ctx, config, remote, debs):
    """
    Removes Debian packages from remote, rudely

    TODO: be less rude (e.g. using --force-yes)

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    :param remote: the teuthology.orchestra.remote.Remote object
    :param debs: list of packages names to remove
    """
    log.info("Removing packages: {pkglist} on Debian system.".format(
        pkglist=", ".join(debs)))
    # first ask nicely: purge each package individually, ignoring failures
    remote.run(
        args=[
            'for', 'd', 'in',
        ] + debs + [
            run.Raw(';'),
            'do',
            'sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-y', '--force-yes',
            '-o', run.Raw('Dpkg::Options::="--force-confdef"'), '-o', run.Raw(
                'Dpkg::Options::="--force-confold"'), 'purge',
            run.Raw('$d'),
            run.Raw('||'),
            'true',
            run.Raw(';'),
            'done',
        ])
    # mop up anything that is broken
    remote.run(
        args=[
            'dpkg', '-l',
            run.Raw('|'),
            # Any package that is unpacked or half-installed and also requires
            # reinstallation
            'grep', '^.\(U\|H\)R',
            run.Raw('|'),
            # second column of dpkg -l is the package name
            'awk', '{print $2}',
            run.Raw('|'),
            'sudo',
            'xargs', '--no-run-if-empty',
            'dpkg', '-P', '--force-remove-reinstreq',
        ])
    # then let apt clean up
    remote.run(
        args=[
            'sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-y', '--force-yes',
            '-o', run.Raw('Dpkg::Options::="--force-confdef"'), '-o', run.Raw(
                'Dpkg::Options::="--force-confold"'),
            'autoremove',
        ],
    )
def _remove_rpm(ctx, config, remote, rpm):
    """
    Removes RPM packages from remote

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    :param remote: the teuthology.orchestra.remote.Remote object
    :param rpm: list of packages names to remove
    """
    log.info("Removing packages: {pkglist} on rpm system.".format(
        pkglist=", ".join(rpm)))
    # needed for dist_release, used to name the <project>-release package
    baseparms = _get_baseurlinfo_and_dist(ctx, remote, config)
    dist_release = baseparms['dist_release']

    # remove each package individually, ignoring failures
    remote.run(
        args=[
            'for', 'd', 'in',
        ] + rpm + [
            run.Raw(';'),
            'do',
            'sudo', 'yum', 'remove',
            run.Raw('$d'),
            '-y',
            run.Raw('||'),
            'true',
            run.Raw(';'),
            'done',
        ])
    remote.run(
        args=[
            'sudo', 'yum', 'clean', 'all',
        ])
    # also erase the <project>-release package that installed the yum repo
    projRelease = '%s-release-%s.%s.noarch' % (
        config.get('project', 'ceph'), RELEASE, dist_release)
    remote.run(args=['sudo', 'yum', 'erase', projRelease, '-y'])
    remote.run(
        args=[
            'sudo', 'yum', 'clean', 'expire-cache',
        ])
def remove_packages(ctx, config, pkgs):
    """
    Removes packages from each remote in ctx.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    :param pkgs: dict of package-name lists keyed by package type
                 ('deb' / 'rpm')
    """
    removers = {
        "deb": _remove_deb,
        "rpm": _remove_rpm,
    }
    # remove on all remotes in parallel
    with parallel() as workers:
        for rem in ctx.cluster.remotes.iterkeys():
            pkg_type = teuthology.get_system_type(rem)
            workers.spawn(removers[pkg_type], ctx, config, rem, pkgs[pkg_type])
def _remove_sources_list_deb(remote, proj):
    """
    Removes /etc/apt/sources.list.d/{proj}.list and then runs ``apt-get
    update``.

    :param remote: the teuthology.orchestra.remote.Remote object
    :param proj: the project whose sources.list needs removing
    """
    sources_path = '/etc/apt/sources.list.d/{proj}.list'.format(proj=proj)
    # check_status=False: the file may already be gone, which is fine
    remote.run(
        args=['sudo', 'rm', sources_path,
              run.Raw('&&'),
              'sudo', 'apt-get', 'update'],
        check_status=False,
    )
def _remove_sources_list_rpm(remote, proj):
    """
    Removes /etc/yum.repos.d/{proj}.repo, /var/lib/{proj}, and /var/log/{proj}

    :param remote: the teuthology.orchestra.remote.Remote object
    :param proj: the project whose .repo needs removing
    """
    repo_file = '/etc/yum.repos.d/{proj}.repo'.format(proj=proj)
    # check_status=False throughout: any of these may already be absent
    remote.run(
        args=['sudo', 'rm', repo_file],
        check_status=False,
    )
    # FIXME
    # There probably should be a way of removing these files that is
    # implemented in the yum/rpm remove procedures for the ceph package.
    # FIXME but why is this function doing these things?
    for leftover in ('/var/lib/{proj}', '/var/log/{proj}'):
        remote.run(
            args=['sudo', 'rm', '-r', leftover.format(proj=proj)],
            check_status=False,
        )
    _yum_unset_check_obsoletes(remote)
def remove_sources(ctx, config):
    """
    Removes repo source files from each remote in ctx, for both the
    configured project and for calamari.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    remove_sources_pkgs = {
        'deb': _remove_sources_list_deb,
        'rpm': _remove_sources_list_rpm,
    }
    # The original code duplicated this whole stanza verbatim for the
    # project and for 'calamari'; iterate instead. Each project still gets
    # its own parallel batch, preserving the original execution order.
    for project in (config.get('project', 'ceph'), 'calamari'):
        log.info("Removing {proj} sources lists".format(
            proj=project))
        with parallel() as p:
            for remote in ctx.cluster.remotes.iterkeys():
                remove_fn = remove_sources_pkgs[remote.os.package_type]
                p.spawn(remove_fn, remote, project)
@contextlib.contextmanager
def install(ctx, config):
    """
    The install task. Installs packages for a given project on all hosts in
    ctx. May work for projects besides ceph, but may not. Patches welcomed!

    On exit, removes the installed packages and repo sources, and (for the
    ceph project) purges /var/lib/ceph.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    project = config.get('project', 'ceph')

    # Copy the package lists: the '+=' below would otherwise append the
    # extra packages to the shared module-level PACKAGES lists in place,
    # polluting every later invocation.
    debs = list(PACKAGES.get(project, {}).get('deb', []))
    rpm = list(PACKAGES.get(project, {}).get('rpm', []))

    # pull any additional packages out of config; default to an empty list
    # so the '+=' below does not raise TypeError when the key is absent
    extra_pkgs = config.get('extra_packages', [])
    log.info('extra packages: {packages}'.format(packages=extra_pkgs))
    debs += extra_pkgs
    rpm += extra_pkgs

    # When extras is in the config we want to purposely not install ceph.
    # This is typically used on jobs that use ceph-deploy to install ceph
    # or when we are testing ceph-deploy directly.  The packages being
    # installed are needed to properly test ceph as ceph-deploy won't
    # install these. 'extras' might not be the best name for this.
    extras = config.get('extras')
    if extras is not None:
        debs = ['ceph-test', 'ceph-test-dbg', 'ceph-fuse', 'ceph-fuse-dbg',
                'librados2', 'librados2-dbg', 'librbd1', 'librbd1-dbg',
                'python-ceph']
        rpm = ['ceph-fuse', 'librbd1', 'librados2', 'ceph-test', 'python-ceph']

    # install lib deps (so we explicitly specify version), but do not
    # uninstall them, as other packages depend on them (e.g., kvm)
    # TODO: these can probably be removed as these packages are now included
    # in PACKAGES. We've found that not uninstalling them each run can
    # sometimes cause a baremetal machine to end up in a weird state so
    # they were included in PACKAGES to ensure that nuke cleans them up.
    proj_install_debs = {'ceph': [
        'librados2',
        'librados2-dbg',
        'librbd1',
        'librbd1-dbg',
    ]}

    proj_install_rpm = {'ceph': [
        'librbd1',
        'librados2',
    ]}

    install_debs = proj_install_debs.get(project, [])
    install_rpm = proj_install_rpm.get(project, [])

    # TODO: see previous todo comment. The install_debs and install_rpm
    # part can and should be removed eventually as those packages are now
    # present in PACKAGES.
    install_info = {
        "deb": debs + install_debs,
        "rpm": rpm + install_rpm}
    remove_info = {
        "deb": debs,
        "rpm": rpm}
    install_packages(ctx, install_info, config)
    try:
        yield
    finally:
        remove_packages(ctx, config, remove_info)
        remove_sources(ctx, config)
        if project == 'ceph':
            purge_data(ctx)
def _upgrade_deb_packages(ctx, config, remote, debs):
    """
    Upgrade project's packages on remote Debian host
    Before doing so, installs the project's GPG key, writes a sources.list
    file, and runs ``apt-get update``.

    The gitbuilder URI to pull from is derived from the ``sha1``, ``branch``
    or ``tag`` entries of *config* (via ``_get_uri``); if the package is not
    yet built, ``wait_for_package`` in *config* makes this poll until it is.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    :param remote: the teuthology.orchestra.remote.Remote object
    :param debs: the Debian packages to be installed
    :raises VersionNotFoundError: if the gitbuilder has no version file and
        ``wait_for_package`` is not set
    """
    # check for ceph release key
    r = remote.run(
        args=[
            'sudo', 'apt-key', 'list', run.Raw('|'), 'grep', 'Ceph',
        ],
        stdout=StringIO(),
        check_status=False,
    )
    if r.stdout.getvalue().find('Ceph automated package') == -1:
        # if it doesn't exist, add it
        remote.run(
            args=[
                'wget', '-q', '-O-',
                'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc',
                run.Raw('|'),
                'sudo', 'apt-key', 'add', '-',
            ],
            stdout=StringIO(),
        )
    # get distro name and arch
    r = remote.run(
        args=['lsb_release', '-sc'],
        stdout=StringIO(),
    )
    dist = r.stdout.getvalue().strip()
    r = remote.run(
        args=['arch'],
        stdout=StringIO(),
    )
    arch = r.stdout.getvalue().strip()
    log.info("dist %s arch %s", dist, arch)
    # branch/tag/sha1 flavor
    flavor = 'basic'
    sha1 = config.get('sha1')
    branch = config.get('branch')
    tag = config.get('tag')
    uri = _get_uri(tag, branch, sha1)
    base_url = 'http://{host}/{proj}-deb-{dist}-{arch}-{flavor}/{uri}'.format(
        host=teuth_config.gitbuilder_host,
        proj=config.get('project', 'ceph'),
        dist=dist,
        arch=arch,
        flavor=flavor,
        uri=uri,
    )
    log.info('Pulling from %s', base_url)
    # get package version string; poll every 15s if the build isn't
    # finished yet and 'wait_for_package' was requested
    while True:
        resp = requests.get(base_url + '/version')
        if not resp.ok:
            if config.get('wait_for_package'):
                log.info('Package not there yet, waiting...')
                time.sleep(15)
                continue
            raise VersionNotFoundError("%s/version" % base_url)
        version = resp.text.strip()
        log.info('Package version is %s', version)
        break
    # write the gitbuilder repo to a project-specific sources.list.d file
    remote.run(
        args=[
            'echo', 'deb', base_url, dist, 'main',
            run.Raw('|'),
            'sudo', 'tee', '/etc/apt/sources.list.d/{proj}.list'.format(
                proj=config.get('project', 'ceph')),
        ],
        stdout=StringIO(),
    )
    remote.run(args=['sudo', 'apt-get', 'update'], check_status=False)
    # pin every requested package to the exact gitbuilder version
    remote.run(
        args=[
            'sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-y', '--force-yes',
            '-o', run.Raw('Dpkg::Options::="--force-confdef"'), '-o', run.Raw(
                'Dpkg::Options::="--force-confold"'),
            'install',
        ] + ['%s=%s' % (d, version) for d in debs],
    )
def _upgrade_rpm_packages(ctx, config, remote, pkgs):
    """
    Upgrade project's packages on remote RPM-based host
    Before doing so, it makes sure the project's -release RPM is installed -
    removing any previous version first.

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    :param remote: the teuthology.orchestra.remote.Remote object
    :param pkgs: the RPM packages to be installed
    """
    distinfo = _get_baseurlinfo_and_dist(ctx, remote, config)
    log.info(
        "Host {host} is: {distro} {ver} {arch}".format(
            host=remote.shortname,
            distro=distinfo['distro'],
            ver=distinfo['relval'],
            arch=distinfo['arch'],)
    )
    base_url = _get_baseurl(ctx, remote, config)
    log.info('Repo base URL: %s', base_url)
    project = config.get('project', 'ceph')
    # Remove the -release package before upgrading it
    args = ['sudo', 'rpm', '-ev', '%s-release' % project]
    remote.run(args=args)
    # Build the new -release package path
    release_rpm = "{base}/noarch/{proj}-release-{release}.{dist_release}.noarch.rpm".format(
        base=base_url,
        proj=project,
        release=RELEASE,
        dist_release=distinfo['dist_release'],
    )
    # Upgrade the -release package
    args = ['sudo', 'rpm', '-Uv', release_rpm]
    remote.run(args=args)
    # repair the yum repo files written by the -release package so they
    # point at the right gitbuilder host/uri and priority
    uri = _get_baseurlinfo_and_dist(ctx, remote, config)['uri']
    _yum_fix_repo_priority(remote, project, uri)
    _yum_fix_repo_host(remote, project)
    _yum_set_check_obsoletes(remote)
    remote.run(
        args=[
            'sudo', 'yum', 'clean', 'all',
        ])
    # Actually upgrade the project packages
    args = ['sudo', 'yum', '-y', 'install']
    args += pkgs
    remote.run(args=args)
def upgrade_old_style(ctx, node, remote, pkgs, system_type):
    """
    Handle the upgrade using methods in use prior to ceph-deploy.

    Dispatches to the Debian or RPM upgrade helper based on
    *system_type* ('deb' or 'rpm'); any other value is a no-op,
    matching the original if/elif behavior.
    """
    upgraders = {
        'deb': _upgrade_deb_packages,
        'rpm': _upgrade_rpm_packages,
    }
    upgrader = upgraders.get(system_type)
    if upgrader is not None:
        upgrader(ctx, node, remote, pkgs)
def upgrade_with_ceph_deploy(ctx, node, remote, pkgs, sys_type):
    """
    Upgrade using ceph-deploy

    The '--dev' argument comes from any of the 'branch', 'tag' or 'dev'
    keys of *node* (the last one seen wins), and '--release' from its
    'release' key.  After the ceph-deploy install, all ceph services on
    the remote are restarted.
    """
    dev_keys = ('branch', 'tag', 'dev')
    dev_value = ''
    release_value = ''
    for key in node.keys():
        if key in dev_keys:
            dev_value = node[key]
        if key == 'release':
            release_value = node[key]
    cmdline = []
    if dev_value:
        cmdline.extend(['--dev', dev_value])
    if release_value:
        cmdline.extend(['--release', release_value])
    cmdline.append(remote.name)
    subprocess.call(['ceph-deploy', 'install'] + cmdline)
    remote.run(args=['sudo', 'restart', 'ceph-all'])
def upgrade_common(ctx, config, deploy_style):
    """
    Common code for upgrading packages on every remote named in *config*.

    Builds a normalized remote -> per-node config mapping (either from
    the 'all' key or from individual role keys), merges in any 'install'
    overrides, and invokes *deploy_style* (old-style gitbuilder upgrade
    or ceph-deploy) with the per-distro package list.

    :param ctx: the argparse.Namespace object
    :param config: the config dict (or None)
    :param deploy_style: callable(ctx, node, remote, pkgs, system_type)
    """
    assert config is None or isinstance(config, dict), \
        "install.upgrade only supports a dictionary for configuration"
    project = config.get('project', 'ceph')
    # use 'install' overrides here, in case the upgrade target is left
    # unspecified/implicit.
    install_overrides = ctx.config.get(
        'overrides', {}).get('install', {}).get(project, {})
    log.info('project %s config %s overrides %s', project, config,
             install_overrides)
    # FIXME: extra_pkgs is not distro-agnostic
    extra_pkgs = config.get('extra_packages', [])
    log.info('extra packages: {packages}'.format(packages=extra_pkgs))
    # build a normalized remote -> config dict
    remotes = {}
    if 'all' in config:
        for remote in ctx.cluster.remotes.iterkeys():
            remotes[remote] = config.get('all')
    else:
        for role in config.keys():
            remotes_dict = ctx.cluster.only(role).remotes
            if not remotes_dict:
                # This is a regular config argument, not a role
                continue
            remote = remotes_dict.keys()[0]
            if remote in remotes:
                log.warn('remote %s came up twice (role %s)', remote, role)
                continue
            remotes[remote] = config.get(role)
    for remote, node in remotes.iteritems():
        if not node:
            node = {}
        this_overrides = copy.deepcopy(install_overrides)
        # an explicit upgrade target in the node config beats the override
        if 'sha1' in node or 'tag' in node or 'branch' in node:
            log.info('config contains sha1|tag|branch, removing those keys from override')
            this_overrides.pop('sha1', None)
            this_overrides.pop('tag', None)
            this_overrides.pop('branch', None)
        teuthology.deep_merge(node, this_overrides)
        log.info('remote %s config %s', remote, node)
        system_type = teuthology.get_system_type(remote)
        assert system_type in ('deb', 'rpm')
        pkgs = PACKAGES[project][system_type]
        excluded_packages = config.get('exclude_packages', list())
        pkgs = list(set(pkgs).difference(set(excluded_packages)))
        log.info("Upgrading {proj} {system_type} packages: {pkgs}".format(
            proj=project, system_type=system_type, pkgs=', '.join(pkgs)))
        # FIXME: again, make extra_pkgs distro-agnostic
        pkgs += extra_pkgs
        node['project'] = project
        deploy_style(ctx, node, remote, pkgs, system_type)
        verify_package_version(ctx, node, remote)
# Shared __doc__ template for upgrade() and ceph_deploy_upgrade(); the
# {cmd_parameter} placeholder is filled in with the task name below.
# NOTE: the original literal opened with four quotes ("""") which made
# the rendered docstring begin with a stray '"' character; fixed here.
docstring_for_upgrade = """
Upgrades packages for a given project.
For example::
    tasks:
    - install.{cmd_parameter}:
         all:
            branch: end
or specify specific roles::
    tasks:
    - install.{cmd_parameter}:
         mon.a:
            branch: end
         osd.0:
            branch: other
or rely on the overrides for the target version::
    overrides:
      install:
        ceph:
          sha1: ...
    tasks:
    - install.{cmd_parameter}:
        all:
(HACK: the overrides will *only* apply the sha1/branch/tag if those
keys are not present in the config.)
It is also possible to attempt to exclude packages from the upgrade set:
    tasks:
    - install.{cmd_parameter}:
        exclude_packages: ['ceph-test', 'ceph-test-dbg']
:param ctx: the argparse.Namespace object
:param config: the config dict
"""
#
# __doc__ strings for upgrade and ceph_deploy_upgrade are set from
# the same string so that help(upgrade) and help(ceph_deploy_upgrade)
# look the same.
#
@contextlib.contextmanager
def upgrade(ctx, config):
    # Old-style (pre-ceph-deploy) upgrade; __doc__ is assigned just below
    # from the shared docstring_for_upgrade template.
    upgrade_common(ctx, config, upgrade_old_style)
    yield
upgrade.__doc__ = docstring_for_upgrade.format(cmd_parameter='upgrade')
@contextlib.contextmanager
def ceph_deploy_upgrade(ctx, config):
    # ceph-deploy-driven upgrade; __doc__ is assigned just below from the
    # shared docstring_for_upgrade template.
    upgrade_common(ctx, config, upgrade_with_ceph_deploy)
    yield
ceph_deploy_upgrade.__doc__ = docstring_for_upgrade.format(
    cmd_parameter='ceph_deploy_upgrade')
@contextlib.contextmanager
def ship_utilities(ctx, config):
    """
    Write a copy of valgrind.supp to each of the remote sites.  Set
    executables used by Ceph in /usr/bin.  When finished (upon exit of
    the teuthology run), remove these files.

    NOTE: the old docstring claimed /usr/local/bin, but destdir below is
    and always was /usr/bin.

    :param ctx: Context
    :param config: Configuration (must be None; takes no options)
    """
    assert config is None
    testdir = teuthology.get_testdir(ctx)
    filenames = []
    log.info('Shipping valgrind.supp...')
    # open() rather than the Python-2-only file() builtin
    with open(os.path.join(os.path.dirname(__file__), 'valgrind.supp'), 'rb') as f:
        fn = os.path.join(testdir, 'valgrind.supp')
        filenames.append(fn)
        for rem in ctx.cluster.remotes.iterkeys():
            teuthology.sudo_write_file(
                remote=rem,
                path=fn,
                data=f,
            )
            # rewind so the next remote gets the full file contents
            f.seek(0)
    FILES = ['daemon-helper', 'adjust-ulimits']
    destdir = '/usr/bin'
    for filename in FILES:
        log.info('Shipping %r...', filename)
        src = os.path.join(os.path.dirname(__file__), filename)
        dst = os.path.join(destdir, filename)
        filenames.append(dst)
        with open(src, 'rb') as f:
            for rem in ctx.cluster.remotes.iterkeys():
                teuthology.sudo_write_file(
                    remote=rem,
                    path=dst,
                    data=f,
                )
                f.seek(0)
                rem.run(
                    args=[
                        'sudo',
                        'chmod',
                        'a=rx',
                        '--',
                        dst,
                    ],
                )
    try:
        yield
    finally:
        # clean up everything we shipped, on every remote at once
        log.info('Removing shipped files: %s...', ' '.join(filenames))
        run.wait(
            ctx.cluster.run(
                args=[
                    'sudo',
                    'rm',
                    '-f',
                    '--',
                ] + list(filenames),
                wait=False,
            ),
        )
def get_flavor(config):
    """
    Determine the flavor to use.

    Flavor selects which gitbuilder the prebuilt software is fetched
    from.  Precedence: a local 'path' forces 'local'; otherwise
    'valgrind' forces 'notcmalloc', then 'coverage' forces 'gcov';
    failing all of those, the configured 'flavor' (default 'basic')
    is returned.
    """
    config = config or dict()
    if config.get('path'):
        # a local build directory precludes any other flavor
        return 'local'
    if config.get('valgrind'):
        return 'notcmalloc'
    if config.get('coverage'):
        return 'gcov'
    return config.get('flavor', 'basic')
@contextlib.contextmanager
def task(ctx, config):
    """
    Install packages for a given project.

    tasks:
    - install:
        project: ceph
        branch: bar
    - install:
        project: samba
        branch: foo
        extra_packages: ['samba']

    Overrides are project specific:

    overrides:
      install:
        ceph:
          sha1: ...

    :param ctx: the argparse.Namespace object
    :param config: the config dict
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task install only supports a dictionary for configuration"
    project = config.get('project', 'ceph')
    log.debug('project %s' % project)
    # fold in any project-specific 'install' overrides before deciding
    # what to install
    overrides = ctx.config.get('overrides')
    if overrides:
        install_overrides = overrides.get('install', {})
        teuthology.deep_merge(config, install_overrides.get(project, {}))
    log.debug('config %s' % config)
    flavor = get_flavor(config)
    log.info("Using flavor: %s", flavor)
    ctx.summary['flavor'] = flavor
    install_config = dict(
        branch=config.get('branch'),
        tag=config.get('tag'),
        sha1=config.get('sha1'),
        flavor=flavor,
        extra_packages=config.get('extra_packages', []),
        extras=config.get('extras', None),
        wait_for_package=ctx.config.get('wait_for_package', False),
        project=project,
    )
    with contextutil.nested(
        lambda: install(ctx=ctx, config=install_config),
        lambda: ship_utilities(ctx=ctx, config=None),
    ):
        yield
| |
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for network API."""
import copy
import itertools
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
from nova.compute import flavors
from nova import context
from nova import exception
from nova import network
from nova.network import api
from nova.network import base_api
from nova.network import floating_ips
from nova.network import model as network_model
from nova import objects
from nova.objects import fields
from nova.objects import network_request as net_req_obj
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_fixed_ip
from nova.tests.unit.objects import test_virtual_interface
# Instance UUID reused by many of the tests below.
FAKE_UUID = 'a47ae74e-ab08-547f-9eee-ffd23fc46c16'
# Minimal instance_info_cache-style row; 'network_info' holds
# JSON-serialized network info (an empty list here).
fake_info_cache = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': False,
    'instance_uuid': uuids.instance,
    'network_info': '[]',
}
class ApiTestCase(test.TestCase):
    """Tests for the legacy nova-network API facade (nova.network.api)."""
    def setUp(self):
        super(ApiTestCase, self).setUp()
        # force the nova-network (not neutron) code paths
        self.flags(use_neutron=False)
        self.network_api = network.API()
        self.context = context.RequestContext('fake-user',
                                              fakes.FAKE_PROJECT_ID)
    @mock.patch('nova.objects.NetworkList.get_all')
    def test_get_all(self, mock_get_all):
        mock_get_all.return_value = mock.sentinel.get_all
        self.assertEqual(mock.sentinel.get_all,
                         self.network_api.get_all(self.context))
        mock_get_all.assert_called_once_with(self.context,
                                             project_only=True)
    @mock.patch('nova.objects.NetworkList.get_all')
    def test_get_all_liberal(self, mock_get_all):
        self.flags(network_manager='nova.network.manager.FlatDHCPManager')
        mock_get_all.return_value = mock.sentinel.get_all
        self.assertEqual(mock.sentinel.get_all,
                         self.network_api.get_all(self.context))
        mock_get_all.assert_called_once_with(self.context,
                                             project_only="allow_none")
    @mock.patch('nova.objects.NetworkList.get_all')
    def test_get_all_no_networks(self, mock_get_all):
        mock_get_all.side_effect = exception.NoNetworksFound
        self.assertEqual([], self.network_api.get_all(self.context))
        mock_get_all.assert_called_once_with(self.context,
                                             project_only=True)
    @mock.patch('nova.objects.Network.get_by_uuid')
    def test_get(self, mock_get):
        mock_get.return_value = mock.sentinel.get_by_uuid
        self.assertEqual(mock.sentinel.get_by_uuid,
                         self.network_api.get(self.context, uuids.instance))
    @mock.patch('nova.objects.Network.get_by_id')
    @mock.patch('nova.db.api.virtual_interface_get_by_instance')
    def test_get_vifs_by_instance(self, mock_get_by_instance,
                                  mock_get_by_id):
        mock_get_by_instance.return_value = [
            dict(test_virtual_interface.fake_vif,
                 network_id=123)]
        mock_get_by_id.return_value = objects.Network()
        mock_get_by_id.return_value.uuid = uuids.network_1
        instance = objects.Instance(uuid=uuids.instance)
        vifs = self.network_api.get_vifs_by_instance(self.context,
                                                     instance)
        self.assertEqual(1, len(vifs))
        self.assertEqual(123, vifs[0].network_id)
        self.assertEqual(uuids.network_1, vifs[0].net_uuid)
        mock_get_by_instance.assert_called_once_with(
            self.context, uuids.instance)
        mock_get_by_id.assert_called_once_with(self.context, 123,
                                               project_only='allow_none')
    @mock.patch('nova.objects.Network.get_by_id')
    @mock.patch('nova.db.api.virtual_interface_get_by_address')
    def test_get_vif_by_mac_address(self, mock_get_by_address,
                                    mock_get_by_id):
        mock_get_by_address.return_value = dict(
            test_virtual_interface.fake_vif, network_id=123)
        mock_get_by_id.return_value = objects.Network(
            uuid=uuids.network_1)
        vif = self.network_api.get_vif_by_mac_address(self.context,
                                                      mock.sentinel.mac)
        self.assertEqual(123, vif.network_id)
        self.assertEqual(uuids.network_1, vif.net_uuid)
        mock_get_by_address.assert_called_once_with(self.context,
                                                    mock.sentinel.mac)
        mock_get_by_id.assert_called_once_with(self.context, 123,
                                               project_only='allow_none')
    def test_allocate_for_instance_handles_macs_passed(self):
        # If a macs argument is supplied to the 'nova-network' API, it is just
        # ignored. This test checks that the call down to the rpcapi layer
        # doesn't pass macs down: nova-network doesn't support hypervisor
        # mac address limits (today anyhow).
        macs = set(['ab:cd:ef:01:23:34'])
        with mock.patch.object(self.network_api.network_rpcapi,
                               "allocate_for_instance") as mock_alloc:
            kwargs = dict(zip(['host', 'instance_id', 'project_id',
                'requested_networks', 'rxtx_factor', 'vpn',
                'macs'],
                itertools.repeat(mock.ANY)))
            mock_alloc.return_value = []
            flavor = flavors.get_default_flavor()
            flavor['rxtx_factor'] = 0
            instance = objects.Instance(id=1, uuid=uuids.instance,
                                        project_id='project_id',
                                        host='host', system_metadata={},
                                        flavor=flavor, deleted=False)
            self.network_api.allocate_for_instance(
                self.context, instance, 'vpn', requested_networks=None,
                macs=macs)
            mock_alloc.assert_called_once_with(self.context, **kwargs)
    def _do_test_associate_floating_ip(self, orig_instance_uuid):
        """Test post-association logic."""
        new_instance = objects.Instance(uuid=FAKE_UUID)
        def fake_associate(*args, **kwargs):
            return orig_instance_uuid
        def fake_instance_get_by_uuid(context, instance_uuid,
                                      columns_to_join=None,
                                      use_slave=None):
            if instance_uuid == orig_instance_uuid:
                self.assertIn('extra.flavor', columns_to_join)
            return fake_instance.fake_db_instance(uuid=instance_uuid)
        def fake_get_nw_info(ctxt, instance):
            class FakeNWInfo(object):
                def json(self):
                    pass
            return FakeNWInfo()
        # when the floating IP was previously associated, the cache of
        # both the old and the new instance must be refreshed
        if orig_instance_uuid:
            expected_updated_instances = [new_instance.uuid,
                                          orig_instance_uuid]
        else:
            expected_updated_instances = [new_instance.uuid]
        def fake_instance_info_cache_update(context, instance_uuid, cache):
            self.assertEqual(instance_uuid,
                             expected_updated_instances.pop())
            return fake_info_cache
        def fake_update_instance_cache_with_nw_info(api, context, instance,
                                                    nw_info=None,
                                                    update_cells=True):
            return
        with test.nested(
            mock.patch.object(floating_ips.FloatingIP, 'associate_floating_ip',
                              fake_associate),
            mock.patch.object(self.network_api.db, 'instance_get_by_uuid',
                              fake_instance_get_by_uuid),
            mock.patch.object(self.network_api, '_get_instance_nw_info',
                              fake_get_nw_info),
            mock.patch.object(self.network_api.db,
                              'instance_info_cache_update',
                              fake_instance_info_cache_update),
            mock.patch.object(base_api, "update_instance_cache_with_nw_info",
                              fake_update_instance_cache_with_nw_info)
        ):
            self.network_api.associate_floating_ip(self.context,
                                                   new_instance,
                                                   '172.24.4.225',
                                                   '10.0.0.2')
    def test_associate_preassociated_floating_ip(self):
        self._do_test_associate_floating_ip(uuids.orig_uuid)
    def test_associate_unassociated_floating_ip(self):
        self._do_test_associate_floating_ip(None)
    def test_get_floating_ip_invalid_id(self):
        self.assertRaises(exception.InvalidID,
                          self.network_api.get_floating_ip,
                          self.context, '123zzz')
    @mock.patch('nova.objects.FloatingIP.get_by_id')
    def test_get_floating_ip(self, mock_get):
        floating = mock.sentinel.floating
        mock_get.return_value = floating
        self.assertEqual(floating,
                         self.network_api.get_floating_ip(self.context, 123))
        mock_get.assert_called_once_with(self.context, 123)
    @mock.patch('nova.objects.FloatingIP.get_pool_names')
    def test_get_floating_ip_pools(self, mock_get):
        pools = ['foo', 'bar']
        mock_get.return_value = pools
        self.assertEqual(pools,
                         self.network_api.get_floating_ip_pools(
                             self.context))
    @mock.patch('nova.objects.FloatingIP.get_by_address')
    def test_get_floating_ip_by_address(self, mock_get):
        floating = mock.sentinel.floating
        mock_get.return_value = floating
        self.assertEqual(floating,
                         self.network_api.get_floating_ip_by_address(
                             self.context, mock.sentinel.address))
        mock_get.assert_called_once_with(self.context,
                                         mock.sentinel.address)
    @mock.patch('nova.objects.FloatingIPList.get_by_project')
    def test_get_floating_ips_by_project(self, mock_get):
        floatings = mock.sentinel.floating_ips
        mock_get.return_value = floatings
        self.assertEqual(floatings,
                         self.network_api.get_floating_ips_by_project(
                             self.context))
        mock_get.assert_called_once_with(self.context,
                                         self.context.project_id)
    def _stub_migrate_instance_calls(self, method, multi_host, info):
        """Stub out the rpcapi *method* and return the expected kwargs dict."""
        fake_flavor = flavors.get_default_flavor()
        fake_flavor['rxtx_factor'] = 1.21
        fake_instance = objects.Instance(
            uuid=uuidutils.generate_uuid(dashed=False),
            project_id='fake_project_id',
            instance_type_id=fake_flavor['id'],
            flavor=fake_flavor,
            system_metadata={})
        fake_migration = {'source_compute': 'fake_compute_source',
                          'dest_compute': 'fake_compute_dest'}
        def fake_mig_inst_method(*args, **kwargs):
            info['kwargs'] = kwargs
        def fake_get_multi_addresses(*args, **kwargs):
            return multi_host, ['fake_float1', 'fake_float2']
        self.stub_out('nova.network.rpcapi.NetworkAPI.' + method,
                      fake_mig_inst_method)
        self.stub_out('nova.network.api.API._get_multi_addresses',
                      fake_get_multi_addresses)
        expected = {'instance_uuid': fake_instance.uuid,
                    'source_compute': 'fake_compute_source',
                    'dest_compute': 'fake_compute_dest',
                    'rxtx_factor': 1.21,
                    'project_id': 'fake_project_id',
                    'floating_addresses': None}
        if multi_host:
            expected['floating_addresses'] = ['fake_float1', 'fake_float2']
        return fake_instance, fake_migration, expected
    def test_migrate_instance_start_with_multihost(self):
        info = {'kwargs': {}}
        arg1, arg2, expected = self._stub_migrate_instance_calls(
            'migrate_instance_start', True, info)
        expected['host'] = 'fake_compute_source'
        self.network_api.migrate_instance_start(self.context, arg1, arg2)
        self.assertEqual(info['kwargs'], expected)
    def test_migrate_instance_start_without_multihost(self):
        info = {'kwargs': {}}
        arg1, arg2, expected = self._stub_migrate_instance_calls(
            'migrate_instance_start', False, info)
        self.network_api.migrate_instance_start(self.context, arg1, arg2)
        self.assertEqual(info['kwargs'], expected)
    def test_migrate_instance_finish_with_multihost(self):
        info = {'kwargs': {}}
        arg1, arg2, expected = self._stub_migrate_instance_calls(
            'migrate_instance_finish', True, info)
        expected['host'] = 'fake_compute_dest'
        self.network_api.migrate_instance_finish(self.context, arg1, arg2)
        self.assertEqual(info['kwargs'], expected)
    def test_migrate_instance_finish_without_multihost(self):
        info = {'kwargs': {}}
        arg1, arg2, expected = self._stub_migrate_instance_calls(
            'migrate_instance_finish', False, info)
        self.network_api.migrate_instance_finish(self.context, arg1, arg2)
        self.assertEqual(info['kwargs'], expected)
    def test_is_multi_host_instance_has_no_fixed_ip(self):
        with mock.patch.object(self.network_api.db, 'fixed_ip_get_by_instance',
                               side_effect=exception.FixedIpNotFoundForInstance(
                                   instance_uuid=FAKE_UUID)):
            instance = objects.Instance(uuid=FAKE_UUID)
            result, floats = (
                self.network_api._get_multi_addresses(self.context, instance))
            self.assertFalse(result)
    @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
    def _test_is_multi_host_network_has_no_project_id(self, is_multi_host,
                                                      fip_get):
        network = objects.Network(
            id=123, project_id=None,
            multi_host=is_multi_host)
        fip_get.return_value = [
            objects.FixedIP(instance_uuid=FAKE_UUID, network=network,
                            floating_ips=objects.FloatingIPList())]
        instance = objects.Instance(uuid=FAKE_UUID)
        result, floats = self.network_api._get_multi_addresses(self.context,
                                                               instance)
        self.assertEqual(is_multi_host, result)
    def test_is_multi_host_network_has_no_project_id_multi(self):
        self._test_is_multi_host_network_has_no_project_id(True)
    def test_is_multi_host_network_has_no_project_id_non_multi(self):
        self._test_is_multi_host_network_has_no_project_id(False)
    @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
    def _test_is_multi_host_network_has_project_id(self, is_multi_host,
                                                   fip_get):
        network = objects.Network(
            id=123, project_id=self.context.project_id,
            multi_host=is_multi_host)
        fip_get.return_value = [
            objects.FixedIP(instance_uuid=FAKE_UUID, network=network,
                            floating_ips=objects.FloatingIPList())]
        instance = objects.Instance(uuid=FAKE_UUID)
        result, floats = self.network_api._get_multi_addresses(self.context,
                                                               instance)
        self.assertEqual(is_multi_host, result)
    def test_is_multi_host_network_has_project_id_multi(self):
        self._test_is_multi_host_network_has_project_id(True)
    def test_is_multi_host_network_has_project_id_non_multi(self):
        self._test_is_multi_host_network_has_project_id(False)
    @mock.patch('nova.objects.Network.get_by_uuid')
    @mock.patch('nova.objects.Network.disassociate')
    def test_network_disassociate_project(self, mock_disassociate, mock_get):
        net_obj = objects.Network(context=self.context, id=1)
        mock_get.return_value = net_obj
        self.network_api.associate(self.context, FAKE_UUID, project=None)
        mock_disassociate.assert_called_once_with(self.context, net_obj.id,
                                                  host=False, project=True)
    @mock.patch('nova.objects.Network.get_by_uuid')
    @mock.patch('nova.objects.Network.disassociate')
    def test_network_disassociate_host(self, mock_disassociate, mock_get):
        net_obj = objects.Network(context=self.context, id=1)
        mock_get.return_value = net_obj
        self.network_api.associate(self.context, FAKE_UUID, host=None)
        mock_disassociate.assert_called_once_with(self.context, net_obj.id,
                                                  host=True, project=False)
    @mock.patch('nova.objects.Network.get_by_uuid')
    @mock.patch('nova.objects.Network.associate')
    def test_network_associate_project(self, mock_associate, mock_get):
        net_obj = objects.Network(context=self.context, id=1)
        mock_get.return_value = net_obj
        project = mock.sentinel.project
        self.network_api.associate(self.context, FAKE_UUID, project=project)
        mock_associate.assert_called_once_with(self.context, project,
                                               network_id=net_obj.id,
                                               force=True)
    @mock.patch('nova.objects.Network.get_by_uuid')
    @mock.patch('nova.objects.Network.save')
    def test_network_associate_host(self, mock_save, mock_get):
        net_obj = objects.Network(context=self.context, id=1)
        mock_get.return_value = net_obj
        host = str(mock.sentinel.host)
        self.network_api.associate(self.context, FAKE_UUID, host=host)
        mock_save.assert_called_once_with()
        self.assertEqual(host, net_obj.host)
    @mock.patch('nova.objects.Network.get_by_uuid')
    @mock.patch('nova.objects.Network.disassociate')
    def test_network_disassociate(self, mock_disassociate, mock_get):
        mock_get.return_value = objects.Network(context=self.context, id=123)
        self.network_api.disassociate(self.context, FAKE_UUID)
        mock_disassociate.assert_called_once_with(self.context, 123,
                                                  project=True, host=True)
    def _test_refresh_cache(self, method, *args, **kwargs):
        # This test verifies that no call to get_instance_nw_info() is made
        # from the @refresh_cache decorator for the tested method.
        with test.nested(
            mock.patch.object(self.network_api.network_rpcapi, method),
            mock.patch.object(self.network_api.network_rpcapi,
                              'get_instance_nw_info'),
            mock.patch.object(network_model.NetworkInfo, 'hydrate'),
            mock.patch.object(objects.InstanceInfoCache, 'save'),
        ) as (
            method_mock, nwinfo_mock, hydrate_mock, save_mock
        ):
            nw_info = network_model.NetworkInfo([])
            method_mock.return_value = nw_info
            hydrate_mock.return_value = nw_info
            getattr(self.network_api, method)(*args, **kwargs)
            hydrate_mock.assert_called_once_with(nw_info)
            self.assertFalse(nwinfo_mock.called)
    def test_allocate_for_instance_refresh_cache(self):
        instance = fake_instance.fake_instance_obj(self.context)
        vpn = 'fake-vpn'
        requested_networks = [('fake-networks', None)]
        self._test_refresh_cache('allocate_for_instance', self.context,
                                 instance, vpn, requested_networks)
    @mock.patch('nova.network.rpcapi.NetworkAPI.allocate_for_instance')
    def test_allocate_for_instance_no_nets_no_auto(self, mock_rpc_alloc):
        # Tests that nothing fails if no networks are returned and auto
        # allocation wasn't requested.
        mock_rpc_alloc.return_value = []
        instance = fake_instance.fake_instance_obj(self.context)
        nw_info = self.network_api.allocate_for_instance(
            self.context, instance, mock.sentinel.vpn, requested_networks=None)
        self.assertEqual(0, len(nw_info))
    @mock.patch('nova.network.rpcapi.NetworkAPI.allocate_for_instance')
    def test_allocate_for_instance_no_nets_auto_allocate(self, mock_rpc_alloc):
        # Tests that we fail when no networks are allocated and auto-allocation
        # was requested.
        def fake_rpc_allocate(context, *args, **kwargs):
            # assert that requested_networks is nulled out
            self.assertIn('requested_networks', kwargs)
            self.assertIsNone(kwargs['requested_networks'])
            return []
        mock_rpc_alloc.side_effect = fake_rpc_allocate
        instance = fake_instance.fake_instance_obj(self.context)
        self.assertRaises(exception.UnableToAutoAllocateNetwork,
                          self.network_api.allocate_for_instance,
                          self.context, instance, mock.sentinel.vpn,
                          [(net_req_obj.NETWORK_ID_AUTO, None)])
        self.assertEqual(1, mock_rpc_alloc.call_count)
    @mock.patch('nova.network.rpcapi.NetworkAPI.deallocate_for_instance')
    def test_deallocate_for_instance_auto_allocate(self, mock_rpc_dealloc):
        # Tests that we pass requested_networks=None to the RPC API when
        # we're auto-allocating.
        instance = fake_instance.fake_instance_obj(self.context)
        req_net = objects.NetworkRequest(
            network_id=net_req_obj.NETWORK_ID_AUTO)
        requested_networks = objects.NetworkRequestList(objects=[req_net])
        self.network_api.deallocate_for_instance(
            self.context, instance, requested_networks)
        mock_rpc_dealloc.assert_called_once_with(self.context,
                                                 instance=instance,
                                                 requested_networks=None)
    def test_add_fixed_ip_to_instance_refresh_cache(self):
        instance = fake_instance.fake_instance_obj(self.context)
        network_id = 'fake-network-id'
        self._test_refresh_cache('add_fixed_ip_to_instance', self.context,
                                 instance, network_id)
    def test_remove_fixed_ip_from_instance_refresh_cache(self):
        instance = fake_instance.fake_instance_obj(self.context)
        address = 'fake-address'
        self._test_refresh_cache('remove_fixed_ip_from_instance', self.context,
                                 instance, address)
    @mock.patch('nova.db.api.fixed_ip_get_by_address')
    def test_get_fixed_ip_by_address(self, fip_get):
        fip_get.return_value = test_fixed_ip.fake_fixed_ip
        fip = self.network_api.get_fixed_ip_by_address(self.context,
                                                       'fake-addr')
        self.assertIsInstance(fip, objects.FixedIP)
    @mock.patch('nova.objects.FixedIP.get_by_id')
    def test_get_fixed_ip(self, mock_get_by_id):
        mock_get_by_id.return_value = mock.sentinel.fixed_ip
        self.assertEqual(mock.sentinel.fixed_ip,
                         self.network_api.get_fixed_ip(self.context,
                                                       mock.sentinel.id))
        mock_get_by_id.assert_called_once_with(self.context, mock.sentinel.id)
    @mock.patch('nova.objects.FixedIP.get_by_floating_address')
    def test_get_instance_by_floating_address(self, mock_get_by_floating):
        mock_get_by_floating.return_value = objects.FixedIP(
            instance_uuid = uuids.instance)
        self.assertEqual(uuids.instance,
                         self.network_api.get_instance_id_by_floating_address(
                             self.context, mock.sentinel.floating))
        mock_get_by_floating.assert_called_once_with(self.context,
                                                     mock.sentinel.floating)
    @mock.patch('nova.objects.FixedIP.get_by_floating_address')
    def test_get_instance_by_floating_address_none(self, mock_get_by_floating):
        mock_get_by_floating.return_value = None
        self.assertIsNone(
            self.network_api.get_instance_id_by_floating_address(
                self.context, mock.sentinel.floating))
        mock_get_by_floating.assert_called_once_with(self.context,
                                                     mock.sentinel.floating)
    @mock.patch('nova.network.api.API.migrate_instance_start')
    def test_cleanup_instance_network_on_host(self, fake_migrate_start):
        instance = fake_instance.fake_instance_obj(self.context)
        self.network_api.cleanup_instance_network_on_host(
            self.context, instance, 'fake_compute_source')
        fake_migrate_start.assert_called_once_with(
            self.context, instance,
            {'source_compute': 'fake_compute_source', 'dest_compute': None})
    @mock.patch('nova.network.api.API.migrate_instance_finish')
    def test_setup_instance_network_on_host(self, fake_migrate_finish):
        instance = fake_instance.fake_instance_obj(self.context)
        self.network_api.setup_instance_network_on_host(
            self.context, instance, 'fake_compute_source')
        fake_migrate_finish.assert_called_once_with(
            self.context, instance,
            {'source_compute': None, 'dest_compute': 'fake_compute_source'})
    @mock.patch('oslo_concurrency.lockutils.lock')
    @mock.patch.object(api.API, '_get_instance_nw_info')
    @mock.patch('nova.network.base_api.update_instance_cache_with_nw_info')
    def test_get_instance_nw_info(self, mock_update, mock_get, mock_lock):
        fake_result = mock.sentinel.get_nw_info_result
        mock_get.return_value = fake_result
        instance = fake_instance.fake_instance_obj(self.context)
        result = self.network_api.get_instance_nw_info(self.context, instance)
        mock_get.assert_called_once_with(self.context, instance)
        mock_update.assert_called_once_with(self.network_api, self.context,
                                            instance, nw_info=fake_result,
                                            update_cells=False)
        self.assertEqual(fake_result, result)
@mock.patch('nova.network.api.API')
@mock.patch('nova.db.api.instance_info_cache_update')
class TestUpdateInstanceCache(test.NoDBTestCase):
    """Tests for base_api.update_instance_cache_with_nw_info and the
    refresh_cache decorator.

    Both class-level patches inject mocks into every test method:
    db_mock is the DB cache-update call, api_mock stands in for the
    network API object passed to the helper.
    """
    def setUp(self):
        super(TestUpdateInstanceCache, self).setUp()
        self.context = context.get_admin_context()
        self.instance = objects.Instance(uuid=FAKE_UUID, deleted=False)
        vifs = [network_model.VIF(id='super_vif')]
        self.nw_info = network_model.NetworkInfo(vifs)
        # JSON form of nw_info as the DB layer stores it.
        # NOTE(review): 'self' is passed as the object owning the field —
        # to_primitive appears not to use it here; confirm against
        # nova.objects.fields.NetworkModel.
        self.nw_json = fields.NetworkModel.to_primitive(self, 'network_info',
                                                        self.nw_info)
    def test_update_nw_info_none(self, db_mock, api_mock):
        """nw_info=None forces a fresh _get_instance_nw_info fetch."""
        api_mock._get_instance_nw_info.return_value = self.nw_info
        info_cache = copy.deepcopy(fake_info_cache)
        info_cache.update({'network_info': self.nw_json})
        db_mock.return_value = info_cache
        base_api.update_instance_cache_with_nw_info(api_mock, self.context,
                                                    self.instance, None)
        api_mock._get_instance_nw_info.assert_called_once_with(self.context,
                                                               self.instance)
        db_mock.assert_called_once_with(self.context, self.instance.uuid,
                                        {'network_info': self.nw_json})
        self.assertEqual(self.nw_info, self.instance.info_cache.network_info)
    def test_update_nw_info_none_instance_deleted(self, db_mock, api_mock):
        """A deleted instance short-circuits: no API call at all."""
        instance = objects.Instance(uuid=FAKE_UUID, deleted=True)
        base_api.update_instance_cache_with_nw_info(
            api_mock, self.context, instance)
        self.assertFalse(api_mock.called)
    def test_update_nw_info_one_network(self, db_mock, api_mock):
        """Explicit nw_info is stored as-is without re-fetching."""
        info_cache = copy.deepcopy(fake_info_cache)
        info_cache.update({'network_info': self.nw_json})
        db_mock.return_value = info_cache
        base_api.update_instance_cache_with_nw_info(api_mock, self.context,
                                                    self.instance, self.nw_info)
        self.assertFalse(api_mock._get_instance_nw_info.called)
        db_mock.assert_called_once_with(self.context, self.instance.uuid,
                                        {'network_info': self.nw_json})
        self.assertEqual(self.nw_info, self.instance.info_cache.network_info)
    def test_update_nw_info_empty_list(self, db_mock, api_mock):
        """An empty NetworkInfo is persisted as the '[]' JSON string."""
        new_nw_info = network_model.NetworkInfo([])
        db_mock.return_value = fake_info_cache
        base_api.update_instance_cache_with_nw_info(api_mock, self.context,
                                                    self.instance, new_nw_info)
        self.assertFalse(api_mock._get_instance_nw_info.called)
        db_mock.assert_called_once_with(self.context, self.instance.uuid,
                                        {'network_info': '[]'})
        self.assertEqual(new_nw_info, self.instance.info_cache.network_info)
    def test_decorator_return_object(self, db_mock, api_mock):
        """refresh_cache caches the decorated function's return value."""
        db_mock.return_value = fake_info_cache
        @base_api.refresh_cache
        def func(self, context, instance):
            return network_model.NetworkInfo([])
        func(api_mock, self.context, self.instance)
        self.assertFalse(api_mock._get_instance_nw_info.called)
        db_mock.assert_called_once_with(self.context, self.instance.uuid,
                                        {'network_info': '[]'})
    def test_decorator_return_none(self, db_mock, api_mock):
        """refresh_cache falls back to _get_instance_nw_info when the
        decorated function returns None."""
        db_mock.return_value = fake_info_cache
        @base_api.refresh_cache
        def func(self, context, instance):
            pass
        api_mock._get_instance_nw_info.return_value = self.nw_info
        func(api_mock, self.context, self.instance)
        api_mock._get_instance_nw_info.assert_called_once_with(self.context,
                                                               self.instance)
        db_mock.assert_called_once_with(self.context, self.instance.uuid,
                                        {'network_info': self.nw_json})
class NetworkHooksTestCase(test.BaseHookTestCase):
    """Checks that the network hook points are wired up."""

    def test_instance_network_info_hook(self):
        """The 'instance_network_info' hook wraps the cache-update helper."""
        self.assert_has_hook('instance_network_info',
                             base_api.update_instance_cache_with_nw_info)
| |
# Quick and dirty milling code to drill some holes in a panel
from __future__ import print_function
import serial
import time
class RouterControl:
    """Serial-port driver for a small CNC drill/router.

    Sends simple text commands (G0 moves, S-axis steps, V0 spindle on/off)
    and waits for a reply framed by '<' ... '>' from the controller.
    """

    def __init__(self):
        # Opened lazily by Open(); stays None until then.
        self.serialPort = None

    def Open(self, serialPortName):
        """Open the robot's serial port; exits the program on failure."""
        # BUGFIX: the original used "is not ''" (identity test on a string
        # literal) which is not a reliable emptiness check; use != instead.
        if serialPortName != "":
            try:
                self.serialPort = serial.Serial(serialPortName, baudrate=115200, timeout=3.0)
            except Exception:
                print("Serial port " + serialPortName + " cannot be opened")
                exit(0)
        else:
            print("Serial port cannot be empty")
            exit(0)

    def WriteCmd(self, cmdStr):
        """Send one command line and return the text framed by '<'...'>'.

        Polls for up to ~15 s (15000 iterations with a 1 ms sleep) and
        returns the string "Timeout" if no complete reply arrives.
        """
        # encode() so this also works on Python 3, where pyserial
        # requires bytes.
        self.serialPort.write(cmdStr.encode("utf-8"))
        self.serialPort.write(b'\r\n')
        rxStr = ""
        gotChevron = False
        for i in range(15000):
            try:
                # pyserial >= 3: in_waiting is a *property*.  The original
                # called it as a method, which raised TypeError (an int is
                # not callable) instead of the AttributeError the fallback
                # expects.
                bytesWaiting = self.serialPort.in_waiting
            except AttributeError:
                # pyserial 2.x fallback: inWaiting() is a method.
                bytesWaiting = self.serialPort.inWaiting()
            if bytesWaiting > 0:
                ch = self.serialPort.read(1).decode("utf-8")
                print(ch, end="")
                if gotChevron:
                    if ch == ">":
                        # End of framed reply: return everything between
                        # the chevrons.
                        return rxStr
                    rxStr += ch
                elif ch == "<":
                    gotChevron = True
            time.sleep(0.001)
        return "Timeout"

    def GoToPoint(self, point):
        """Move to (x, y); point is a sequence whose first two items are the
        coordinates sent with the G0 command."""
        cmdStr = "G0 {0:.2f} {1:.2f}".format(point[0], point[1])
        print("Sending", cmdStr)
        rslt = self.WriteCmd(cmdStr)
        print("Result", rslt)

    def Step(self, armIdx, steps):
        """Step a single arm/axis by a raw (possibly negative) step count."""
        cmdStr = "S" + str(armIdx) + " {0:d}".format(steps)
        print("Sending", cmdStr)
        rslt = self.WriteCmd(cmdStr)
        print("Result", rslt)

    def Drill(self, doDrill):
        """Switch the drill spindle on (V0 10.0) or off (V0 0.0)."""
        if doDrill:
            cmdStr = "V0 10.0"
        else:
            cmdStr = "V0 0.0"
        print("Sending", cmdStr)
        rslt = self.WriteCmd(cmdStr)
        print("Result", rslt)
# Bat-outline drilling coordinates as [x, y, z] triples (mm), in drilling
# order.  z is unused by the code below; only x and y are sent to the router.
batOutlinePoints = [
[4.501693, 32.209405, 0.0],[6.554918, 31.946409, 0.0],[8.487224, 32.688767, 0.0],[10.076201, 34.015437, 0.0],
[10.972057, 35.88154, 0.0],[10.761726, 37.940826, 0.0],[9.763341, 39.754146, 0.0],[8.485961, 41.383011, 0.0],
[10.52521, 41.027535, 0.0],[12.267153, 39.909261, 0.0],[13.906055, 38.644785, 0.0],[15.37165, 37.18296, 0.0],
[16.614563, 35.527646, 0.0],[17.661677, 33.742022, 0.0],[18.531526, 31.863655, 0.0],[19.207161, 29.907021, 0.0],
[19.674219, 27.8904, 0.0],[19.922269, 25.835316, 0.0],[19.945873, 23.765451, 0.0],[19.74516, 21.705205, 0.0],
[19.325782, 19.678132, 0.0],[18.698268, 17.705538, 0.0],[17.876922, 15.805462, 0.0],[16.854088, 14.00582, 0.0],
[15.63849, 12.330344, 0.0],[14.255131, 10.790468, 0.0],[12.702171, 9.421813, 0.0],[10.962991, 8.299248, 0.0],
[11.558234, 10.281818, 0.0],[12.540634, 12.103847, 0.0],[13.073751, 14.104019, 0.0],[13.074049, 16.174019, 0.0],
[12.382265, 18.125002, 0.0],[10.319134, 18.293501, 0.0],[8.756659, 16.935719, 0.0],[7.723393, 15.142046, 0.0],
[6.576421, 16.865228, 0.0],[5.995555, 18.852058, 0.0],[4.07789, 19.631457, 0.0],[2.812871, 17.992974, 0.0],
[1.900939, 16.134674, 0.0],[1.237007, 14.174037, 0.0],[0.635916, 12.193232, 0.0],[0.098021, 10.19434, 0.0],
[-0.721135, 12.095362, 0.0],[-1.310417, 14.079713, 0.0],[-1.994545, 16.033393, 0.0],[-2.871206, 17.908591, 0.0],
[-4.101999, 19.572937, 0.0],[-6.057316, 18.8935, 0.0],[-6.705574, 16.927626, 0.0],[-7.048689, 14.886261, 0.0],
[-8.494477, 16.367679, 0.0],[-9.841972, 17.939034, 0.0],[-11.854503, 18.423406, 0.0],[-13.14168, 16.802272, 0.0],
[-13.305115, 14.738734, 0.0],[-12.898023, 12.709158, 0.0],[-12.008294, 10.840126, 0.0],[-10.912576, 9.083908, 0.0],
[-12.93821, 9.510176, 0.0],[-14.492802, 10.876977, 0.0],[-15.863293, 12.428317, 0.0],[-17.046535, 14.126798, 0.0],
[-18.042134, 15.941648, 0.0],[-18.850334, 17.847354, 0.0],[-19.470993, 19.822115, 0.0],[-19.90428, 21.84626, 0.0],
[-20.07648, 23.909085, 0.0],[-19.993312, 25.977414, 0.0],[-19.751783, 28.033275, 0.0],[-19.295424, 30.052343, 0.0],
[-18.60326, 32.003191, 0.0],[-17.705231, 33.86825, 0.0],[-16.646945, 35.647275, 0.0],[-15.361882, 37.270085, 0.0],
[-13.881192, 38.716618, 0.0],[-12.254034, 39.996171, 0.0],[-10.515006, 41.118973, 0.0],[-8.680731, 42.078313, 0.0],
[-9.559682, 40.204188, 0.0],[-10.657352, 38.449189, 0.0],[-11.162232, 36.441704, 0.0],[-10.530289, 34.470524, 0.0],
[-9.070217, 33.003183, 0.0],[-7.217935, 32.079088, 0.0],[-5.172067, 31.763935, 0.0],[-4.299535, 33.641057, 0.0],
[-3.992677, 35.688187, 0.0],[-3.743213, 37.7431, 0.0],[-3.52807, 39.801889, 0.0],[-3.321159, 41.861522, 0.0],
[-3.044258, 43.912918, 0.0],[-2.020332, 42.113897, 0.0],[-1.198958, 40.213833, 0.0],[0.856909, 39.972353, 0.0],
[1.780854, 41.82471, 0.0],[2.649713, 43.703535, 0.0],[3.219862, 41.713603, 0.0],[3.41985, 39.653286, 0.0],
[3.636929, 37.5947, 0.0],[3.882326, 35.539298, 0.0],[4.188339, 33.492042, 0.0]
]
# Speaker-grill drilling coordinates as [x, y, z] triples (mm).  These are
# the holes actually drilled by the script below (batOutlinePoints above is
# an alternate point set).
batSpeakerGrillPoints = [
    # NOTE: a large obsolete point set that used to live here commented out
    # has been removed; recover it from version control if ever needed.
[4.501693, 32.209405, 0.0], [6.831924, 32.012763, 0.0], [9.066105, 33.067279, 0.0], [10.699832, 34.916022, 0.0],
[10.967535, 37.350054, 0.0], [9.881583, 39.575479, 0.0], [8.362829, 41.540218, 0.0], [9.537527, 41.599607, 0.0],
[11.665236, 40.31599, 0.0], [13.66832, 38.846933, 0.0], [15.437845, 37.105966, 0.0], [16.891218, 35.092844, 0.0],
[18.075295, 32.909176, 0.0], [18.993258, 30.601175, 0.0], [19.617665, 28.197223, 0.0], [19.928731, 25.733141, 0.0],
[19.916666, 23.249549, 0.0], [19.583133, 20.788374, 0.0], [18.940917, 18.389058, 0.0], [18.011913, 16.085398, 0.0],
[16.797655, 13.919224, 0.0], [15.307977, 11.931755, 0.0], [13.578194, 10.149706, 0.0], [11.592737, 8.660446, 0.0],
[10.960171, 9.294306, 0.0], [12.21377, 11.4399, 0.0], [13.036922, 13.770284, 0.0], [13.067137, 16.251348, 0.0],
[12.008762, 18.36728, 0.0], [9.686367, 17.902258, 0.0], [8.1422, 15.976132, 0.0], [6.955876, 14.762879, 0.0],
[6.50505, 17.206606, 0.0], [5.536193, 19.446908, 0.0], [3.546065, 19.065633, 0.0], [2.255982, 16.947396, 0.0],
[1.380149, 14.623892, 0.0], [0.651788, 12.247699, 0.0], [-0.11847, 9.991652, 0.0], [-0.803194, 12.374799, 0.0],
[-1.525807, 14.752684, 0.0], [-2.440226, 17.062259, 0.0], [-3.723198, 19.183132, 0.0], [-5.728264, 19.361824, 0.0],
[-6.672259, 17.094381, 0.0], [-7.108598, 14.648755, 0.0], [-8.345035, 16.123924, 0.0], [-9.936705, 18.015071, 0.0],
[-12.270398, 18.262079, 0.0], [-13.266904, 16.079899, 0.0], [-13.155549, 13.607692, 0.0],
[-12.265201, 11.295865, 0.0], [-10.96845, 9.176251, 0.0], [-11.651133, 8.626968, 0.0], [-13.653731, 10.094143, 0.0],
[-15.399178, 11.860192, 0.0], [-16.876336, 13.856625, 0.0], [-18.083375, 16.027396, 0.0],
[-19.020523, 18.327793, 0.0], [-19.687295, 20.720692, 0.0], [-20.060233, 23.175362, 0.0],
[-20.014725, 25.659187, 0.0], [-19.735769, 28.127622, 0.0], [-19.146561, 30.540031, 0.0],
[-18.224743, 32.846139, 0.0], [-17.041883, 35.030914, 0.0], [-15.573627, 37.032272, 0.0],
[-13.808843, 38.779409, 0.0], [-11.834739, 40.287069, 0.0], [-9.708737, 41.572382, 0.0],
[-8.499831, 41.514338, 0.0], [-10.020981, 39.550839, 0.0], [-11.067315, 37.311202, 0.0],
[-10.773567, 34.894518, 0.0], [-9.140259, 33.053043, 0.0], [-6.90969, 31.98647, 0.0], [-4.637706, 32.174065, 0.0],
[-4.144168, 34.606084, 0.0], [-3.819169, 37.070045, 0.0], [-3.554529, 39.541281, 0.0], [-3.305526, 42.014164, 0.0],
[-2.720751, 43.654402, 0.0], [-1.704311, 41.386627, 0.0], [-0.149605, 39.972353, 0.0], [1.52858, 41.233322, 0.0],
[2.538399, 43.500212, 0.0], [3.186922, 42.051027, 0.0], [3.427507, 39.577313, 0.0], [3.692026, 37.106061, 0.0],
[4.005079, 34.640554, 0.0], [2.654026, 31.443817, 0.0], [5.186773, 29.764417, 0.0], [8.511032, 30.473454, 0.0],
[11.308254, 32.391443, 0.0], [12.922633, 35.343201, 0.0], [14.352824, 35.266189, 0.0], [16.124585, 32.35089, 0.0],
[17.338973, 29.162797, 0.0], [17.919886, 25.80166, 0.0], [17.822286, 22.392198, 0.0], [17.058925, 19.067421, 0.0],
[15.689927, 15.942529, 0.0], [14.935897, 17.286608, 0.0], [13.115329, 20.047791, 0.0], [9.807486, 20.245098, 0.0],
[7.408772, 20.327528, 0.0], [4.554988, 21.943563, 0.0], [1.811983, 20.093434, 0.0], [0.14345, 17.121612, 0.0],
[-1.28549, 19.165111, 0.0], [-3.588626, 21.626663, 0.0], [-6.753666, 21.174955, 0.0], [-8.867538, 19.713104, 0.0],
[-12.110158, 20.415105, 0.0], [-14.699915, 18.401001, 0.0], [-15.352051, 15.18929, 0.0],
[-16.863597, 18.247877, 0.0], [-17.812324, 21.525703, 0.0], [-18.049473, 24.922316, 0.0],
[-17.665761, 28.313429, 0.0], [-16.61675, 31.557743, 0.0], [-14.974575, 34.549481, 0.0],
[-13.14401, 36.044237, 0.0], [-11.878602, 32.92233, 0.0], [-9.252424, 30.776039, 0.0], [-6.002758, 29.793467, 0.0],
[-3.014593, 30.964226, 0.0], [-2.174307, 34.255085, 0.0], [-1.742124, 37.643981, 0.0], [1.379558, 37.972353, 0.0],
[1.965456, 34.782858, 0.0], [0.806358, 30.67823, 0.0], [3.149469, 27.837074, 0.0], [6.991413, 27.955771, 0.0],
[10.595034, 29.287777, 0.0], [13.456935, 31.844811, 0.0], [15.148347, 29.487154, 0.0], [15.921066, 25.712892, 0.0],
[15.717114, 21.866108, 0.0], [13.269643, 22.167702, 0.0], [9.471883, 22.24163, 0.0], [6.252596, 23.672436, 0.0],
[2.515632, 23.38794, 0.0], [-0.209699, 21.177591, 0.0], [-3.138074, 23.613116, 0.0], [-6.886413, 23.478964, 0.0],
[-10.129324, 22.39003, 0.0], [-13.901207, 21.918684, 0.0], [-15.946646, 22.434118, 0.0],
[-15.951703, 26.284558, 0.0], [-15.083983, 30.03629, 0.0], [-13.211132, 31.393497, 0.0],
[-10.197696, 29.013482, 0.0], [-6.538753, 27.840833, 0.0], [-2.798793, 28.41914, 0.0], [-0.65576, 31.512059, 0.0],
[-0.006882, 34.444812, 0.0], [-6.260487, 25.800757, 0.0], [-8.95551, 24.539064, 0.0], [-11.835991, 24.476255, 0.0],
[-14.054595, 24.714572, 0.0], [-13.708884, 27.690028, 0.0], [-11.898682, 27.684136, 0.0],
[-9.18975, 26.411545, 0.0], [-0.17624, 28.074732, 0.0], [1.474623, 26.337979, 0.0], [2.751913, 25.647327, 0.0],
[0.610729, 24.538317, 0.0], [-1.30217, 24.930574, 0.0], [-3.546822, 25.811377, 0.0], [-2.040088, 26.549316, 0.0],
[6.000155, 25.816089, 0.0], [8.968624, 26.385909, 0.0], [11.721869, 27.63157, 0.0], [13.586934, 27.699462, 0.0],
[13.96298, 24.700454, 0.0], [11.628642, 24.498829, 0.0], [8.742035, 24.583996, 0.0]
]
# NOTE: commented-out generators for a square calibration grid and a
# back-and-forth test pattern were removed here; see version control history.
# Pre-recorded per-hole step deltas [arm0Steps, arm1Steps] captured from an
# earlier run; used only when USE_PRE_CALCULATED_STEPS is enabled below.
precalculatedSteps = [
[-865,919],[24,14],[9,27],[-2,33],[-16,34],[-29,26],[-34,16],[-34,10],[27,14],[32,0],[34,-4],[34,-8],[35,-14],[35,-18],
[35,-21],[34,-26],[34,-28],[34,-33],[32,-35],[32,-38],[30,-40],[30,-42],[28,-44],[26,-45],[24,-45],[22,-46],[19,-44],
[13,-41],[-47,55],[-33,51],[-39,48],[-42,43],[-44,34],[-23,-12],[10,-38],[26,-43],[-45,26],[-43,32],[-32,-1],[19,-40],
[28,-42],[35,-45],[38,-48],[44,-50],[-53,37],[-49,38],[-46,33],[-45,28],[-41,19],[-4,-29],[33,-42],[40,-43],[-42,16],
[-42,17],[-25,-11],[23,-42],[43,-41],[49,-38],[51,-34],[54,-32],[-25,-9],[-46,18],[-47,22],[-47,24],[-47,25],[-45,28],
[-43,28],[-42,29],[-39,31],[-36,32],[-33,32],[-30,33],[-26,33],[-22,34],[-18,34],[-13,34],[-9,33],[-3,32],[0,32],
[3,30],[18,-31],[14,-33],[24,-30],[35,-19],[34,-5],[30,6],[23,15],[-20,34],[-26,30],[-27,29],[-26,29],[-25,27],
[-24,28],[33,-12],[33,-16],[24,17],[-15,32],[-15,32],[31,-20],[29,-24],[29,-25],[30,-26],[32,-25]
]
def getUserOkToContinue(promptStr):
    """Print *promptStr* and block until the user presses enter.

    Works on both Python 2 (raw_input) and Python 3 (input).  Returns the
    text the user typed; the original discarded it, so returning it is
    backward compatible (existing callers ignore the return value).
    """
    print(promptStr)
    try:
        inStr = raw_input("")
    except NameError:
        # BUGFIX: the original used a bare 'except:', which would also
        # swallow KeyboardInterrupt/EOFError.  Only the Python 3 case
        # (raw_input undefined) should fall through to input().
        inStr = input("")
    return inStr
# --- Main script: drills the speaker-grill hole pattern. ---
# Y origin of the work piece in router coordinates (mm); hole Y values are
# measured downward from this line.
routerYOrigin = 200
router = RouterControl()
router.Open("/dev/ttyAMA0")
router.Drill(False)
# Park near the work piece so it can be aligned by hand.
router.GoToPoint((0,routerYOrigin-26))
getUserOkToContinue("Position work piece and press enter")
USE_PRE_CALCULATED_STEPS = False
if USE_PRE_CALCULATED_STEPS:
    # Replay raw per-arm step deltas recorded from an earlier run,
    # drilling briefly at each position.
    for aStep in precalculatedSteps:
        router.Step(0, aStep[0])
        router.Step(1, aStep[1])
        time.sleep(0.3)
        router.Drill(True)
        time.sleep(0.1)
        router.Drill(False)
        time.sleep(0.1)
else:
    # Drive to each grill point with G0 moves instead.
    STOP_BEFORE_FIRST = True
    # Resume support: skip points already drilled on a previous run.
    SKIP_FIRST_N_POINTS = 11
    pointIdx = 0
    drillPoints = []
    # Map pattern coordinates into router space: x unchanged, y flipped
    # down from routerYOrigin.  (z is ignored.)
    for point in batSpeakerGrillPoints:
        # drillPoints.append([(point[0] - batCentreX) * batScaleX, routerYOrigin - ((point[1] - batMaxY) * batScaleY)])
        drillPoints.append([point[0], routerYOrigin - point[1]])
    for drillPoint in drillPoints:
        # cmdStr = "G0 {0:.0f} {1:.0f}".format(drillPoint[0], drillPoint[1])
        # print(cmdStr)
        if pointIdx < SKIP_FIRST_N_POINTS:
            pointIdx += 1
            continue
        router.GoToPoint(drillPoint)
        time.sleep(0.3)
        if STOP_BEFORE_FIRST:
            # Pause once so alignment can be checked over the first hole.
            getUserOkToContinue("Now over 1st point ... press enter")
            STOP_BEFORE_FIRST = False
        router.Drill(True)
        time.sleep(.1)
        router.Drill(False)
        time.sleep(.1)
        pointIdx += 1
| |
"""Parse tree transformation module.
Transforms Python source code into an abstract syntax tree (AST)
defined in the ast module.
The simplest ways to invoke this module are via parse and parseFile.
parse(buf) -> AST
parseFile(path) -> AST
"""
# Original version written by Greg Stein (gstein@lyra.org)
# and Bill Tutt (rassilon@lima.mudlib.org)
# February 1997.
#
# Modifications and improvements for Python 2.0 by Jeremy Hylton and
# Mark Hammond
#
# Some fixes to try to have correct line number on almost all nodes
# (except Module, Discard and Stmt) added by Sylvain Thenault
#
# Portions of this file are:
# Copyright (C) 1997-1998 Greg Stein. All Rights Reserved.
#
# This module is provided under a BSD-ish license. See
# http://www.opensource.org/licenses/bsd-license.html
# and replace OWNER, ORGANIZATION, and YEAR as appropriate.
from compiler.ast import *
import parser
import symbol
import token
class WalkerError(StandardError):
    """Raised when the transformer meets a parse-tree shape it cannot handle."""
    # NOTE: StandardError is Python 2-only; this module targets Python 2.
    pass
from compiler.consts import CO_VARARGS, CO_VARKEYWORDS
from compiler.consts import OP_ASSIGN, OP_DELETE, OP_APPLY
def parseFile(path):
    """Parse the Python source file at *path* and return its AST."""
    # The parser API tolerates files without a trailing newline but not
    # strings without one; always append a newline since we go through the
    # string version of the API.  ('U' = universal-newline read mode.)
    with open(path, "U") as f:
        source = f.read() + "\n"
    return parse(source)
def parse(buf, mode="exec"):
    """Parse *buf* and return an AST; mode is 'exec', 'eval' or 'single'."""
    if mode in ("exec", "single"):
        return Transformer().parsesuite(buf)
    if mode == "eval":
        return Transformer().parseexpr(buf)
    raise ValueError("compile() arg 3 must be"
                     " 'exec' or 'eval' or 'single'")
def asList(nodes):
    """Recursively convert a sequence of nodes into nested lists/tuples.

    Node objects are converted via their asList() method; tuples and lists
    are converted element-wise; everything else is kept as-is.
    """
    result = []
    for item in nodes:
        if hasattr(item, "asList"):
            result.append(item.asList())
        elif type(item) is tuple:
            result.append(tuple(asList(item)))
        elif type(item) is list:
            result.append(asList(item))
        else:
            result.append(item)
    return result
def extractLineNo(ast):
    """Return the first line number found in parse tuple *ast*, or None."""
    if not isinstance(ast[1], tuple):
        # Terminal node: (token, text, lineno).
        return ast[2]
    for child in ast[1:]:
        if not isinstance(child, tuple):
            continue
        lineno = extractLineNo(child)
        if lineno is not None:
            return lineno
def Node(*args):
    """Instantiate the compiler.ast node class named by args[0].

    The remaining args are passed to the node constructor.  Raises
    WalkerError when no node class of that kind is registered in the
    module-level 'nodes' table.
    """
    kind = args[0]
    # 'in' instead of the deprecated dict.has_key() (removed in Python 3).
    if kind in nodes:
        try:
            return nodes[kind](*args[1:])
        except TypeError:
            # Debug aid before re-raising; %s-formatted so the output is
            # identical under the Python 2 print statement and Python 3.
            print("%s %s %s" % (nodes[kind], len(args), args))
            raise
    else:
        # Function-call raise form: valid on both Python 2 and 3 (the
        # original 'raise WalkerError, msg' is Python 2-only syntax).
        raise WalkerError("Can't find appropriate Node type: %s" % str(args))
    #return apply(ast.Node, args)
class Transformer:
"""Utility object for transforming Python parse trees.
Exposes the following methods:
tree = transform(ast_tree)
tree = parsesuite(text)
tree = parseexpr(text)
tree = parsefile(fileob | filename)
"""
    def __init__(self):
        # Map grammar-symbol number -> bound handler method of the same
        # name, built by reflecting over symbol.sym_name.
        self._dispatch = {}
        for value, name in symbol.sym_name.items():
            if hasattr(self, name):
                self._dispatch[value] = getattr(self, name)
        self._dispatch[token.NEWLINE] = self.com_NEWLINE
        # Dispatch table keyed on the first token of an 'atom' node.
        self._atom_dispatch = {token.LPAR: self.atom_lpar,
                               token.LSQB: self.atom_lsqb,
                               token.LBRACE: self.atom_lbrace,
                               token.BACKQUOTE: self.atom_backquote,
                               token.NUMBER: self.atom_number,
                               token.STRING: self.atom_string,
                               token.NAME: self.atom_name,
                               }
        # Source encoding; set when an encoding_decl node is seen.
        self.encoding = None
def transform(self, tree):
"""Transform an AST into a modified parse tree."""
if not (isinstance(tree, tuple) or isinstance(tree, list)):
tree = parser.ast2tuple(tree, line_info=1)
return self.compile_node(tree)
def parsesuite(self, text):
"""Return a modified parse tree for the given suite text."""
return self.transform(parser.suite(text))
def parseexpr(self, text):
"""Return a modified parse tree for the given expression text."""
return self.transform(parser.expr(text))
def parsefile(self, file):
"""Return a modified parse tree for the contents of the given file."""
if type(file) == type(''):
file = open(file)
return self.parsesuite(file.read())
# --------------------------------------------------------------
#
# PRIVATE METHODS
#
def compile_node(self, node):
### emit a line-number node?
n = node[0]
if n == symbol.encoding_decl:
self.encoding = node[2]
node = node[1]
n = node[0]
if n == symbol.single_input:
return self.single_input(node[1:])
if n == symbol.file_input:
return self.file_input(node[1:])
if n == symbol.eval_input:
return self.eval_input(node[1:])
if n == symbol.lambdef:
return self.lambdef(node[1:])
if n == symbol.funcdef:
return self.funcdef(node[1:])
if n == symbol.classdef:
return self.classdef(node[1:])
raise WalkerError, ('unexpected node type', n)
def single_input(self, node):
### do we want to do anything about being "interactive" ?
# NEWLINE | simple_stmt | compound_stmt NEWLINE
n = node[0][0]
if n != token.NEWLINE:
return self.com_stmt(node[0])
return Pass()
def file_input(self, nodelist):
doc = self.get_docstring(nodelist, symbol.file_input)
if doc is not None:
i = 1
else:
i = 0
stmts = []
for node in nodelist[i:]:
if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
self.com_append_stmt(stmts, node)
return Module(doc, Stmt(stmts))
def eval_input(self, nodelist):
# from the built-in function input()
### is this sufficient?
return Expression(self.com_node(nodelist[0]))
def decorator_name(self, nodelist):
listlen = len(nodelist)
assert listlen >= 1 and listlen % 2 == 1
item = self.atom_name(nodelist)
i = 1
while i < listlen:
assert nodelist[i][0] == token.DOT
assert nodelist[i + 1][0] == token.NAME
item = Getattr(item, nodelist[i + 1][1])
i += 2
return item
    def decorator(self, nodelist):
        """Transform one decorator line into its (possibly called) expression."""
        # '@' dotted_name [ '(' [arglist] ')' ]
        assert len(nodelist) in (3, 5, 6)
        assert nodelist[0][0] == token.AT
        assert nodelist[-1][0] == token.NEWLINE
        assert nodelist[1][0] == symbol.dotted_name
        funcname = self.decorator_name(nodelist[1][1:])
        if len(nodelist) > 3:
            # Decorator with an argument list: build the call expression.
            assert nodelist[2][0] == token.LPAR
            expr = self.com_call_function(funcname, nodelist[3])
        else:
            expr = funcname
        return expr
    def decorators(self, nodelist):
        """Transform a run of decorator nodes into a Decorators node."""
        # decorators: decorator ([NEWLINE] decorator)* NEWLINE
        items = []
        for dec_nodelist in nodelist:
            assert dec_nodelist[0] == symbol.decorator
            items.append(self.decorator(dec_nodelist[1:]))
        return Decorators(items)
    def decorated(self, nodelist):
        """Transform a decorated funcdef or classdef."""
        assert nodelist[0][0] == symbol.decorators
        if nodelist[1][0] == symbol.funcdef:
            # funcdef handles its own decorators; prepend the node.
            n = [nodelist[0]] + list(nodelist[1][1:])
            return self.funcdef(n)
        elif nodelist[1][0] == symbol.classdef:
            # Classes carry their decorators as an attribute set afterwards.
            decorators = self.decorators(nodelist[0][1:])
            cls = self.classdef(nodelist[1][1:])
            cls.decorators = decorators
            return cls
        raise WalkerError()
    def funcdef(self, nodelist):
        """Transform a funcdef node (with optional decorators) into a Function."""
        #                  -6    -5   -4       -3  -2  -1
        # funcdef: [decorators] 'def' NAME parameters ':' suite
        # parameters: '(' [varargslist] ')'
        if len(nodelist) == 6:
            assert nodelist[0][0] == symbol.decorators
            decorators = self.decorators(nodelist[0][1:])
        else:
            assert len(nodelist) == 5
            decorators = None
        # Negative indices so the layout works with or without decorators.
        lineno = nodelist[-4][2]
        name = nodelist[-4][1]
        args = nodelist[-3][2]
        if args[0] == symbol.varargslist:
            names, defaults, flags = self.com_arglist(args[1:])
        else:
            # Empty parameter list.
            names = defaults = ()
            flags = 0
        doc = self.get_docstring(nodelist[-1])
        # code for function
        code = self.com_node(nodelist[-1])
        if doc is not None:
            # The docstring was extracted separately; drop its Discard
            # statement from the body.
            assert isinstance(code, Stmt)
            assert isinstance(code.nodes[0], Discard)
            del code.nodes[0]
        return Function(decorators, name, names, defaults, flags, doc, code,
                        lineno=lineno)
    def lambdef(self, nodelist):
        """Transform a lambdef node into a Lambda."""
        # lambdef: 'lambda' [varargslist] ':' test
        if nodelist[2][0] == symbol.varargslist:
            names, defaults, flags = self.com_arglist(nodelist[2][1:])
        else:
            # Parameterless lambda.
            names = defaults = ()
            flags = 0
        # code for lambda
        code = self.com_node(nodelist[-1])
        return Lambda(names, defaults, flags, code, lineno=nodelist[1][2])
    # Grammar alias: old_lambdef has the same shape as lambdef.
    old_lambdef = lambdef
    def classdef(self, nodelist):
        """Transform a classdef node into a Class."""
        # classdef: 'class' NAME ['(' [testlist] ')'] ':' suite
        name = nodelist[1][1]
        doc = self.get_docstring(nodelist[-1])
        if nodelist[2][0] == token.COLON:
            # 'class NAME:' -- no bases clause at all.
            bases = []
        elif nodelist[3][0] == token.RPAR:
            # 'class NAME():' -- empty bases list.
            bases = []
        else:
            bases = self.com_bases(nodelist[3])
        # code for class
        code = self.com_node(nodelist[-1])
        if doc is not None:
            # Drop the docstring's Discard node from the body.
            assert isinstance(code, Stmt)
            assert isinstance(code.nodes[0], Discard)
            del code.nodes[0]
        return Class(name, bases, doc, code, lineno=nodelist[1][2])
    def stmt(self, nodelist):
        """Transform a stmt node by delegating to its single child."""
        return self.com_stmt(nodelist[0])
    # These grammar symbols each wrap exactly one child statement.
    small_stmt = stmt
    flow_stmt = stmt
    compound_stmt = stmt
def simple_stmt(self, nodelist):
# small_stmt (';' small_stmt)* [';'] NEWLINE
stmts = []
for i in range(0, len(nodelist), 2):
self.com_append_stmt(stmts, nodelist[i])
return Stmt(stmts)
    # The following productions are always consumed by their parents
    # (funcdef, comparisons, trailers, ...); reaching one directly means
    # the walker was driven incorrectly, so each simply raises WalkerError.
    def parameters(self, nodelist):
        raise WalkerError
    def varargslist(self, nodelist):
        raise WalkerError
    def fpdef(self, nodelist):
        raise WalkerError
    def fplist(self, nodelist):
        raise WalkerError
    def dotted_name(self, nodelist):
        raise WalkerError
    def comp_op(self, nodelist):
        raise WalkerError
    def trailer(self, nodelist):
        raise WalkerError
    def sliceop(self, nodelist):
        raise WalkerError
    def argument(self, nodelist):
        raise WalkerError
# --------------------------------------------------------------
#
# STATEMENT NODES (invoked by com_node())
#
    def expr_stmt(self, nodelist):
        """Transform an expression statement: bare expression, chained
        assignment, or augmented assignment."""
        # augassign testlist | testlist ('=' testlist)*
        en = nodelist[-1]
        exprNode = self.lookup_node(en)(en[1:])
        if len(nodelist) == 1:
            # bare expression used as a statement
            return Discard(exprNode, lineno=exprNode.lineno)
        if nodelist[1][0] == token.EQUAL:
            # a = b = ... = expr: every target left of the final '=' is an lvalue
            nodesl = []
            for i in range(0, len(nodelist) - 2, 2):
                nodesl.append(self.com_assign(nodelist[i], OP_ASSIGN))
            return Assign(nodesl, exprNode, lineno=nodelist[1][2])
        else:
            # augmented assignment, e.g. x += expr
            lval = self.com_augassign(nodelist[0])
            op = self.com_augassign_op(nodelist[1])
            return AugAssign(lval, op[1], exprNode, lineno=op[2])
        # NOTE: unreachable -- both branches above return
        raise WalkerError, "can't get here"
    def print_stmt(self, nodelist):
        """Transform a print statement into Print (trailing comma) or
        Printnl (newline) with an optional '>>' destination."""
        # print ([ test (',' test)* [','] ] | '>>' test [ (',' test)+ [','] ])
        items = []
        if len(nodelist) == 1:
            # bare 'print'
            start = 1
            dest = None
        elif nodelist[1][0] == token.RIGHTSHIFT:
            # 'print >> dest, ...' extended form
            assert len(nodelist) == 3 \
                   or nodelist[3][0] == token.COMMA
            dest = self.com_node(nodelist[2])
            start = 4
        else:
            dest = None
            start = 1
        for i in range(start, len(nodelist), 2):
            items.append(self.com_node(nodelist[i]))
        if nodelist[-1][0] == token.COMMA:
            # trailing comma suppresses the newline
            return Print(items, dest, lineno=nodelist[0][2])
        return Printnl(items, dest, lineno=nodelist[0][2])
    def del_stmt(self, nodelist):
        """'del target' -- build the target as a delete-mode lvalue."""
        return self.com_assign(nodelist[1], OP_DELETE)
    def pass_stmt(self, nodelist):
        return Pass(lineno=nodelist[0][2])
    def break_stmt(self, nodelist):
        return Break(lineno=nodelist[0][2])
    def continue_stmt(self, nodelist):
        return Continue(lineno=nodelist[0][2])
    def return_stmt(self, nodelist):
        """'return [testlist]' -- a bare return yields Const(None)."""
        # return: [testlist]
        if len(nodelist) < 2:
            return Return(Const(None), lineno=nodelist[0][2])
        return Return(self.com_node(nodelist[1]), lineno=nodelist[0][2])
    def yield_stmt(self, nodelist):
        """A yield used as a statement: wrap the Yield expression in Discard."""
        expr = self.com_node(nodelist[0])
        return Discard(expr, lineno=expr.lineno)
    def yield_expr(self, nodelist):
        """'yield [value]' -- a bare yield produces Const(None)."""
        if len(nodelist) > 1:
            value = self.com_node(nodelist[1])
        else:
            value = Const(None)
        return Yield(value, lineno=nodelist[0][2])
def raise_stmt(self, nodelist):
# raise: [test [',' test [',' test]]]
if len(nodelist) > 5:
expr3 = self.com_node(nodelist[5])
else:
expr3 = None
if len(nodelist) > 3:
expr2 = self.com_node(nodelist[3])
else:
expr2 = None
if len(nodelist) > 1:
expr1 = self.com_node(nodelist[1])
else:
expr1 = None
return Raise(expr1, expr2, expr3, lineno=nodelist[0][2])
    def import_stmt(self, nodelist):
        """Dispatch to import_name or import_from."""
        # import_stmt: import_name | import_from
        assert len(nodelist) == 1
        return self.com_node(nodelist[0])
    def import_name(self, nodelist):
        """'import a.b as c, d' -> Import node."""
        # import_name: 'import' dotted_as_names
        return Import(self.com_dotted_as_names(nodelist[1]),
                      lineno=nodelist[0][2])
    def import_from(self, nodelist):
        """'from [.]*pkg import ...' -> From node, tracking the relative
        import level (number of leading dots)."""
        # import_from: 'from' ('.'* dotted_name | '.') 'import' ('*' |
        #    '(' import_as_names ')' | import_as_names)
        assert nodelist[0][1] == 'from'
        idx = 1
        # count leading dots for relative imports
        while nodelist[idx][1] == '.':
            idx += 1
        level = idx - 1
        if nodelist[idx][0] == symbol.dotted_name:
            fromname = self.com_dotted_name(nodelist[idx])
            idx += 1
        else:
            # pure relative import: 'from . import x'
            fromname = ""
        assert nodelist[idx][1] == 'import'
        if nodelist[idx + 1][0] == token.STAR:
            return From(fromname, [('*', None)], level,
                        lineno=nodelist[0][2])
        else:
            # skip an optional '(' around the name list
            node = nodelist[idx + 1 + (nodelist[idx + 1][0] == token.LPAR)]
            return From(fromname, self.com_import_as_names(node), level,
                        lineno=nodelist[0][2])
def global_stmt(self, nodelist):
# global: NAME (',' NAME)*
names = []
for i in range(1, len(nodelist), 2):
names.append(nodelist[i][1])
return Global(names, lineno=nodelist[0][2])
    def exec_stmt(self, nodelist):
        """Transform 'exec code [in globals [, locals]]' into Exec."""
        # exec_stmt: 'exec' expr ['in' expr [',' expr]]
        expr1 = self.com_node(nodelist[1])
        if len(nodelist) >= 4:
            expr2 = self.com_node(nodelist[3])
            if len(nodelist) >= 6:
                expr3 = self.com_node(nodelist[5])
            else:
                expr3 = None
        else:
            expr2 = expr3 = None
        return Exec(expr1, expr2, expr3, lineno=nodelist[0][2])
def assert_stmt(self, nodelist):
# 'assert': test, [',' test]
expr1 = self.com_node(nodelist[1])
if (len(nodelist) == 4):
expr2 = self.com_node(nodelist[3])
else:
expr2 = None
return Assert(expr1, expr2, lineno=nodelist[0][2])
    def if_stmt(self, nodelist):
        """Transform if/elif/else into an If node with (test, suite) pairs."""
        # if: test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
        tests = []
        # each if/elif clause occupies 4 children: KEYWORD test ':' suite
        for i in range(0, len(nodelist) - 3, 4):
            testNode = self.com_node(nodelist[i + 1])
            suiteNode = self.com_node(nodelist[i + 3])
            tests.append((testNode, suiteNode))
        if len(nodelist) % 4 == 3:
            # trailing 'else' ':' suite
            elseNode = self.com_node(nodelist[-1])
##      elseNode.lineno = nodelist[-1][1][2]
        else:
            elseNode = None
        return If(tests, elseNode, lineno=nodelist[0][2])
def while_stmt(self, nodelist):
# 'while' test ':' suite ['else' ':' suite]
testNode = self.com_node(nodelist[1])
bodyNode = self.com_node(nodelist[3])
if len(nodelist) > 4:
elseNode = self.com_node(nodelist[6])
else:
elseNode = None
return While(testNode, bodyNode, elseNode, lineno=nodelist[0][2])
    def for_stmt(self, nodelist):
        """Transform 'for target in iterable: suite [else: suite]' into For."""
        # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
        assignNode = self.com_assign(nodelist[1], OP_ASSIGN)
        listNode = self.com_node(nodelist[3])
        bodyNode = self.com_node(nodelist[5])
        if len(nodelist) > 8:
            # 'else' ':' suite tail
            elseNode = self.com_node(nodelist[8])
        else:
            elseNode = None
        return For(assignNode, listNode, bodyNode, elseNode,
                   lineno=nodelist[0][2])
    def try_stmt(self, nodelist):
        """Delegate try/except/finally handling to the shared helper."""
        return self.com_try_except_finally(nodelist)
    def with_stmt(self, nodelist):
        return self.com_with(nodelist)
    def with_var(self, nodelist):
        return self.com_with_var(nodelist)
    def suite(self, nodelist):
        """Transform a suite (inline statement or indented block) into Stmt."""
        # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
        if len(nodelist) == 1:
            # suite on the same line as the header
            return self.com_stmt(nodelist[0])
        stmts = []
        for node in nodelist:
            # skip NEWLINE/INDENT/DEDENT bookkeeping tokens
            if node[0] == symbol.stmt:
                self.com_append_stmt(stmts, node)
        return Stmt(stmts)
# --------------------------------------------------------------
#
# EXPRESSION NODES (invoked by com_node())
#
    def testlist(self, nodelist):
        """A comma-separated expression list becomes a Tuple (or the bare
        expression when there is only one element)."""
        # testlist: expr (',' expr)* [',']
        # testlist_safe: test [(',' test)+ [',']]
        # exprlist: expr (',' expr)* [',']
        return self.com_binary(Tuple, nodelist)
    testlist_safe = testlist # XXX
    testlist1 = testlist
    exprlist = testlist
    def testlist_gexp(self, nodelist):
        """Either a plain testlist or 'expr gen_for' (generator expression)."""
        if len(nodelist) == 2 and nodelist[1][0] == symbol.gen_for:
            test = self.com_node(nodelist[0])
            return self.com_generator_expression(test, nodelist[1])
        return self.testlist(nodelist)
    def test(self, nodelist):
        """Conditional expression 'a if cond else b', lambda, or plain test."""
        # or_test ['if' or_test 'else' test] | lambdef
        if len(nodelist) == 1 and nodelist[0][0] == symbol.lambdef:
            return self.lambdef(nodelist[0])
        then = self.com_node(nodelist[0])
        if len(nodelist) > 1:
            assert len(nodelist) == 5
            assert nodelist[1][1] == 'if'
            assert nodelist[3][1] == 'else'
            test = self.com_node(nodelist[2])
            else_ = self.com_node(nodelist[4])
            return IfExp(test, then, else_, lineno=nodelist[1][2])
        return then
    def or_test(self, nodelist):
        """'a or b or ...' -> Or node (or a lambda)."""
        # and_test ('or' and_test)* | lambdef
        if len(nodelist) == 1 and nodelist[0][0] == symbol.lambdef:
            return self.lambdef(nodelist[0])
        return self.com_binary(Or, nodelist)
    # pre-2.5 grammar name for the same production
    old_test = or_test
    def and_test(self, nodelist):
        """'a and b and ...' -> And node."""
        # not_test ('and' not_test)*
        return self.com_binary(And, nodelist)
    def not_test(self, nodelist):
        """'not x' -> Not node; otherwise pass the comparison through."""
        # 'not' not_test | comparison
        result = self.com_node(nodelist[-1])
        if len(nodelist) == 2:
            return Not(result, lineno=nodelist[0][2])
        return result
    def comparison(self, nodelist):
        """Transform a (possibly chained) comparison into a Compare node."""
        # comparison: expr (comp_op expr)*
        node = self.com_node(nodelist[0])
        if len(nodelist) == 1:
            return node
        results = []
        # at least one operator exists here, so the loop body runs and
        # ``lineno`` below is always bound
        for i in range(2, len(nodelist), 2):
            nl = nodelist[i-1]
            # comp_op: '<' | '>' | '=' | '>=' | '<=' | '<>' | '!=' | '=='
            #          | 'in' | 'not' 'in' | 'is' | 'is' 'not'
            n = nl[1]
            if n[0] == token.NAME:
                type = n[1]
                if len(nl) == 3:
                    # two-word operators: 'not in' / 'is not'
                    if type == 'not':
                        type = 'not in'
                    else:
                        type = 'is not'
            else:
                type = _cmp_types[n[0]]
            lineno = nl[1][2]
            results.append((type, self.com_node(nodelist[i])))
        # we need a special "compare" node so that we can distinguish
        #   3 < x < 5   from    (3 < x) < 5
        # the two have very different semantics and results (note that the
        # latter form is always true)
        return Compare(node, results, lineno=lineno)
    def expr(self, nodelist):
        # xor_expr ('|' xor_expr)*
        return self.com_binary(Bitor, nodelist)
    def xor_expr(self, nodelist):
        # and_expr ('^' and_expr)*  (operands are and_exprs, not xor_exprs)
        return self.com_binary(Bitxor, nodelist)
    def and_expr(self, nodelist):
        # shift_expr ('&' shift_expr)*  (operands are shift_exprs)
        return self.com_binary(Bitand, nodelist)
    def shift_expr(self, nodelist):
        """Left-fold '<<' / '>>' operators into nested shift nodes."""
        # shift_expr ('<<'|'>>' shift_expr)*
        node = self.com_node(nodelist[0])
        for i in range(2, len(nodelist), 2):
            right = self.com_node(nodelist[i])
            # NOTE: lineno always comes from the first operator token
            if nodelist[i-1][0] == token.LEFTSHIFT:
                node = LeftShift([node, right], lineno=nodelist[1][2])
            elif nodelist[i-1][0] == token.RIGHTSHIFT:
                node = RightShift([node, right], lineno=nodelist[1][2])
            else:
                raise ValueError, "unexpected token: %s" % nodelist[i-1][0]
        return node
    def arith_expr(self, nodelist):
        """Left-fold '+' / '-' operators into nested Add/Sub nodes."""
        node = self.com_node(nodelist[0])
        for i in range(2, len(nodelist), 2):
            right = self.com_node(nodelist[i])
            if nodelist[i-1][0] == token.PLUS:
                node = Add([node, right], lineno=nodelist[1][2])
            elif nodelist[i-1][0] == token.MINUS:
                node = Sub([node, right], lineno=nodelist[1][2])
            else:
                raise ValueError, "unexpected token: %s" % nodelist[i-1][0]
        return node
    def term(self, nodelist):
        """Left-fold '*', '/', '%', '//' into nested binary nodes."""
        node = self.com_node(nodelist[0])
        for i in range(2, len(nodelist), 2):
            right = self.com_node(nodelist[i])
            t = nodelist[i-1][0]
            if t == token.STAR:
                node = Mul([node, right])
            elif t == token.SLASH:
                node = Div([node, right])
            elif t == token.PERCENT:
                node = Mod([node, right])
            elif t == token.DOUBLESLASH:
                node = FloorDiv([node, right])
            else:
                raise ValueError, "unexpected token: %s" % t
            # lineno from the first operand's first token
            node.lineno = nodelist[1][2]
        return node
    def factor(self, nodelist):
        """Unary '+', '-', '~' prefix operators."""
        elt = nodelist[0]
        t = elt[0]
        node = self.lookup_node(nodelist[-1])(nodelist[-1][1:])
        # need to handle (unary op)constant here...
        if t == token.PLUS:
            return UnaryAdd(node, lineno=elt[2])
        elif t == token.MINUS:
            return UnarySub(node, lineno=elt[2])
        elif t == token.TILDE:
            node = Invert(node, lineno=elt[2])
        return node
    def power(self, nodelist):
        """atom followed by trailers (call / attribute / subscript) and '**'."""
        # power: atom trailer* ('**' factor)*
        node = self.com_node(nodelist[0])
        for i in range(1, len(nodelist)):
            elt = nodelist[i]
            if elt[0] == token.DOUBLESTAR:
                return Power([node, self.com_node(nodelist[i+1])],
                             lineno=elt[2])
            node = self.com_apply_trailer(node, elt)
        return node
    def atom(self, nodelist):
        """Dispatch an atom on its first token via _atom_dispatch."""
        return self._atom_dispatch[nodelist[0][0]](nodelist)
    def atom_lpar(self, nodelist):
        # '()' is the empty tuple; otherwise unwrap the parenthesized expr
        if nodelist[1][0] == token.RPAR:
            return Tuple((), lineno=nodelist[0][2])
        return self.com_node(nodelist[1])
    def atom_lsqb(self, nodelist):
        # '[]' is the empty list; otherwise build the list/listcomp
        if nodelist[1][0] == token.RSQB:
            return List((), lineno=nodelist[0][2])
        return self.com_list_constructor(nodelist[1])
    def atom_lbrace(self, nodelist):
        # '{}' is the empty dict; otherwise build the dict literal
        if nodelist[1][0] == token.RBRACE:
            return Dict((), lineno=nodelist[0][2])
        return self.com_dictmaker(nodelist[1])
    def atom_backquote(self, nodelist):
        # `expr` repr syntax
        return Backquote(self.com_node(nodelist[1]))
    def atom_number(self, nodelist):
        ### need to verify this matches compile.c
        k = eval(nodelist[0][1])
        return Const(k, lineno=nodelist[0][2])
    def decode_literal(self, lit):
        """Evaluate a string-literal token, honoring the source encoding.

        NOTE: uses eval() on the literal text; safe only because the input
        comes from the tokenizer, never from untrusted data.
        """
        if self.encoding:
            # this is particularly fragile & a bit of a
            # hack... changes in compile.c:parsestr and
            # tokenizer.c must be reflected here.
            if self.encoding not in ['utf-8', 'iso-8859-1']:
                lit = unicode(lit, 'utf-8').encode(self.encoding)
            return eval("# coding: %s\n%s" % (self.encoding, lit))
        else:
            return eval(lit)
def atom_string(self, nodelist):
k = ''
for node in nodelist:
k += self.decode_literal(node[1])
return Const(k, lineno=nodelist[0][2])
def atom_name(self, nodelist):
return Name(nodelist[0][1], lineno=nodelist[0][2])
    # --------------------------------------------------------------
    #
    # INTERNAL PARSING UTILITIES
    #

    # The use of com_node() introduces a lot of extra stack frames,
    # enough to cause a stack overflow compiling test.test_parser with
    # the standard interpreter recursionlimit. The com_node() is a
    # convenience function that hides the dispatch details, but comes
    # at a very high cost. It is more efficient to dispatch directly
    # in the callers. In these cases, use lookup_node() and call the
    # dispatched node directly.
    def lookup_node(self, node):
        """Return the handler method for a parse-tree node's symbol/token id."""
        return self._dispatch[node[0]]
    def com_node(self, node):
        """Dispatch ``node`` to its handler, passing the children only."""
        # Note: compile.c has handling in com_node for del_stmt, pass_stmt,
        #       break_stmt, stmt, small_stmt, flow_stmt, simple_stmt,
        #       and compound_stmt.
        #       We'll just dispatch them.
        return self._dispatch[node[0]](node[1:])
    def com_NEWLINE(self, *args):
        # A ';' at the end of a line can make a NEWLINE token appear
        # here, Render it harmless. (genc discards ('discard',
        # ('const', xxxx)) Nodes)
        return Discard(Const(None))
    def com_arglist(self, nodelist):
        """Walk a varargslist, returning (names, defaults, flags).

        ``flags`` gets CO_VARARGS / CO_VARKEYWORDS when '*args' / '**kwargs'
        are present; names from '*'/'**' are appended after positionals.
        """
        # varargslist:
        #     (fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] | '**' NAME)
        #   | fpdef ['=' test] (',' fpdef ['=' test])* [',']
        # fpdef: NAME | '(' fplist ')'
        # fplist: fpdef (',' fpdef)* [',']
        names = []
        defaults = []
        flags = 0
        i = 0
        while i < len(nodelist):
            node = nodelist[i]
            if node[0] == token.STAR or node[0] == token.DOUBLESTAR:
                if node[0] == token.STAR:
                    node = nodelist[i+1]
                    if node[0] == token.NAME:
                        names.append(node[1])
                        flags = flags | CO_VARARGS
                        # skip NAME and the following comma
                        i = i + 3
                if i < len(nodelist):
                    # should be DOUBLESTAR
                    t = nodelist[i][0]
                    if t == token.DOUBLESTAR:
                        node = nodelist[i+1]
                    else:
                        raise ValueError, "unexpected token: %s" % t
                    names.append(node[1])
                    flags = flags | CO_VARKEYWORDS
                # nothing can follow '**kwargs'
                break
            # fpdef: NAME | '(' fplist ')'
            names.append(self.com_fpdef(node))
            i = i + 1
            if i < len(nodelist) and nodelist[i][0] == token.EQUAL:
                defaults.append(self.com_node(nodelist[i + 1]))
                i = i + 2
            elif len(defaults):
                # we have already seen an argument with default, but here
                # came one without
                raise SyntaxError, "non-default argument follows default argument"
            # skip the comma
            i = i + 1
        return names, defaults, flags
    def com_fpdef(self, node):
        """A parameter: a plain NAME string or a nested tuple of names."""
        # fpdef: NAME | '(' fplist ')'
        if node[1][0] == token.LPAR:
            return self.com_fplist(node[2])
        return node[1][1]
def com_fplist(self, node):
# fplist: fpdef (',' fpdef)* [',']
if len(node) == 2:
return self.com_fpdef(node[1])
list = []
for i in range(1, len(node), 2):
list.append(self.com_fpdef(node[i]))
return tuple(list)
def com_dotted_name(self, node):
# String together the dotted names and return the string
name = ""
for n in node:
if type(n) == type(()) and n[0] == 1:
name = name + n[1] + '.'
return name[:-1]
    def com_dotted_as_name(self, node):
        """'a.b [as c]' -> (dotted-name string, alias-or-None)."""
        assert node[0] == symbol.dotted_as_name
        node = node[1:]
        dot = self.com_dotted_name(node[0][1:])
        if len(node) == 1:
            return dot, None
        assert node[1][1] == 'as'
        assert node[2][0] == token.NAME
        return dot, node[2][1]
    def com_dotted_as_names(self, node):
        """Comma-separated dotted_as_name list -> list of (name, alias)."""
        assert node[0] == symbol.dotted_as_names
        node = node[1:]
        names = [self.com_dotted_as_name(node[0])]
        for i in range(2, len(node), 2):
            names.append(self.com_dotted_as_name(node[i]))
        return names
    def com_import_as_name(self, node):
        """'name [as alias]' -> (name, alias-or-None)."""
        assert node[0] == symbol.import_as_name
        node = node[1:]
        assert node[0][0] == token.NAME
        if len(node) == 1:
            return node[0][1], None
        assert node[1][1] == 'as', node
        assert node[2][0] == token.NAME
        return node[0][1], node[2][1]
    def com_import_as_names(self, node):
        """Comma-separated import_as_name list -> list of (name, alias)."""
        assert node[0] == symbol.import_as_names
        node = node[1:]
        names = [self.com_import_as_name(node[0])]
        for i in range(2, len(node), 2):
            names.append(self.com_import_as_name(node[i]))
        return names
def com_bases(self, node):
bases = []
for i in range(1, len(node), 2):
bases.append(self.com_node(node[i]))
return bases
def com_try_except_finally(self, nodelist):
# ('try' ':' suite
# ((except_clause ':' suite)+ ['else' ':' suite] ['finally' ':' suite]
# | 'finally' ':' suite))
if nodelist[3][0] == token.NAME:
# first clause is a finally clause: only try-finally
return TryFinally(self.com_node(nodelist[2]),
self.com_node(nodelist[5]),
lineno=nodelist[0][2])
#tryexcept: [TryNode, [except_clauses], elseNode)]
clauses = []
elseNode = None
finallyNode = None
for i in range(3, len(nodelist), 3):
node = nodelist[i]
if node[0] == symbol.except_clause:
# except_clause: 'except' [expr [(',' | 'as') expr]] */
if len(node) > 2:
expr1 = self.com_node(node[2])
if len(node) > 4:
expr2 = self.com_assign(node[4], OP_ASSIGN)
else:
expr2 = None
else:
expr1 = expr2 = None
clauses.append((expr1, expr2, self.com_node(nodelist[i+2])))
if node[0] == token.NAME:
if node[1] == 'else':
elseNode = self.com_node(nodelist[i+2])
elif node[1] == 'finally':
finallyNode = self.com_node(nodelist[i+2])
try_except = TryExcept(self.com_node(nodelist[2]), clauses, elseNode,
lineno=nodelist[0][2])
if finallyNode:
return TryFinally(try_except, finallyNode, lineno=nodelist[0][2])
else:
return try_except
    def com_with(self, nodelist):
        """Transform 'with expr [as var]: suite' into a With node."""
        # with_stmt: 'with' expr [with_var] ':' suite
        expr = self.com_node(nodelist[1])
        body = self.com_node(nodelist[-1])
        if nodelist[2][0] == token.COLON:
            # no 'as var' clause
            var = None
        else:
            var = self.com_assign(nodelist[2][2], OP_ASSIGN)
        return With(expr, var, body, lineno=nodelist[0][2])
    def com_with_var(self, nodelist):
        # with_var: 'as' expr
        return self.com_node(nodelist[1])
    def com_augassign_op(self, node):
        """Return the raw operator token of an augmented assignment."""
        assert node[0] == symbol.augassign
        return node[1]
    def com_augassign(self, node):
        """Return node suitable for lvalue of augmented assignment
        Names, slices, and attributes are the only allowable nodes.
        """
        l = self.com_node(node)
        if l.__class__ in (Name, Slice, Subscript, Getattr):
            return l
        raise SyntaxError, "can't assign to %s" % l.__class__.__name__
    def com_assign(self, node, assigning):
        """Build an lvalue node for assignment or deletion.

        ``assigning`` is OP_ASSIGN/OP_DELETE; invalid targets raise
        SyntaxError. Descends through wrapper productions iteratively.
        """
        # return a node suitable for use as an "lvalue"
        # loop to avoid trivial recursion
        while 1:
            t = node[0]
            if t in (symbol.exprlist, symbol.testlist, symbol.testlist_safe, symbol.testlist_gexp):
                if len(node) > 2:
                    # comma present: tuple target
                    return self.com_assign_tuple(node, assigning)
                node = node[1]
            elif t in _assign_types:
                if len(node) > 2:
                    raise SyntaxError, "can't assign to operator"
                node = node[1]
            elif t == symbol.power:
                if node[1][0] != symbol.atom:
                    raise SyntaxError, "can't assign to operator"
                if len(node) > 2:
                    # apply all trailers but the last; the last becomes
                    # the assignment target (attr or subscript)
                    primary = self.com_node(node[1])
                    for i in range(2, len(node)-1):
                        ch = node[i]
                        if ch[0] == token.DOUBLESTAR:
                            raise SyntaxError, "can't assign to operator"
                        primary = self.com_apply_trailer(primary, ch)
                    return self.com_assign_trailer(primary, node[-1],
                                                   assigning)
                node = node[1]
            elif t == symbol.atom:
                t = node[1][0]
                if t == token.LPAR:
                    node = node[2]
                    if node[0] == token.RPAR:
                        raise SyntaxError, "can't assign to ()"
                elif t == token.LSQB:
                    node = node[2]
                    if node[0] == token.RSQB:
                        raise SyntaxError, "can't assign to []"
                    return self.com_assign_list(node, assigning)
                elif t == token.NAME:
                    return self.com_assign_name(node[1], assigning)
                else:
                    raise SyntaxError, "can't assign to literal"
            else:
                raise SyntaxError, "bad assignment (%s)" % t
    def com_assign_tuple(self, node, assigning):
        """Tuple target: build each element as an lvalue."""
        assigns = []
        for i in range(1, len(node), 2):
            assigns.append(self.com_assign(node[i], assigning))
        return AssTuple(assigns, lineno=extractLineNo(node))
    def com_assign_list(self, node, assigning):
        """List target: like a tuple target, but rejects comprehensions."""
        assigns = []
        for i in range(1, len(node), 2):
            if i + 1 < len(node):
                if node[i + 1][0] == symbol.list_for:
                    raise SyntaxError, "can't assign to list comprehension"
                assert node[i + 1][0] == token.COMMA, node[i + 1]
            assigns.append(self.com_assign(node[i], assigning))
        return AssList(assigns, lineno=extractLineNo(node))
    def com_assign_name(self, node, assigning):
        """Plain name target."""
        return AssName(node[1], assigning, lineno=node[2])
    def com_assign_trailer(self, primary, node, assigning):
        """Attribute or subscript target; calls cannot be assigned to."""
        t = node[1][0]
        if t == token.DOT:
            return self.com_assign_attr(primary, node[2], assigning)
        if t == token.LSQB:
            return self.com_subscriptlist(primary, node[2], assigning)
        if t == token.LPAR:
            raise SyntaxError, "can't assign to function call"
        raise SyntaxError, "unknown trailer type: %s" % t
    def com_assign_attr(self, primary, node, assigning):
        return AssAttr(primary, node[1], assigning, lineno=node[-1])
    def com_binary(self, constructor, nodelist):
        "Compile 'NODE (OP NODE)*' into (type, [ node1, ..., nodeN ])."
        l = len(nodelist)
        if l == 1:
            # single operand: no wrapper node needed
            n = nodelist[0]
            return self.lookup_node(n)(n[1:])
        items = []
        for i in range(0, l, 2):
            n = nodelist[i]
            items.append(self.lookup_node(n)(n[1:]))
        return constructor(items, lineno=extractLineNo(nodelist))
    def com_stmt(self, node):
        """Dispatch a statement node, always returning a Stmt wrapper."""
        result = self.lookup_node(node)(node[1:])
        assert result is not None
        if isinstance(result, Stmt):
            return result
        return Stmt([result])
    def com_append_stmt(self, stmts, node):
        """Dispatch ``node`` and append its statement(s) to ``stmts``,
        flattening nested Stmt wrappers."""
        result = self.lookup_node(node)(node[1:])
        assert result is not None
        if isinstance(result, Stmt):
            stmts.extend(result.nodes)
        else:
            stmts.append(result)
    # Grammars with list comprehensions define symbol.list_for; pick the
    # matching implementation at class-definition time.
    if hasattr(symbol, 'list_for'):
        def com_list_constructor(self, nodelist):
            """'[a, b, ...]' literal or '[expr for ...]' comprehension."""
            # listmaker: test ( list_for | (',' test)* [','] )
            values = []
            for i in range(1, len(nodelist)):
                if nodelist[i][0] == symbol.list_for:
                    # the grammar guarantees list_for is the last child
                    assert len(nodelist[i:]) == 1
                    return self.com_list_comprehension(values[0],
                                                       nodelist[i])
                elif nodelist[i][0] == token.COMMA:
                    continue
                values.append(self.com_node(nodelist[i]))
            return List(values, lineno=values[0].lineno)

        def com_list_comprehension(self, expr, node):
            """Build ListComp from chained list_for / list_if clauses."""
            # list_iter: list_for | list_if
            # list_for: 'for' exprlist 'in' testlist [list_iter]
            # list_if: 'if' test [list_iter]

            # XXX should raise SyntaxError for assignment

            lineno = node[1][2]
            fors = []
            while node:
                t = node[1][1]
                if t == 'for':
                    assignNode = self.com_assign(node[2], OP_ASSIGN)
                    listNode = self.com_node(node[4])
                    newfor = ListCompFor(assignNode, listNode, [])
                    newfor.lineno = node[1][2]
                    fors.append(newfor)
                    if len(node) == 5:
                        node = None
                    else:
                        node = self.com_list_iter(node[5])
                elif t == 'if':
                    # attach to the most recent 'for'; the grammar guarantees
                    # a 'for' always precedes the first 'if'
                    test = self.com_node(node[2])
                    newif = ListCompIf(test, lineno=node[1][2])
                    newfor.ifs.append(newif)
                    if len(node) == 3:
                        node = None
                    else:
                        node = self.com_list_iter(node[3])
                else:
                    raise SyntaxError, \
                          ("unexpected list comprehension element: %s %d"
                           % (node, lineno))
            return ListComp(expr, fors, lineno=lineno)

        def com_list_iter(self, node):
            assert node[0] == symbol.list_iter
            return node[1]
    else:
        def com_list_constructor(self, nodelist):
            # no list comprehensions in this grammar: plain literal only
            values = []
            for i in range(1, len(nodelist), 2):
                values.append(self.com_node(nodelist[i]))
            return List(values, lineno=values[0].lineno)
    # Generator expressions exist only in grammars that define symbol.gen_for.
    if hasattr(symbol, 'gen_for'):
        def com_generator_expression(self, expr, node):
            """Build GenExpr from chained gen_for / gen_if clauses."""
            # gen_iter: gen_for | gen_if
            # gen_for: 'for' exprlist 'in' test [gen_iter]
            # gen_if: 'if' test [gen_iter]

            lineno = node[1][2]
            fors = []
            while node:
                t = node[1][1]
                if t == 'for':
                    assignNode = self.com_assign(node[2], OP_ASSIGN)
                    genNode = self.com_node(node[4])
                    newfor = GenExprFor(assignNode, genNode, [],
                                        lineno=node[1][2])
                    fors.append(newfor)
                    if (len(node)) == 5:
                        node = None
                    else:
                        node = self.com_gen_iter(node[5])
                elif t == 'if':
                    # attach to the most recent 'for'; the grammar guarantees
                    # a 'for' always precedes the first 'if'
                    test = self.com_node(node[2])
                    newif = GenExprIf(test, lineno=node[1][2])
                    newfor.ifs.append(newif)
                    if len(node) == 3:
                        node = None
                    else:
                        node = self.com_gen_iter(node[3])
                else:
                    raise SyntaxError, \
                            ("unexpected generator expression element: %s %d"
                             % (node, lineno))
            # the outermost iterable is evaluated eagerly by the generated code
            fors[0].is_outmost = True
            return GenExpr(GenExprInner(expr, fors), lineno=lineno)

        def com_gen_iter(self, node):
            assert node[0] == symbol.gen_iter
            return node[1]
def com_dictmaker(self, nodelist):
# dictmaker: test ':' test (',' test ':' value)* [',']
items = []
for i in range(1, len(nodelist), 4):
items.append((self.com_node(nodelist[i]),
self.com_node(nodelist[i+2])))
return Dict(items, lineno=items[0][0].lineno)
    def com_apply_trailer(self, primaryNode, nodelist):
        """Apply a trailer -- call '(...)', attribute '.name', or
        subscript '[...]' -- to the node built so far."""
        t = nodelist[1][0]
        if t == token.LPAR:
            return self.com_call_function(primaryNode, nodelist[2])
        if t == token.DOT:
            return self.com_select_member(primaryNode, nodelist[2])
        if t == token.LSQB:
            return self.com_subscriptlist(primaryNode, nodelist[2], OP_APPLY)
        raise SyntaxError, 'unknown node type: %s' % t
    def com_select_member(self, primaryNode, nodelist):
        """'.name' attribute access; ``nodelist`` is the NAME token tuple."""
        if nodelist[0] != token.NAME:
            raise SyntaxError, "member must be a name"
        return Getattr(primaryNode, nodelist[1], lineno=nodelist[2])
    def com_call_function(self, primaryNode, nodelist):
        """Build a CallFunc node from an argument list, handling positional
        and keyword arguments plus '*args' / '**kwargs'."""
        if nodelist[0] == token.RPAR:
            # empty argument list: 'f()'
            return CallFunc(primaryNode, [], lineno=extractLineNo(nodelist))
        args = []
        kw = 0          # becomes 1 once a keyword argument has been seen
        star_node = dstar_node = None
        len_nodelist = len(nodelist)
        i = 1
        while i < len_nodelist:
            node = nodelist[i]

            if node[0]==token.STAR:
                if star_node is not None:
                    raise SyntaxError, 'already have the varargs indentifier'
                star_node = self.com_node(nodelist[i+1])
                i = i + 3
                continue
            elif node[0]==token.DOUBLESTAR:
                if dstar_node is not None:
                    raise SyntaxError, 'already have the kwargs indentifier'
                dstar_node = self.com_node(nodelist[i+1])
                i = i + 3
                continue

            # positional or named parameters
            kw, result = self.com_argument(node, kw, star_node)

            if len_nodelist != 2 and isinstance(result, GenExpr) \
               and len(node) == 3 and node[2][0] == symbol.gen_for:
                # allow f(x for x in y), but reject f(x for x in y, 1)
                # should use f((x for x in y), 1) instead of f(x for x in y, 1)
                raise SyntaxError, 'generator expression needs parenthesis'

            args.append(result)
            i = i + 2
        return CallFunc(primaryNode, args, star_node, dstar_node,
                        lineno=extractLineNo(nodelist))
    def com_argument(self, nodelist, kw, star_node):
        """Build one call argument; returns (is_keyword, node).

        Enforces that positional args don't follow keyword args or
        '*expression'.
        """
        if len(nodelist) == 3 and nodelist[2][0] == symbol.gen_for:
            # 'f(x for x in y)' -- unparenthesized generator expression
            test = self.com_node(nodelist[1])
            return 0, self.com_generator_expression(test, nodelist[2])
        if len(nodelist) == 2:
            # plain positional argument
            if kw:
                raise SyntaxError, "non-keyword arg after keyword arg"
            if star_node:
                raise SyntaxError, "only named arguments may follow *expression"
            return 0, self.com_node(nodelist[1])
        # keyword argument: unwrap the test node chain down to the NAME
        result = self.com_node(nodelist[3])
        n = nodelist[1]
        while len(n) == 2 and n[0] != token.NAME:
            n = n[1]
        if n[0] != token.NAME:
            raise SyntaxError, "keyword can't be an expression (%s)"%n[0]
        node = Keyword(n[1], result, lineno=n[2])
        return 1, node
    def com_subscriptlist(self, primary, nodelist, assigning):
        """Build Subscript (extended slicing) or the legacy Slice node."""
        # slicing:      simple_slicing | extended_slicing
        # simple_slicing:   primary "[" short_slice "]"
        # extended_slicing: primary "[" slice_list "]"
        # slice_list:   slice_item ("," slice_item)* [","]

        # backwards compat slice for '[i:j]'
        if len(nodelist) == 2:
            sub = nodelist[1]
            if (sub[1][0] == token.COLON or \
                            (len(sub) > 2 and sub[2][0] == token.COLON)) and \
                            sub[-1][0] != symbol.sliceop:
                return self.com_slice(primary, sub, assigning)

        subscripts = []
        for i in range(1, len(nodelist), 2):
            subscripts.append(self.com_subscript(nodelist[i]))
        return Subscript(primary, assigning, subscripts,
                         lineno=extractLineNo(nodelist))
    def com_subscript(self, node):
        """One slice_item: plain expression, proper slice, or '...'."""
        # slice_item: expression | proper_slice | ellipsis
        ch = node[1]
        t = ch[0]
        if t == token.DOT and node[2][0] == token.DOT:
            return Ellipsis()
        if t == token.COLON or len(node) > 2:
            return self.com_sliceobj(node)
        return self.com_node(ch)
    def com_sliceobj(self, node):
        """Build a Sliceobj for proper slices; omitted bounds are Const(None)."""
        # proper_slice: short_slice | long_slice
        # short_slice:  [lower_bound] ":" [upper_bound]
        # long_slice:   short_slice ":" [stride]
        # lower_bound:  expression
        # upper_bound:  expression
        # stride:       expression
        #
        # Note: a stride may be further slicing...

        items = []

        if node[1][0] == token.COLON:
            # lower bound omitted
            items.append(Const(None))
            i = 2
        else:
            items.append(self.com_node(node[1]))
            # i == 2 is a COLON
            i = 3

        if i < len(node) and node[i][0] == symbol.test:
            items.append(self.com_node(node[i]))
            i = i + 1
        else:
            # upper bound omitted
            items.append(Const(None))

        # a short_slice has been built. look for long_slice now by looking
        # for strides...
        for j in range(i, len(node)):
            ch = node[j]
            if len(ch) == 2:
                # ':' with no stride expression
                items.append(Const(None))
            else:
                items.append(self.com_node(ch[2]))
        return Sliceobj(items, lineno=extractLineNo(node))
    def com_slice(self, primary, node, assigning):
        """Legacy two-bound Slice node for '[i:j]' subscripts."""
        # short_slice:  [lower_bound] ":" [upper_bound]
        lower = upper = None
        if len(node) == 3:
            # only one bound present; COLON position tells which
            if node[1][0] == token.COLON:
                upper = self.com_node(node[2])
            else:
                lower = self.com_node(node[1])
        elif len(node) == 4:
            lower = self.com_node(node[1])
            upper = self.com_node(node[3])
        return Slice(primary, assigning, lower, upper,
                     lineno=extractLineNo(node))
    def get_docstring(self, node, n=None):
        """Return the docstring text of a suite/module subtree, or None.

        Recursively unwraps single-statement productions until it reaches
        an atom made of STRING tokens.
        """
        if n is None:
            n = node[0]
            node = node[1:]
        if n == symbol.suite:
            if len(node) == 1:
                return self.get_docstring(node[0])
            # an indented suite: inspect only its first statement
            for sub in node:
                if sub[0] == symbol.stmt:
                    return self.get_docstring(sub)
            return None
        if n == symbol.file_input:
            for sub in node:
                if sub[0] == symbol.stmt:
                    return self.get_docstring(sub)
            return None
        if n == symbol.atom:
            if node[0][0] == token.STRING:
                # concatenate adjacent string literals
                s = ''
                for t in node:
                    s = s + eval(t[1])
                return s
            return None
        if n == symbol.stmt or n == symbol.simple_stmt \
           or n == symbol.small_stmt:
            return self.get_docstring(node[0])
        if n in _doc_nodes and len(node) == 1:
            return self.get_docstring(node[0])
        return None
    # Wrapper productions that get_docstring() may see on the way down to
    # the docstring atom.
    _doc_nodes = [
        symbol.expr_stmt,
        symbol.testlist,
        symbol.testlist_safe,
        symbol.test,
        symbol.or_test,
        symbol.and_test,
        symbol.not_test,
        symbol.comparison,
        symbol.expr,
        symbol.xor_expr,
        symbol.and_expr,
        symbol.shift_expr,
        symbol.arith_expr,
        symbol.term,
        symbol.factor,
        symbol.power,
        ]

    # comp_op: '<' | '>' | '=' | '>=' | '<=' | '<>' | '!=' | '=='
    #             | 'in' | 'not' 'in' | 'is' | 'is' 'not'
    # Maps single-token comparison operators to their spelling; the
    # word operators ('in', 'is', ...) are handled in comparison().
    _cmp_types = {
        token.LESS : '<',
        token.GREATER : '>',
        token.EQEQUAL : '==',
        token.EQUAL : '==',
        token.LESSEQUAL : '<=',
        token.GREATEREQUAL : '>=',
        token.NOTEQUAL : '!=',
        }

    # Productions that com_node()/com_stmt() may legitimately dispatch on.
    _legal_node_types = [
        symbol.funcdef,
        symbol.classdef,
        symbol.stmt,
        symbol.small_stmt,
        symbol.flow_stmt,
        symbol.simple_stmt,
        symbol.compound_stmt,
        symbol.expr_stmt,
        symbol.print_stmt,
        symbol.del_stmt,
        symbol.pass_stmt,
        symbol.break_stmt,
        symbol.continue_stmt,
        symbol.return_stmt,
        symbol.raise_stmt,
        symbol.import_stmt,
        symbol.global_stmt,
        symbol.exec_stmt,
        symbol.assert_stmt,
        symbol.if_stmt,
        symbol.while_stmt,
        symbol.for_stmt,
        symbol.try_stmt,
        symbol.with_stmt,
        symbol.suite,
        symbol.testlist,
        symbol.testlist_safe,
        symbol.test,
        symbol.and_test,
        symbol.not_test,
        symbol.comparison,
        symbol.exprlist,
        symbol.expr,
        symbol.xor_expr,
        symbol.and_expr,
        symbol.shift_expr,
        symbol.arith_expr,
        symbol.term,
        symbol.factor,
        symbol.power,
        symbol.atom,
        ]

    # yield appeared in later grammars; register it only when present
    if hasattr(symbol, 'yield_stmt'):
        _legal_node_types.append(symbol.yield_stmt)
    if hasattr(symbol, 'yield_expr'):
        _legal_node_types.append(symbol.yield_expr)

    # Wrapper productions that com_assign() may unwrap while looking for
    # an assignable target.
    _assign_types = [
        symbol.test,
        symbol.or_test,
        symbol.and_test,
        symbol.not_test,
        symbol.comparison,
        symbol.expr,
        symbol.xor_expr,
        symbol.and_expr,
        symbol.shift_expr,
        symbol.arith_expr,
        symbol.term,
        symbol.factor,
        ]
# Map every grammar symbol and token number to its readable name,
# used by debug_tree() below.
_names = {}
for k, v in symbol.sym_name.items():
    _names[k] = v
for k, v in token.tok_name.items():
    _names[k] = v
def debug_tree(tree):
    """Return *tree* with symbol/token numbers replaced by readable names."""
    readable = []
    for item in tree:
        if isinstance(item, int):
            # unknown numbers fall through unchanged
            readable.append(_names.get(item, item))
        elif isinstance(item, str):
            readable.append(item)
        else:
            readable.append(debug_tree(item))
    return readable
| |
#############################
## Import required modules ##
#############################
"""
- argparse, collections, csv, and sys are part of the standard Python library
- numpy, sympy, and cyvcf are easily installed via conda and pip
"""
import argparse
import collections
import csv
import sys
import numpy as np
import sympy
import cyvcf
########################################
## Import files from the command line ##
########################################
parser = argparse.ArgumentParser(description="This program calculates nucleotide"\
    " diversity separately for autosomes and sex-linked"\
    " chromosomes. It requires, as input, a minimum of:"\
    " a VCF of polymorphic sites, a list of sex-linked contigs, and"\
    " bed files containing callable regions for each population. It is assumed"\
    " that all filtering (both VCF and bed file) has been done elsewhere.")

#Print help/usage if no arguments are supplied
if len(sys.argv)==1:
    parser.print_usage()
    # exit with a non-zero status so callers can detect misuse
    sys.exit(1)
# A function to check if bootstrap input is non-negative
def check_negative(boot_int):
    """argparse `type=` callback: parse *boot_int* as an int and reject
    negative values with an ArgumentTypeError (so argparse reports a clean
    usage error instead of a traceback)."""
    parsed = int(boot_int)
    if parsed < 0:
        raise argparse.ArgumentTypeError("%s is an invalid value. Input for --bootstrap must be nonnegative integer" % boot_int)
    return parsed
# Parse the command line
parser.add_argument("--vcf", required=True,
    help="REQUIRED. Input VCF file. Can be gzipped.")
parser.add_argument("--outfile", required=True,
    help="REQUIRED. Name of output file. Will overwrite if exists.")
parser.add_argument("--callable_regions", nargs="*", required=True,
    help="REQUIRED. Bed files (no header) containing callable regions for each population (to accurately calculate pi). Order of files must correspond exactly to the order of the population lists")
# BUG FIX: argparse %-formats help strings, so the bare "95%" in this help
# text raised "ValueError: unsupported format character" whenever -h was
# used; a literal percent sign must be written as "%%".
parser.add_argument("--bootstrap", default=0, type=check_negative,
    help="Default is 0. If n > 0, returns 95%% confidence interval of distribution of n replicates with replacement")
parser.add_argument("--male_list", nargs="*", default=None,
    help="Default is None (i.e., all females). Provide file listing males. List males using exact names (capitalization, etc.) as found in the vcf file. Allows script to properly handle X chromosomes")
parser.add_argument("--population_lists", nargs="*", default=None,
    help="Default is None. If None, script will treat all individuals as coming from the same population. Else, will read files and treat all individuals in each file as coming from the same population.")
parser.add_argument("--autosomal_scaffolds", default=None,
    help="Default is None. Provide a file listing autosomal scaffolds (either in a single column, or horizontally in a single row separated by tabs).")
parser.add_argument("--x_linked_scaffolds", default=None,
    help="Default is None. Provide a file listing x-linked scaffolds (either in a single column, or horizontally in a single row separated by tabs).")
parser.add_argument("--scaffold_sites_filter", type=int, default=0,
    help="Default is 0. Minimum number of callable sites for a scaffold/chromosome to be included in analyses. Scaffold is removed if this minimum is not met in any individual.")
parser.add_argument("--min_cov", type=int, default=0,
    help="Default is 0. Minimum read depth for a sample at a site for that genotype to be included.")
parser.add_argument("--QD", type=float, default=0,
    help="Default is 0. Minimum quality by depth (QD) value.")
parser.add_argument("--FS", type=float, default=10000.0,
    help="Default is 10000. Maximum Fisher strand (FS) score.")
parser.add_argument("--QUAL", type=float, default=0.0,
    help="Default is 0. Minimum site quality score.")
parser.add_argument("--MAPQ", type=float, default=0.0,
    help="Default is 0. Minimum mean mapping quality score.")
args = parser.parse_args()
# --population_lists and --callable_regions must pair up one-to-one.
# NOTE(review): --population_lists defaults to None, so omitting it makes
# len(None) raise TypeError here, even though the "Process input population
# lists" step below has an explicit None branch -- confirm which is intended.
if len(args.population_lists) != len(args.callable_regions):
    print "Order and number of --callable_region files must match --population_lists files exactly"
    sys.exit(1)
else:
    print "Found %d populations (based on --callable_region and --population_lists files)" % len(args.population_lists)
    print "Matched the following files (if incorrect, be sure --callable_region and --population_lists files are ordered in the same way - i.e., --callable_regions pop1_callable.bed pop2_callable.bed --population_lists pop1_ids.txt pop2_ids.txt):"
    for idx,val in enumerate(args.callable_regions):
        print "%s %s" % (args.callable_regions[idx], args.population_lists[idx])
print "Reading input files..."
print ""
# Processes X-linked scaffolds
# Accepts either one name per line or a single tab-separated row; stray
# whitespace is trimmed and empty fields dropped.
with open(args.x_linked_scaffolds,"r") as f:
    x_linked = [item for sublist in list(csv.reader(f,delimiter="\t")) for item in sublist]
x_linked = [x.strip() for x in x_linked]
while "" in x_linked:
    x_linked.remove("")
# Process autosomal scaffolds
# Same flattening/cleanup as for the X-linked list above.
with open(args.autosomal_scaffolds,"r") as f:
    auto_scaff = [item for sublist in list(csv.reader(f,delimiter="\t")) for item in sublist]
auto_scaff = [x.strip() for x in auto_scaff]
while "" in auto_scaff:
    auto_scaff.remove("")
# Process input list of males
# Includes a number of cleaning and filtering steps to clean up accidental whitespace, and
# read from both vertical and horizonal lists (horizontal have to be tab separated)
# If no input lists are provided, it treats all samples in the VCF as female
if args.male_list != None:
    with open(args.male_list[0],"r") as f:
        males = [item for sublist in list(csv.reader(f,delimiter="\t")) for item in sublist]
    males = [x.strip() for x in males]
    while "" in males:
        males.remove("")
else:
    males = []
# Open vcf file and initiate the CyVCF parser
# (closed after the traversal loop further down)
vcf_file = open(args.vcf,"r")
vcf_reader = cyvcf.Reader(vcf_file)
# Process input population lists
# Includes a number of cleaning and filtering steps to clean up accidental whitespace, and
# read from both vertical and horizonal lists (horizontal have to be tab separated)
# If no input lists are provided, it treats all samples in the VCF as coming from the same
# population
# Each population entry is [sample_ids, source_name, [auto_pi_list, auto_dropped],
# [x_pi_list, x_dropped]]; a callable-sites dict is appended per population below.
if args.population_lists != None:
    populations = []
    no_populations = False
    for i in args.population_lists:
        with open(i,"r") as f:
            temp_pop = [item for sublist in list(csv.reader(f,delimiter="\t")) for item in sublist]
        temp_pop = [x.strip() for x in temp_pop]
        while "" in temp_pop:
            temp_pop.remove("")
        populations.append([temp_pop,i,[[],0],[[],0]])
else:
    populations = [[vcf_reader.samples,args.vcf,[[],0],[[],0]]]
#########################################################
## Calculate total callable sequence for each DNA type ##
#########################################################
print ""
print "Calculating total callable sequence and filtering low-coverage scaffolds..."
print ""
# Process input bed files of callable regions
# Sums callable bases per scaffold (only scaffolds classified as autosomal or
# X-linked), then drops scaffolds below --scaffold_sites_filter.
for idx,i in enumerate(args.callable_regions):
    callable_dict = {}
    with open(i,"r") as f:
        for j in csv.reader(f,delimiter="\t"):
            if j[0] in callable_dict:
                callable_dict[j[0]] += int(j[2]) - int(j[1])
            else:
                if j[0] in auto_scaff:
                    callable_dict[j[0]] = int(j[2]) - int(j[1])
                elif j[0] in x_linked:
                    callable_dict[j[0]] = int(j[2]) - int(j[1])
    initial_scaffolds = len(callable_dict)
    # NOTE(review): .iteritems() is Python 2 only.
    filtered_dict = {k:v for (k,v) in callable_dict.iteritems() if v >= args.scaffold_sites_filter}
    passing_scaffolds = len(filtered_dict)
    populations[idx].append(filtered_dict)
    print "For population corresponding to %s - %s:" % (args.callable_regions[idx], args.population_lists[idx])
    print "Initial scaffolds: %d" % initial_scaffolds
    print "Removed scaffolds (not enough sequence coverage): %d" % (initial_scaffolds - passing_scaffolds)
    print "Remaining scaffolds: %d" % passing_scaffolds
    print ""
print populations
########################################################################
## Functions to calculated diversity and conduct bootstrap resampling ##
########################################################################
def pi_pairwise(tot_diff, k, sequence_length):
    """Mean nucleotide diversity, pi, per Box 1.3 of Charlesworth and
    Charlesworth (2010):

        pi = (tot_diff / C(k, 2)) / sequence_length

    tot_diff        -- total differences over all pairwise comparisons
    k               -- number of chromosomes sampled (6 for 3 diploids)
    sequence_length -- number of bases analyzed per sequence

    With fewer than two chromosomes no pair exists, so pi is 0.
    """
    if k in (0, 1):
        return 0
    pair_count = (float(k) * (float(k) - 1)) / 2.0
    return (float(tot_diff) / pair_count) / float(sequence_length)
# no longer used in this script
def pi_hohenlohe(allele_count_list):
"""Function calculates pi from Hohenlohe et al. (2010)
pi = 1 - sum((n_i choose 2) / (n choose 2))
where:
n_i is count of allele i in sample
n is the sum of n_i (allele1 plus allele2 in this case, as we assume bi-allelic sites
inputs:
allele_count_list is a list of the counts of the different alleles present at a site
assumptions:
snps
sympy installed and imported for binomial coefficient calculation
"""
n_tot = 0
for i in allele_count_list:
n_tot += i
if n_tot == 0:
return 0
elif n_tot == 1:
return 0
else:
pi_denom = float(sympy.binomial(n_tot,2))
pi_numer = 0.0
for i in allele_count_list:
pi_numer += float(sympy.binomial(i,2))
return (1.0 - (pi_numer / pi_denom))
def count_diffs(allele_list):
    """Count pairwise differences among chromosomes at one site.

    Accepts a list or string of alleles: for 3 diploids genotyped (A/T),
    (T/T), (C/A) pass either ["A","T","T","T","C","A"] or "ATTTCA".
    Returns the number of unordered pairs whose alleles differ.
    """
    return sum(
        1
        for pos, left in enumerate(allele_list)
        for right in allele_list[pos + 1:]
        if left != right
    )
def rand_sample(data_input, n_sites):
    """Return a numpy array of values resampled (with replacement) from
    *data_input* for the bootstrap.

    data_input -- list of per-site values (typically per-site pi)
    n_sites    -- total number of callable sites to resample over; sites
                  beyond len(data_input) are assumed to have value 0, so
                  indices drawn past the end are simply dropped.

    FIXED: np.random.random_integers is deprecated (removed in modern
    NumPy) and, worse, *includes* its upper bound, so it could draw the
    out-of-range site index n_sites.  np.random.randint draws uniformly
    from [0, n_sites), which is the intended 0..n_sites-1 range.  The
    caller (bootstrap_pi_distribution) passes n_sites as a float, so it
    is coerced to int here.
    """
    n_sites = int(n_sites)
    dim = len(data_input)
    indices = np.random.randint(0, n_sites, n_sites)
    # keep only indices that fall inside the observed data; the rest are
    # implicit zero-valued sites and contribute nothing to the sum
    return np.take(data_input, indices[indices < dim])
def bootstrap_pi_distribution(data_input, n_sites, replicates):
    """Return a list of *replicates* bootstrap (with replacement) estimates
    of mean pi.

    data_input -- per-site values (e.g. per-site pi)
    n_sites    -- total callable sites; larger than len(data_input) when
                  the VCF held only polymorphic sites (missing sites count
                  as 0 in the mean)
    replicates -- number of bootstrap replicates to draw
    """
    total_sites = float(n_sites)
    return [np.sum(rand_sample(data_input, total_sites)) / total_sites
            for _ in range(replicates)]
###########################################################################
## Parse VCF file, calculate pi per site, and count pairwise differences ##
###########################################################################
print "Beginning diversity calculations"
counter = 0
# For every record passing the INFO/QUAL filters, build the list of sampled
# alleles per population and store per-site pi (index [3] = X-linked data,
# index [2] = autosomal data).  Sites with any missing/filtered genotype are
# not used for pi; instead the population's dropped-site counter is bumped so
# the callable-sequence denominator can be adjusted later.
for record in vcf_reader:
    try:
        if float(record.INFO["QD"]) >= args.QD and float(record.INFO["FS"]) <= args.FS and float(record.QUAL) >= args.QUAL and float(record.INFO["MQ"]) >= args.MAPQ:
            if record.CHROM in x_linked:
                for pop in populations:
                    allele_list=[]
                    male_count = 0
                    total_count = 0
                    for indv in pop[0]:
                        if indv in males:
                            male_count += 1
                        total_count += 1
                        call = record.genotype(indv)
                        if call['GT'] != None:
                            if indv in males:
                                try:
                                    if float(call["DP"]) >= args.min_cov:
                                        # Randomly selects an allele to include for males
                                        # (males carry a single X, so only one of the two
                                        # recorded bases is real)
                                        allele_list.append(call.gt_bases[int(np.random.choice([0,2],1))])
                                except TypeError:
                                    print "Error reading DP - site excluded"
                            else:
                                try:
                                    if float(call["DP"]) >= args.min_cov:
                                        # call.gt_bases returns in the format "A/T", so this grabs the A and
                                        # the T, while skipping the / (or |)
                                        allele_list.append(call.gt_bases[0])
                                        allele_list.append(call.gt_bases[2])
                                except TypeError:
                                    print "Error reading DP - site excluded"
                    # Process allele list and calculate pi and number of differences
                    # (only if every expected chromosome was sampled: 1 per male,
                    # 2 per female)
                    if len(allele_list) == male_count + (2 * (total_count - male_count)):
                        pop[3][0].append(pi_pairwise(count_diffs(allele_list), len(allele_list), 1.0))
                    else:
                        pop[3][1] += 1
            elif record.CHROM in auto_scaff:
                for pop in populations:
                    allele_list = []
                    total_count = 0
                    for indv in pop[0]:
                        call = record.genotype(indv)
                        total_count += 1
                        if call['GT'] != None:
                            try:
                                if float(call["DP"]) >= args.min_cov:
                                    allele_list.append(call.gt_bases[0])
                                    allele_list.append(call.gt_bases[2])
                            except TypeError:
                                print "Error reading DP - site excluded"
                    # Autosomes: 2 chromosomes per individual expected
                    if len(allele_list) == total_count * 2:
                        pop[2][0].append(pi_pairwise(count_diffs(allele_list), len(allele_list), 1.0))
                    else:
                        pop[2][1] += 1
    except KeyError:
        # Malformed INFO field: count the site as dropped for every population
        print "KeyError - site removed for poorly formated INFO"
        print record
        for pop in populations:
            if record.CHROM in x_linked:
                pop[3][1] += 1
            elif record.CHROM in auto_scaff:
                pop[2][1] += 1
    counter += 1
    if counter % 10000 == 0:
        print "%d records complete..." % (counter)
print "VCF traversal complete"
# Close the vcf file
vcf_file.close()
###########################################################
## Calculate mean diversity (and bootstrap, if selected) ##
###########################################################
print "Calculating diversity statistics..."
#### Charlesworth Pi
print ""
print "Now computing mean nucleotide diversity using equation from Charlesworth and Charlesworth (2010)"
print ""
with open(args.outfile,"w") as f:
output_list = [["population","auto_seq","x_seq","auto_mean","auto_2.5","auto_97.5","x_mean","x_2.5","x_97.5","x_a_mean","x_a_2.5","x_a_97.5"]]
for pop in populations:
auto_seq = 0
x_seq = 0
for i in pop[-1]:
if i in x_linked:
x_seq += pop[-1][i]
elif i in auto_scaff:
auto_seq += pop[-1][i]
#Adjust callable sequence if any sites were filtered during VCF traversal
auto_seq_adj = auto_seq - pop[2][1]
x_seq_adj = x_seq - pop[3][1]
temp_line = []
#Append population name
if pop[1][-4:] == ".txt":
temp_line.append(pop[1][:-4])
else:
append(pop[1])
#Append callable sequence
temp_line.append(auto_seq_adj)
temp_line.append(x_seq_adj)
#Calculate and append autosomal diversity stats
#Mean
temp_line.append(float(np.sum(pop[2][0])) / float(auto_seq_adj))
#Bootstrap
auto_dist = bootstrap_pi_distribution(pop[2][0], auto_seq_adj, args.bootstrap)
temp_line.append(np.percentile(auto_dist, 2.5))
temp_line.append(np.percentile(auto_dist, 97.5))
#Calculate and append X chromosome diversity stats
#Mean
temp_line.append(float(np.sum(pop[3][0])) / float(auto_seq_adj))
#Bootstrap
x_dist = bootstrap_pi_distribution(pop[3][0], x_seq_adj, args.bootstrap)
temp_line.append(np.percentile(x_dist, 2.5))
temp_line.append(np.percentile(x_dist, 97.5))
#Calculate and append X/A diversity ratio stats
x_a_dist = np.asarray(x_dist) / np.asarray(auto_dist)
#Mean
temp_line.append(np.mean(x_a_dist))
#Bootstrap
temp_line.append(np.percentile(x_a_dist, 2.5))
temp_line.append(np.percentile(x_a_dist, 97.5))
output_list.append(temp_line)
w = csv.writer(f, dialect="excel-tab")
w.writerows(output_list)
| |
import os
import sys, logging
import pywikibot
import csv
import MySQLdb as mdb
from MySQLdb import cursors
import traceback
import re
import time
from datetime import datetime, timedelta
import argparse
import pdb
'''
Create the logger
'''
# Run timestamp (YYYY_MM_DD_HH_MM) available for tagging output artifacts.
NOW = time.strftime("%Y_%m_%d_%H_%M")
# All log files produced by create_logger are written under ~/logs.
OUT_DIR_LOGS = os.path.expanduser('~/logs')
def create_logger(logger, logLang='main2'):
    """Attach a file handler writing to OUT_DIR_LOGS/<logLang>-babel_Users.log
    and set the logger's level to INFO."""
    line_format = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    handler = logging.FileHandler(filename=os.path.join(OUT_DIR_LOGS, logLang + '-babel_Users.log'))
    handler.setFormatter(line_format)
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)
class EditPatternUserProficency():
    """Collects per-user edit statistics from Wikimedia Labs DB replicas,
    keyed by the language-proficiency ("babel") level users declare.

    NOTE(review): "proficency" is misspelled throughout (class name, CSV
    columns, file names); left untouched because the spelling is part of the
    on-disk data format and the public class name.
    """
    def __init__(self):
        # Input CSVs (one per wiki) and directory for the report output.
        self.dir = os.path.expanduser('~/outputs/data_10_10_bigwiki2/')#'~/outputs/Global_Unique/'
        self.outdir = os.path.expanduser('~/outputs/')#Global_Edit_Count/
        # NOTE(review): called before the cursor attributes below exist;
        # works only because disconnect_database swallows every exception.
        self.disconnect_database()
        self.con = None        # per-wiki DB connection (primary)
        self.cur = None        # streaming cursor on self.con
        self.con2 = None       # second connection to the same wiki DB
        self.cur2 = None       # cursor on self.con2 (used by _get_rev_count)
        self.gdbCursor = None  # cursor on the centralauth (global users) DB
        self.connectGlobalUserData()
    def disconnect_database(self):
        """Best-effort close of all open cursors/connections; never raises."""
        try:
            self.cur.close()
            self.con.close()
            self.gdbCursor.close()
            # NOTE(review): self.language is never assigned anywhere in this
            # class, so this line always raises and the message never logs.
            logging.info("Successfully disconnect " + self.language)
        except:
            pass
    def connect_database(self, dbLang):
        """Open two independent connections to the <dbLang>wiki_p replica.

        Two connections are used because SSDictCursor streams results:
        _get_rev_count (on self.cur2) can query while self.cur is still
        iterating another result set.
        """
        try:
            db_name = dbLang+ 'wiki_p'
            self.con = mdb.connect(db=db_name, host=dbLang+"wiki.labsdb", read_default_file=os.path.expanduser("~/replica.my.cnf"), cursorclass=mdb.cursors.SSDictCursor)
            logging.info("Connection Successful: " + str(self.con))
            self.cur = self.con.cursor()
        except mdb.Error, e:
            logging.error("Unable to establish connection")
        try:
            db_name2 = dbLang + 'wiki_p'
            self.con2 = mdb.connect(db=db_name2, host=dbLang+"wiki.labsdb", read_default_file=os.path.expanduser("~/replica.my.cnf"), cursorclass=mdb.cursors.SSDictCursor)
            logging.info("Connection Successful: " + str(self.con2))
            self.cur2 = self.con2.cursor()
        except mdb.Error, e:
            logging.error("Unable to establish connection")
    def connectGlobalUserData(self):
        """
        Connect to global accounts. This is a different database than connectServer.
        We are not using this method at the moment.
        """
        try:
            logging.info("Connecting to Toolserver MySql Db: mysql -h sql-s3 centralauth_p") # "mysql -hcentralauth-p.userdb"
            print "Connecting to Toolserver MySql Db: mysql -h sql-s3 centralauth_p"
            self.gdbConnection = mdb.connect(db='centralauth_p',
                host="centralauth.labsdb",
                read_default_file=os.path.expanduser("~/replica.my.cnf"))
            logging.info("Connection Successful" + str(self.gdbConnection))
            self.gdbCursor = self.gdbConnection.cursor(cursors.SSDictCursor)
        except mdb.Error, e:
            logging.error("Unable to establish connection")
    def _isUserGlobal(self, user_name):
        """Return True/False if user_name exists in the centralauth
        globaluser table; retries up to 3 times on query errors.

        NOTE(review): the `success = True` after the if/else is unreachable
        (both branches return first), so a persistent failure loops 3 times
        and then falls through returning None.
        """
        logging.info("checking if " + user_name + " is global")
        attempts = 0
        success = False
        while attempts < 3 and not success:
            unicode_user_name = unicode(user_name, 'utf-8')
            query = ur'''select gu_name from globaluser where gu_name = "%s"'''
            try:
                #self.gdbCursor.execute(query.encode('utf-8'))
                self.gdbCursor.execute((query % (unicode_user_name)).encode('utf-8'))
                gUser = self.gdbCursor.fetchone()
                if gUser:
                    #print gUser['gu_name']
                    self.gdbCursor.fetchall()
                    return True
                else:
                    self.gdbCursor.fetchall()
                    return False
                success = True
            except Exception, e:
                attempts += 1
                traceback.print_exc()
                logging.exception(e)
    def user_namespace_contribution(self):
        """For each input wiki file, write a CSV of per-user revision counts
        and lengths grouped by page namespace (one row per namespace).

        NOTE(review): the `except mdb.OperationalError` clause below is
        unreachable -- it follows `except Exception`, which already catches
        OperationalError, so the server-gone-away reconnect never runs.
        """
        lang_code = re.compile(ur'(?P<lcode>\w+)[-]+(?P<level>[0-5\w])$', re.I | re.U)
        query_groupby_namespace = ur'''SELECT count(rev_id) AS rev_count, sum(rev_len) AS total_rev_length, page_namespace
                      FROM revision_userindex, page
                      WHERE page_id = rev_page
                      AND rev_user = %(user_id)s GROUP BY page_namespace'''
        for filename in os.listdir(self.dir):
            wiki_code = filename.split("-")[0]
            completed_users = []
            with open(os.path.expanduser(self.outdir + wiki_code+'_global_users_namespace.csv'), 'w+') as data_file:
                writer = csv.writer(data_file)
                writer.writerow(('user_id', 'rev_count', 'total_rev_length', 'page_namespace', 'home_wiki','proficency_level'))
                self.connect_database(filename.split("-")[0])
                input_file = csv.DictReader(open(self.dir + filename))
                for filerow in input_file:
                    if filerow['user_id'] not in completed_users:
                        match_template = lang_code.match(filerow['proficency_level'])
                        if match_template:
                            # accepted babel codes for this wiki's language
                            a = [wiki_code+'-1', wiki_code+'-2', wiki_code+'-3', wiki_code+'-4', wiki_code+'-5', wiki_code+'-N', wiki_code+'-n', wiki_code+'-M']
                            proficiency_code = None
                            if len(filerow['proficency_level'].split('_')) > 1:
                                proficiency_code = filerow['proficency_level'].split('_')[1]
                            else:
                                proficiency_code = filerow['proficency_level']
                            if any(x in proficiency_code for x in a):
                                #if self._isUserGlobal(filerow['user_id']):
                                attempt = 0
                                success = False
                                while attempt < 3 and not success:
                                    try:
                                        self.cur.execute(query_groupby_namespace, {'user_id':filerow['user_id']})
                                        logging.info("processing user " + filerow['user_id'])
                                        complete = False
                                        # stream result rows one at a time (SSDictCursor)
                                        while not complete:
                                            group_by_namespace_data = self.cur.fetchone()
                                            #self.cur.fetchall()
                                            if not group_by_namespace_data:
                                                complete = True
                                                continue
                                            writer.writerow([filerow['user_id'], group_by_namespace_data['rev_count'],
                                                group_by_namespace_data['total_rev_length'], group_by_namespace_data['page_namespace'], \
                                                filerow['home_wiki'], filerow['proficency_level']])
                                            completed_users.append(filerow['user_id'])
                                        success = True
                                    except Exception, e:
                                        attempt += 1
                                        traceback.print_exc()
                                        logging.exception(e)
                                    except mdb.OperationalError, sqlEx:
                                        attempt += 1
                                        if sqlEx[0] == 2006:
                                            logging.info("Caught the MySQL server gone away exception attempting to re-connect")
                                        logging.error(sqlEx)
                                        self.connect_database(filename.split("-")[0])
                                #else:
                                #    logging.info(filerow['user_id'] + " is not a global user in " + wiki_code)
    def edit_count_by_proficency(self):
        """For each input wiki file, write a CSV of (user_id, edit_count,
        home_wiki, proficiency) using the wiki's user table.

        NOTE(review): same unreachable OperationalError handler as in
        user_namespace_contribution.
        """
        lang_code = re.compile(ur'(?P<lcode>\w+)[-]+(?P<level>[0-5\w])$', re.I | re.U)
        query_edit_count = ur'''SELECT user_editcount FROM user WHERE user_id = %(user_id)s'''
        completed_users = []
        for filename in os.listdir(self.dir):
            wiki_code = filename.split("-")[0]
            with open(os.path.expanduser(self.outdir + wiki_code+'_global_proficency_editcount.csv'), 'w+') as data_file:
                writer = csv.writer(data_file)
                writer.writerow(('user_id', 'edit_count', 'home_wiki','proficency_level'))
                self.connect_database(filename.split("-")[0])
                input_file = csv.DictReader(open(self.dir + filename))
                for filerow in input_file:
                    if filerow[wiki_code +'_wiki_user_id'] != 'NA':
                        logging.info("processing user_id " + filerow[wiki_code+'_wiki_user_id'])
                        match_template = lang_code.match(filerow['proficency_level'])
                        #print filerow
                        if match_template:
                            a = [wiki_code+'-1', wiki_code+'-2', wiki_code+'-3', wiki_code+'-4', wiki_code+'-5', wiki_code+'-N', wiki_code+'-n', wiki_code+'-M']
                            proficiency_len = (filerow['proficency_level']).split('_')
                            if len(proficiency_len) > 1:
                                proficiency_code = proficiency_len[1]
                            else:
                                proficiency_code = filerow['proficency_level']
                            if any(x in proficiency_code for x in a):
                                attempt = 0
                                success = False
                                while attempt < 3 and not success:
                                    try:
                                        self.cur.execute(query_edit_count, {'user_id':filerow[wiki_code+'_wiki_user_id']})
                                        complete = False
                                        while not complete:
                                            user_edit_count = self.cur.fetchone()
                                            #self.cur.fetchall()
                                            if not user_edit_count:
                                                complete = True
                                                continue
                                            #print user_edit_count
                                            writer.writerow([filerow[wiki_code+'_wiki_user_id'], user_edit_count['user_editcount'], \
                                                filerow['home_wiki'],proficiency_code])
                                        success = True
                                    except Exception, e:
                                        attempt += 1
                                        traceback.print_exc()
                                        logging.exception(e)
                                    except mdb.OperationalError, sqlEx:
                                        attempt += 1
                                        if sqlEx[0] == 2006:
                                            logging.info("Caught the MySQL server gone away exception attempting to re-connect")
                                        logging.error(sqlEx)
                                        self.connect_database(filename.split("-")[0])
    def _get_rev_count(self, user_id):
        """Return the user's count of distinct mainspace pages edited
        (uses the secondary cursor self.cur2; retries up to 3 times).

        NOTE(review): if the query returns no rows, user_rev_count is never
        bound and the `return user_rev_count` raises UnboundLocalError,
        which the broad except then counts as a failed attempt -- confirm
        whether an explicit 0 return is intended.
        """
        query_distinct_title_count = ur'''SELECT COUNT(distinct(rev_page)) as REV_COUNT
                      FROM revision_userindex, page
                      WHERE page_id = rev_page
                      AND page_namespace=0
                      AND rev_user = %(user_id)s'''
        attempt = 0
        success = False
        while attempt < 3 and not success:
            try:
                self.cur2.execute(query_distinct_title_count, {'user_id':user_id})
                complete = False
                while not complete:
                    rev_count = self.cur2.fetchone()
                    self.cur2.fetchall()
                    if not rev_count:
                        complete = True
                        continue
                    user_rev_count = rev_count['REV_COUNT']
                    print user_rev_count
                success = True
                return user_rev_count
            except Exception, e:
                attempt += 1
                traceback.print_exc()
                logging.exception(e)
    def get_titles(self):
        """Sample up to 100 random mainspace page titles per qualifying user
        and write them to en1_titles.csv (babel level 1) or enN_titles.csv
        (native speakers with >100 distinct pages edited)."""
        lang_code = re.compile(ur'(?P<lcode>\w+)[-]+(?P<level>[0-5\w])$', re.I | re.U)
        query_page_title = ur'''SELECT distinct(p.page_title)
                      FROM revision_userindex r, page p
                      WHERE p.page_id = r.rev_page
                      AND p.page_namespace=0
                      AND r.rev_user = %(user_id)s ORDER by RAND() LIMIT 100'''
        with open(os.path.expanduser(self.outdir + 'en1_titles.csv'), 'w+') as en1_file, open(os.path.expanduser(self.outdir + 'enN_titles.csv'), 'w+') as enN_file:
            en1writer = csv.writer(en1_file)
            enNwriter = csv.writer(enN_file)
            for filename in os.listdir(self.dir):
                self.connect_database(filename.split("-")[0])
                input_file = csv.DictReader(open(self.dir + filename))
                wiki_code = filename.split("-")[0]
                for filerow in input_file:
                    template_from_row = filerow['template']
                    logging.info("processing user_id " + filerow['user_id'])
                    match_template = lang_code.match(filerow['template'])
                    if match_template:
                        a = [wiki_code+'-1', wiki_code+'-2', wiki_code+'-3', wiki_code+'-4', wiki_code+'-5', wiki_code+'-N', wiki_code+'-n', wiki_code+'-M']
                        if any(code in filerow['template'] for code in a):
                            rev_count = self._get_rev_count(filerow['user_id'])
                            if wiki_code+'-N' == template_from_row:
                                if int(rev_count) > 100:
                                    attempt = 0
                                    success = False
                                    while attempt < 3 and not success:
                                        try:
                                            self.cur.execute(query_page_title, {'user_id':int(filerow['user_id'])})
                                            titles = self.cur.fetchall()
                                            for title in titles:
                                                enNwriter.writerow([title['page_title'].replace("_", " ")])
                                            success = True
                                        except Exception, e:
                                            attempt += 1
                                            traceback.print_exc()
                                            logging.exception(e)
                                        except mdb.OperationalError, sqlEx:
                                            attempt += 1
                                            if sqlEx[0] == 2006:
                                                logging.info("Caught the MySQL server gone away exception attempting to re-connect")
                                            logging.error(sqlEx)
                                            self.connect_database(filename.split("-")[0])
                            elif wiki_code+'-1' == template_from_row:
                                attempt = 0
                                success = False
                                while attempt < 3 and not success:
                                    try:
                                        self.cur.execute(query_page_title, {'user_id':int(filerow['user_id'])})
                                        titles = self.cur.fetchall()
                                        for title in titles:
                                            en1writer.writerow([title['page_title'].replace("_", " ")])
                                        success = True
                                    except Exception, e:
                                        attempt += 1
                                        traceback.print_exc()
                                        logging.exception(e)
                                    except mdb.OperationalError, sqlEx:
                                        attempt += 1
                                        if sqlEx[0] == 2006:
                                            logging.info("Caught the MySQL server gone away exception attempting to re-connect")
                                        logging.error(sqlEx)
                                        self.connect_database(filename.split("-")[0])
def main():
    """Wire up file logging and run the namespace-contribution report."""
    root_logger = logging.getLogger()
    create_logger(root_logger)
    analyzer = EditPatternUserProficency()
    #analyzer.edit_count_by_proficency()
    analyzer.user_namespace_contribution()
    #analyzer.get_titles()

if __name__ == "__main__":
    main()
| |
from threading import Event, Thread
from Queue import Queue, Empty
from serial import Serial
from time import sleep
import logging
import string
import shlex
def has_nonascii(s):
    """Return True if *s* contains any character outside the accepted
    printable-ASCII set.

    BUG FIX: the original iterated over ascii_chars itself (testing each
    accepted character against its own set), so it returned False for every
    input -- the garbled-caller-ID detection in on_clcc never triggered.
    It must iterate over the characters of *s*.
    """
    ascii_chars = string.ascii_letters+string.digits+"!@#$%^&*()_+\|{}[]-_=+'\",.<>?:; "
    return any(char not in ascii_chars for char in s)
def is_csv(s):
    """True when the string contains at least one comma (i.e. looks like a
    comma-separated modem response)."""
    return s.count(",") > 0
class ATError(Exception):
    """Raised when the modem's reply differs from what was expected.

    Exposes .expected and .received for programmatic inspection; the
    exception message embeds both.  (The redundant second assignment of
    self.received after Exception.__init__ was removed.)
    """
    def __init__(self, expected=None, received=None):
        self.expected = expected
        self.received = received
        message = "Expected {}, got {}".format(expected, repr(received))
        Exception.__init__(self, message)
class Modem():
read_buffer_size = 1000
read_timeout = 0.2
unexpected_queue = None
manufacturer = None
model = None
linesep = '\r\n'
ok_response = 'OK'
error_response = 'ERROR'
clcc_header = "+CLCC:"
def __init__(self, serial_path="/dev/ttyAMA0", timeout=0.2, monitor=True):
self.serial_path = serial_path
self.read_timeout = timeout
self.executing_command = Event()
self.should_monitor = Event()
self.unexpected_queue = Queue()
if monitor: self.start_monitoring()
def init_modem(self):
self.port = Serial(self.serial_path, 115200, timeout=self.read_timeout)
self.at()
self.enable_verbosity()
print("Battery voltage is: {}".format(self.get_voltage()))
self.manufacturer = self.at_command("AT+CGMI")
self.model = self.at_command("AT+CGMM")
self.at_command("AT+CLIP=1")
self.save_settings()
def save_settings(self):
self.at_command("AT&W")
def enable_verbosity(self):
return self.at_command('AT+CMEE=1')
def at(self):
response = self.at_command('AT')
if response is True: return
raise ATError(expected=self.ok_response, received=response)
def get_voltage(self):
answer = self.at_command('AT+CBC')
if not answer.startswith('+CBC'): return 0.0
voltage_str = answer.split(':')[1].split(',')[2]
voltage = round(int(voltage_str)/1000.0, 2)
return voltage
def process_clcc(self, clcc_line):
if clcc_line.startswith(self.clcc_header):
clcc_line = clcc_line[len(self.clcc_header):]
clcc_line = clcc_line.strip()
elements = shlex.split(clcc_line, ',')
if len(elements) < 8:
print("Unrecognized number of CLCC elements!")
print(repr(elements))
return
elif len(elements) > 8:
print("Too much CLCC elements!")
elements = elements[:8]
def call(self, number):
return self.at_command("ATD{};".format(number))
def hangup(self):
return self.at_command("ATH", noresponse=True)
def answer(self):
return self.at_command("ATA")
#Callbacks - to be overridden
def on_active_call(self):
print("Call is active - is it ever?")
def on_ring(self):
print("Ring ring ring bananaphone!")
def on_dialing(self):
print("Hope somebody answers...")
def on_busy(self):
print("Can't you see it's busy")
def on_hangup(self):
print("The person you were talking to got seriously bored")
def on_noanswer(self):
print("Somebody's tired of your shit")
def on_incoming_message(self, cmti_line):
print("You've got mail! Line: {}".format(cmti_line[len("+CMTI:"):]).strip())
clcc_mapping = [ #Outgoing
{
"0":on_active_call,
"1":on_held,
"2":on_active_call,
"3":on_active_call,
"4":on_active_call,
"5":on_active_call,
"6":on_hangup}
], [ #Incoming
{
"0":on_active_call,
"1":on_held,
"2":on_active_call,
"3":on_active_call,
"4":on_active_call,
"5":on_active_call,
"6":on_hangup}
],
def on_clcc(self, clcc_line):
#CLCC is operator-dependent, from what I understand.
for i in range(4):
if not has_nonascii(clcc_line) or not is_csv(clcc_line):
break
print("Garbled caller ID line! Try {}, line: {}".format(i, clcc_line))
sleep(1)
clcc_response = self.at_command("AT+CLCC", nook=True)
print(repr(lines))
for line in lines:
if line.startswith(self.clcc_header):
clcc_line = line
else:
self.queue_unexpected_data(line)
if has_nonascii(clcc_line) or not is_csv(clcc_line):
print("Still garbled CLCC line!"); return
print("Caller ID OK, line: {}".format(repr(clcc_line[len(self.clcc_header):])).strip())
#self.process_clcc(clcc_line)
#Low-level functions
def check_input(self):
#print("Checks input")
input = self.port.read(self.read_buffer_size)
if input:
self.queue_unexpected_data(input)
def at_command(self, command, noresponse=False, nook=False):
self.executing_command.set()
self.check_input()
self.port.write(command+self.linesep)
echo = self.port.read(len(command)) #checking for command echo
if echo != command:
raise ATError(received=echo, expected=command)
#print(repr(self.port.read(len(self.linesep)+1)))
self.port.read(len(self.linesep)+1) #shifting through the line separator - that +1 seems to be necessary when we're reading right after the echo
answer = self.port.read(self.read_buffer_size)
self.executing_command.clear()
lines = filter(None, answer.split(self.linesep))
#print(lines)
if not lines and noresponse: return True #one of commands that doesn't need a response
if nook: return lines
if self.ok_response not in lines: #expecting OK as one of the elements
raise ATError(expected=self.ok_response, received=lines)
#We can have a sudden undervoltage warning, though
#I'll assume the OK always goes last in the command
#So we can pass anything after OK to the unexpected line parser
ok_response_index = lines.index(self.ok_response)
if ok_response_index+1 < len(lines):
self.queue_unexpected_data(lines[(ok_response_index+1):])
lines = lines[:(ok_response_index+1)]
if len(lines) == 1: #Single-line response
if lines[0] == self.ok_response:
return True
else:
return lines[0]
else:
lines = lines[:-1]
if len(lines) == 1:
return lines[0]
else:
return lines
#Functions for background monitoring of any unexpected input
def queue_unexpected_data(self, data):
self.unexpected_queue.put(data)
def process_incoming_data(self, data):
logging.debug("Incoming data: {}".format(repr(data)))
if isinstance(data, str):
data = data.split(self.linesep)
lines = filter(None, data)
for line in lines:
#Now onto the callbacks
if line == "RING":
self.on_ring(); return
if line == "BUSY":
self.on_busy(); return
if line == "HANGUP":
self.on_hangup(); return
if line == "NO ANSWER":
self.on_no_answer(); return
if line in ["SMS Ready", "Call Ready"]:
pass; return #Modem just reset
if line.startswith("+CMTI:"):
self.on_incoming_message(line); return
if line.startswith("+CLCC:"):
self.on_clcc(line); return
self.parse_unexpected_message(lines)
def parse_unexpected_message(self, data):
#haaaax
if self.linesep[::-1] in "".join(data):
lines = "".join(data).split(self.linesep[::-1])
logging.debug("Unexpected lines: {}".format(data))
def monitor(self):
while self.should_monitor.isSet():
#print("Monitoring...")
if not self.executing_command.isSet():
#First, the serial port
data = self.port.read(self.read_buffer_size)
if data:
print("Got data through serial!")
self.process_incoming_data(data)
#Then, the queue of unexpected messages received from other commands
try:
data = self.unexpected_queue.get_nowait()
except Empty:
pass
else:
print("Got data from queue!")
self.process_incoming_data(data)
#print("Got to sleep")
sleep(self.read_timeout)
#print("Returned from sleep")
print("Stopped monitoring!")
def start_monitoring(self):
self.should_monitor.set()
self.thread = Thread(target=self.monitor)
self.thread.daemon=True
self.thread.start()
    def stop_monitoring(self):
        """Signal the monitor thread to exit its polling loop."""
        self.should_monitor.clear()
if __name__ == "__main__":
    # Basic smoke test: open the default port and initialize the modem.
    modem = Modem(timeout = 0.5)
    modem.init_modem()
| |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import StringIO
import unittest
import os
import uuid
from swift.common.direct_client import direct_get_suffix_hashes
from swift.common.exceptions import DiskFileDeleted
from swift.common.internal_client import UnexpectedResponse
from swift.container.backend import ContainerBroker
from swift.common import utils
from swiftclient import client
from swift.common.ring import Ring
from swift.common.utils import Timestamp, get_logger, hash_path
from swift.obj.diskfile import DiskFileManager
from swift.common.storage_policy import POLICIES
from test.probe.brain import BrainSplitter
from test.probe.common import ReplProbeTest
class Test(ReplProbeTest):
    def setUp(self):
        """
        Reset all environment and start all servers.
        """
        super(Test, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        # BrainSplitters let each test stop/start half the object and
        # container servers independently to create inconsistent state.
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name, 'object',
                                   policy=self.policy)
        self.container_brain = BrainSplitter(self.url, self.token,
                                             self.container_name)
        self.int_client = self.make_internal_client(object_post_as_copy=False)
    def tearDown(self):
        """Tear down probe-test resources via the base class."""
        super(Test, self).tearDown()
    def _get_object_info(self, account, container, obj, number):
        """Read on-disk object metadata from object server ``number``.

        Returns the diskfile metadata dict, or None when no primary node
        for the object lives on that server.
        """
        obj_conf = self.configs['object-server']
        config_path = obj_conf[number]
        options = utils.readconf(config_path, 'app:object-server')
        swift_dir = options.get('swift_dir', '/etc/swift')
        ring = POLICIES.get_object_ring(int(self.policy), swift_dir)
        part, nodes = ring.get_nodes(account, container, obj)
        for node in nodes:
            # assumes one to one mapping
            if node['port'] == int(options.get('bind_port')):
                device = node['device']
                break
        else:
            return None
        mgr = DiskFileManager(options, get_logger(options))
        disk_file = mgr.get_diskfile(device, part, account, container, obj,
                                     self.policy)
        info = disk_file.read_metadata()
        return info
def _assert_consistent_object_metadata(self):
obj_info = []
for i in range(1, 5):
info_i = self._get_object_info(self.account, self.container_name,
self.object_name, i)
if info_i:
obj_info.append(info_i)
self.assertTrue(len(obj_info) > 1)
for other in obj_info[1:]:
self.assertDictEqual(obj_info[0], other)
    def _assert_consistent_deleted_object(self):
        """Assert no object server still holds live data for the object.

        DiskFileDeleted is the expected outcome on each server; any
        readable metadata is a failure.
        """
        for i in range(1, 5):
            try:
                info = self._get_object_info(self.account, self.container_name,
                                             self.object_name, i)
                if info is not None:
                    self.fail('Expected no disk file info but found %s' % info)
            except DiskFileDeleted:
                pass
    def _get_db_info(self, account, container, number):
        """Read container DB info from container server ``number``.

        Returns the broker's get_info() dict, or None when no primary
        node for the container lives on that server.
        """
        server_type = 'container'
        obj_conf = self.configs['%s-server' % server_type]
        config_path = obj_conf[number]
        options = utils.readconf(config_path, 'app:container-server')
        root = options.get('devices')
        swift_dir = options.get('swift_dir', '/etc/swift')
        ring = Ring(swift_dir, ring_name=server_type)
        part, nodes = ring.get_nodes(account, container)
        for node in nodes:
            # assumes one to one mapping
            if node['port'] == int(options.get('bind_port')):
                device = node['device']
                break
        else:
            return None
        path_hash = utils.hash_path(account, container)
        _dir = utils.storage_directory('%ss' % server_type, part, path_hash)
        db_dir = os.path.join(root, device, _dir)
        db_file = os.path.join(db_dir, '%s.db' % path_hash)
        db = ContainerBroker(db_file)
        return db.get_info()
def _assert_consistent_container_dbs(self):
db_info = []
for i in range(1, 5):
info_i = self._get_db_info(self.account, self.container_name, i)
if info_i:
db_info.append(info_i)
self.assertTrue(len(db_info) > 1)
for other in db_info[1:]:
self.assertEqual(db_info[0]['hash'], other['hash'],
'Container db hash mismatch: %s != %s'
% (db_info[0]['hash'], other['hash']))
def _assert_object_metadata_matches_listing(self, listing, metadata):
self.assertEqual(listing['bytes'], int(metadata['content-length']))
self.assertEqual(listing['hash'], metadata['etag'])
self.assertEqual(listing['content_type'], metadata['content-type'])
modified = Timestamp(metadata['x-timestamp']).isoformat
self.assertEqual(listing['last_modified'], modified)
    def _put_object(self, headers=None, body=u'stuff'):
        """PUT the test object via the internal client."""
        headers = headers or {}
        self.int_client.upload_object(StringIO(body), self.account,
                                      self.container_name,
                                      self.object_name, headers)
    def _post_object(self, headers):
        """POST metadata to the test object via the internal client."""
        self.int_client.set_object_metadata(self.account, self.container_name,
                                            self.object_name, headers)
    def _delete_object(self):
        """DELETE the test object via the internal client."""
        self.int_client.delete_object(self.account, self.container_name,
                                      self.object_name)
    def _get_object(self, headers=None, expect_statuses=(2,)):
        """GET the test object, asserting the response status class."""
        return self.int_client.get_object(self.account,
                                          self.container_name,
                                          self.object_name,
                                          headers,
                                          acceptable_statuses=expect_statuses)
    def _get_object_metadata(self):
        """HEAD the test object and return its metadata dict."""
        return self.int_client.get_object_metadata(self.account,
                                                   self.container_name,
                                                   self.object_name)
    def _assert_consistent_suffix_hashes(self):
        """Assert every object node reports the same suffix hashes.

        Mismatched suffix hashes would cause the replicator to keep
        syncing, so consistency here means replication has converged.
        """
        opart, onodes = self.object_ring.get_nodes(
            self.account, self.container_name, self.object_name)
        name_hash = hash_path(
            self.account, self.container_name, self.object_name)
        results = []
        for node in onodes:
            results.append(
                (node,
                 direct_get_suffix_hashes(node, opart, [name_hash[-3:]])))
        for (node, hashes) in results[1:]:
            self.assertEqual(results[0][1], hashes,
                             'Inconsistent suffix hashes found: %s' % results)
    def test_object_delete_is_replicated(self):
        """A delete made on half the servers wins over an older PUT after
        replication, and a newer PUT then resurrects the object."""
        self.brain.put_container(policy_index=int(self.policy))
        # put object
        self._put_object()
        # put newer object with sysmeta to first server subset
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        self._put_object()
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()
        # delete object on second server subset
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._delete_object()
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()
        # run replicator
        self.get_to_final_state()
        # check object deletion has been replicated on first server set
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        self._get_object(expect_statuses=(4,))
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()
        # check object deletion persists on second server set
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._get_object(expect_statuses=(4,))
        # put newer object to second server set
        self._put_object()
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()
        # run replicator
        self.get_to_final_state()
        # check new object has been replicated on first server set
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        self._get_object()
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()
        # check new object persists on second server set
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._get_object()
    def test_object_after_replication_with_subsequent_post(self):
        """A POST applied everywhere survives replication of a newer PUT
        that only half the servers saw."""
        self.brain.put_container(policy_index=0)
        # put object
        self._put_object(headers={'Content-Type': 'foo'}, body=u'older')
        # put newer object to first server subset
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        self._put_object(headers={'Content-Type': 'bar'}, body=u'newer')
        metadata = self._get_object_metadata()
        etag = metadata['etag']
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()
        # post some user meta to all servers
        self._post_object({'x-object-meta-bar': 'meta-bar'})
        # run replicator
        self.get_to_final_state()
        # check that newer data has been replicated to second server subset
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        metadata = self._get_object_metadata()
        self.assertEqual(etag, metadata['etag'])
        self.assertEqual('bar', metadata['content-type'])
        self.assertEqual('meta-bar', metadata['x-object-meta-bar'])
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()
        self._assert_consistent_object_metadata()
        self._assert_consistent_container_dbs()
        self._assert_consistent_suffix_hashes()
    def test_sysmeta_after_replication_with_subsequent_put(self):
        """Sysmeta from the newest PUT (plus its later POSTed usermeta)
        wins over an older PUT's sysmeta after replication."""
        sysmeta = {'x-object-sysmeta-foo': 'older'}
        sysmeta2 = {'x-object-sysmeta-foo': 'newer'}
        usermeta = {'x-object-meta-bar': 'meta-bar'}
        self.brain.put_container(policy_index=0)
        # put object with sysmeta to first server subset
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        self._put_object(headers=sysmeta)
        metadata = self._get_object_metadata()
        for key in sysmeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta[key])
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()
        # put object with updated sysmeta to second server subset
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._put_object(headers=sysmeta2)
        metadata = self._get_object_metadata()
        for key in sysmeta2:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta2[key])
        self._post_object(usermeta)
        metadata = self._get_object_metadata()
        for key in usermeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], usermeta[key])
        for key in sysmeta2:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta2[key])
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()
        # run replicator
        self.get_to_final_state()
        # check sysmeta has been replicated to first server subset
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        metadata = self._get_object_metadata()
        for key in usermeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], usermeta[key])
        for key in sysmeta2.keys():
            self.assertTrue(key in metadata, key)
            self.assertEqual(metadata[key], sysmeta2[key])
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()
        # check user sysmeta ok on second server subset
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        metadata = self._get_object_metadata()
        for key in usermeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], usermeta[key])
        for key in sysmeta2.keys():
            self.assertTrue(key in metadata, key)
            self.assertEqual(metadata[key], sysmeta2[key])
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()
        self._assert_consistent_object_metadata()
        self._assert_consistent_container_dbs()
        self._assert_consistent_suffix_hashes()
    def test_sysmeta_after_replication_with_subsequent_post(self):
        """A later POST of usermeta on one half does not wipe sysmeta from
        a PUT seen only by the other half; both merge after replication."""
        sysmeta = {'x-object-sysmeta-foo': 'sysmeta-foo'}
        usermeta = {'x-object-meta-bar': 'meta-bar'}
        self.brain.put_container(policy_index=int(self.policy))
        # put object
        self._put_object()
        # put newer object with sysmeta to first server subset
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        self._put_object(headers=sysmeta)
        metadata = self._get_object_metadata()
        for key in sysmeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta[key])
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()
        # post some user meta to second server subset
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._post_object(usermeta)
        metadata = self._get_object_metadata()
        for key in usermeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], usermeta[key])
        for key in sysmeta:
            self.assertFalse(key in metadata)
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()
        # run replicator
        self.get_to_final_state()
        # check user metadata has been replicated to first server subset
        # and sysmeta is unchanged
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        metadata = self._get_object_metadata()
        expected = dict(sysmeta)
        expected.update(usermeta)
        for key in expected.keys():
            self.assertTrue(key in metadata, key)
            self.assertEqual(metadata[key], expected[key])
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()
        # check user metadata and sysmeta both on second server subset
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        metadata = self._get_object_metadata()
        for key in expected.keys():
            self.assertTrue(key in metadata, key)
            self.assertEqual(metadata[key], expected[key])
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()
        self._assert_consistent_object_metadata()
        self._assert_consistent_container_dbs()
        self._assert_consistent_suffix_hashes()
    def test_sysmeta_after_replication_with_prior_post(self):
        """Usermeta POSTed before a newer PUT is discarded everywhere
        after replication; the newer PUT's sysmeta prevails."""
        sysmeta = {'x-object-sysmeta-foo': 'sysmeta-foo'}
        usermeta = {'x-object-meta-bar': 'meta-bar'}
        self.brain.put_container(policy_index=int(self.policy))
        # put object
        self._put_object()
        # put user meta to first server subset
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._post_object(headers=usermeta)
        metadata = self._get_object_metadata()
        for key in usermeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], usermeta[key])
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()
        # put newer object with sysmeta to second server subset
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        self._put_object(headers=sysmeta)
        metadata = self._get_object_metadata()
        for key in sysmeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta[key])
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()
        # run replicator
        self.get_to_final_state()
        # check stale user metadata is not replicated to first server subset
        # and sysmeta is unchanged
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        metadata = self._get_object_metadata()
        for key in sysmeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta[key])
        for key in usermeta:
            self.assertFalse(key in metadata)
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()
        # check stale user metadata is removed from second server subset
        # and sysmeta is replicated
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        metadata = self._get_object_metadata()
        for key in sysmeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta[key])
        for key in usermeta:
            self.assertFalse(key in metadata)
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()
        self._assert_consistent_object_metadata()
        self._assert_consistent_container_dbs()
        self._assert_consistent_suffix_hashes()
def test_post_ctype_replicated_when_previous_incomplete_puts(self):
# primary half handoff half
# ------------ ------------
# t0.data: ctype = foo
# t1.data: ctype = bar
# t2.meta: ctype = baz
#
# ...run replicator and expect...
#
# t1.data:
# t2.meta: ctype = baz
self.brain.put_container(policy_index=0)
# incomplete write to primary half
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'foo'})
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# handoff write
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar'})
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# content-type update to primary half
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._post_object(headers={'Content-Type': 'baz'})
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
self.get_to_final_state()
# check object metadata
metadata = client.head_object(self.url, self.token,
self.container_name,
self.object_name)
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
expected = 'baz'
self.assertEqual(obj['content_type'], expected)
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
    def test_put_ctype_replicated_when_subsequent_post(self):
        """The newest PUT's content-type survives a later POST that
        carried no content-type."""
        # primary half                     handoff half
        # ------------                     ------------
        # t0.data: ctype = foo
        #                                  t1.data: ctype = bar
        # t2.meta:
        #
        # ...run replicator and expect...
        #
        #               t1.data: ctype = bar
        #               t2.meta:
        self.brain.put_container(policy_index=0)

        # incomplete write
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._put_object(headers={'Content-Type': 'foo'})
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()

        # handoff write
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        self._put_object(headers={'Content-Type': 'bar'})
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()

        # metadata update with newest data unavailable
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._post_object(headers={'X-Object-Meta-Color': 'Blue'})
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()

        self.get_to_final_state()

        # check object metadata
        metadata = client.head_object(self.url, self.token,
                                      self.container_name,
                                      self.object_name)

        # check container listing metadata
        container_metadata, objs = client.get_container(self.url, self.token,
                                                        self.container_name)
        for obj in objs:
            if obj['name'] == self.object_name:
                break
        else:
            self.fail('obj not found in container listing')
        expected = 'bar'
        self.assertEqual(obj['content_type'], expected)
        self.assertEqual(metadata['x-object-meta-color'], 'Blue')
        self._assert_object_metadata_matches_listing(obj, metadata)
        self._assert_consistent_container_dbs()
        self._assert_consistent_object_metadata()
        self._assert_consistent_suffix_hashes()
def test_post_ctype_replicated_when_subsequent_post_without_ctype(self):
# primary half handoff half
# ------------ ------------
# t0.data: ctype = foo
# t1.data: ctype = bar
# t2.meta: ctype = bif
# t3.data: ctype = baz, color = 'Red'
# t4.meta: color = Blue
#
# ...run replicator and expect...
#
# t1.data:
# t4-delta.meta: ctype = baz, color = Blue
self.brain.put_container(policy_index=0)
# incomplete write
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'foo',
'X-Object-Sysmeta-Test': 'older'})
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# handoff write
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar',
'X-Object-Sysmeta-Test': 'newer'})
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# incomplete post with content type
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._post_object(headers={'Content-Type': 'bif'})
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# incomplete post to handoff with content type
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._post_object(headers={'Content-Type': 'baz',
'X-Object-Meta-Color': 'Red'})
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# complete post with no content type
self._post_object(headers={'X-Object-Meta-Color': 'Blue',
'X-Object-Sysmeta-Test': 'ignored'})
# 'baz' wins over 'bar' but 'Blue' wins over 'Red'
self.get_to_final_state()
# check object metadata
metadata = self._get_object_metadata()
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
expected = 'baz'
self.assertEqual(obj['content_type'], expected)
self.assertEqual(metadata['x-object-meta-color'], 'Blue')
self.assertEqual(metadata['x-object-sysmeta-test'], 'newer')
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
def test_put_ctype_replicated_when_subsequent_posts_without_ctype(self):
# primary half handoff half
# ------------ ------------
# t0.data: ctype = foo
# t1.data: ctype = bar
# t2.meta:
# t3.meta
#
# ...run replicator and expect...
#
# t1.data: ctype = bar
# t3.meta
self.brain.put_container(policy_index=0)
self._put_object(headers={'Content-Type': 'foo',
'X-Object-Sysmeta-Test': 'older'})
# incomplete write to handoff half
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar',
'X-Object-Sysmeta-Test': 'newer'})
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# incomplete post with no content type to primary half
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._post_object(headers={'X-Object-Meta-Color': 'Red',
'X-Object-Sysmeta-Test': 'ignored'})
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# incomplete post with no content type to handoff half
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._post_object(headers={'X-Object-Meta-Color': 'Blue'})
self.brain.start_primary_half()
self.container_brain.start_primary_half()
self.get_to_final_state()
# check object metadata
metadata = self._get_object_metadata()
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
expected = 'bar'
self.assertEqual(obj['content_type'], expected)
self._assert_object_metadata_matches_listing(obj, metadata)
self.assertEqual(metadata['x-object-meta-color'], 'Blue')
self.assertEqual(metadata['x-object-sysmeta-test'], 'newer')
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
def test_posted_metadata_only_persists_after_prior_put(self):
# newer metadata posted to subset of nodes should persist after an
# earlier put on other nodes, but older content-type on that subset
# should not persist
self.brain.put_container(policy_index=0)
# incomplete put to handoff
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'oldest',
'X-Object-Sysmeta-Test': 'oldest',
'X-Object-Meta-Test': 'oldest'})
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# incomplete put to primary
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'oldest',
'X-Object-Sysmeta-Test': 'oldest',
'X-Object-Meta-Test': 'oldest'})
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# incomplete post with content-type to handoff
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._post_object(headers={'Content-Type': 'newer',
'X-Object-Meta-Test': 'newer'})
self.brain.start_primary_half()
self.container_brain.start_primary_half()
# incomplete put to primary
self.brain.stop_handoff_half()
self.container_brain.stop_handoff_half()
self._put_object(headers={'Content-Type': 'newest',
'X-Object-Sysmeta-Test': 'newest',
'X-Object-Meta-Test': 'newer'})
self.brain.start_handoff_half()
self.container_brain.start_handoff_half()
# incomplete post with no content-type to handoff which still has
# out of date content-type
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._post_object(headers={'X-Object-Meta-Test': 'newest'})
metadata = self._get_object_metadata()
self.assertEqual(metadata['x-object-meta-test'], 'newest')
self.assertEqual(metadata['content-type'], 'newer')
self.brain.start_primary_half()
self.container_brain.start_primary_half()
self.get_to_final_state()
# check object metadata
metadata = self._get_object_metadata()
self.assertEqual(metadata['x-object-meta-test'], 'newest')
self.assertEqual(metadata['x-object-sysmeta-test'], 'newest')
self.assertEqual(metadata['content-type'], 'newest')
# check container listing metadata
container_metadata, objs = client.get_container(self.url, self.token,
self.container_name)
for obj in objs:
if obj['name'] == self.object_name:
break
self.assertEqual(obj['content_type'], 'newest')
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_object_metadata_matches_listing(obj, metadata)
self._assert_consistent_container_dbs()
self._assert_consistent_object_metadata()
self._assert_consistent_suffix_hashes()
    def test_post_trumped_by_prior_delete(self):
        # new metadata and content-type posted to subset of nodes should not
        # cause object to persist after replication of an earlier delete on
        # other nodes.
        self.brain.put_container(policy_index=0)
        # incomplete put
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        self._put_object(headers={'Content-Type': 'oldest',
                                  'X-Object-Sysmeta-Test': 'oldest',
                                  'X-Object-Meta-Test': 'oldest'})
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()
        # incomplete put then delete
        self.brain.stop_handoff_half()
        self.container_brain.stop_handoff_half()
        self._put_object(headers={'Content-Type': 'oldest',
                                  'X-Object-Sysmeta-Test': 'oldest',
                                  'X-Object-Meta-Test': 'oldest'})
        self._delete_object()
        self.brain.start_handoff_half()
        self.container_brain.start_handoff_half()
        # handoff post
        self.brain.stop_primary_half()
        self.container_brain.stop_primary_half()
        self._post_object(headers={'Content-Type': 'newest',
                                   'X-Object-Sysmeta-Test': 'ignored',
                                   'X-Object-Meta-Test': 'newest'})
        # check object metadata
        metadata = self._get_object_metadata()
        self.assertEqual(metadata['x-object-sysmeta-test'], 'oldest')
        self.assertEqual(metadata['x-object-meta-test'], 'newest')
        self.assertEqual(metadata['content-type'], 'newest')
        self.brain.start_primary_half()
        self.container_brain.start_primary_half()
        # delete trumps later post
        self.get_to_final_state()
        # check object is now deleted
        self.assertRaises(UnexpectedResponse, self._get_object_metadata)
        container_metadata, objs = client.get_container(self.url, self.token,
                                                        self.container_name)
        self.assertEqual(0, len(objs))
        self._assert_consistent_container_dbs()
        self._assert_consistent_deleted_object()
        self._assert_consistent_suffix_hashes()
if __name__ == "__main__":
    # Run the probe tests directly against a running SAIO cluster.
    unittest.main()
| |
import time
from datetime import datetime, timedelta
from StringIO import StringIO
from django.core.handlers.modpython import ModPythonRequest
from django.core.handlers.wsgi import WSGIRequest, LimitedStream
from django.http import HttpRequest, HttpResponse, parse_cookie, build_request_repr
from django.utils import unittest
from django.utils.http import cookie_date
class RequestsTests(unittest.TestCase):
    def test_httprequest(self):
        """A bare HttpRequest starts with empty GET/POST/COOKIES/META."""
        request = HttpRequest()
        self.assertEqual(request.GET.keys(), [])
        self.assertEqual(request.POST.keys(), [])
        self.assertEqual(request.COOKIES.keys(), [])
        self.assertEqual(request.META.keys(), [])
    def test_httprequest_repr(self):
        """repr(), build_request_repr() and its overrides agree for HttpRequest."""
        request = HttpRequest()
        request.path = u'/somepath/'
        request.GET = {u'get-key': u'get-value'}
        request.POST = {u'post-key': u'post-value'}
        request.COOKIES = {u'post-key': u'post-value'}
        request.META = {u'post-key': u'post-value'}
        self.assertEqual(repr(request), u"<HttpRequest\npath:/somepath/,\nGET:{u'get-key': u'get-value'},\nPOST:{u'post-key': u'post-value'},\nCOOKIES:{u'post-key': u'post-value'},\nMETA:{u'post-key': u'post-value'}>")
        self.assertEqual(build_request_repr(request), repr(request))
        self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={u'a': u'b'}, POST_override={u'c': u'd'}, COOKIES_override={u'e': u'f'}, META_override={u'g': u'h'}),
                         u"<HttpRequest\npath:/otherpath/,\nGET:{u'a': u'b'},\nPOST:{u'c': u'd'},\nCOOKIES:{u'e': u'f'},\nMETA:{u'g': u'h'}>")
    def test_wsgirequest(self):
        """WSGIRequest populates META from the environ and defaults SCRIPT_NAME."""
        request = WSGIRequest({'PATH_INFO': 'bogus', 'REQUEST_METHOD': 'bogus', 'wsgi.input': StringIO('')})
        self.assertEqual(request.GET.keys(), [])
        self.assertEqual(request.POST.keys(), [])
        self.assertEqual(request.COOKIES.keys(), [])
        self.assertEqual(set(request.META.keys()), set(['PATH_INFO', 'REQUEST_METHOD', 'SCRIPT_NAME', 'wsgi.input']))
        self.assertEqual(request.META['PATH_INFO'], 'bogus')
        self.assertEqual(request.META['REQUEST_METHOD'], 'bogus')
        self.assertEqual(request.META['SCRIPT_NAME'], '')
    def test_wsgirequest_repr(self):
        """repr(), build_request_repr() and its overrides agree for WSGIRequest."""
        request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': StringIO('')})
        request.GET = {u'get-key': u'get-value'}
        request.POST = {u'post-key': u'post-value'}
        request.COOKIES = {u'post-key': u'post-value'}
        request.META = {u'post-key': u'post-value'}
        self.assertEqual(repr(request), u"<WSGIRequest\npath:/somepath/,\nGET:{u'get-key': u'get-value'},\nPOST:{u'post-key': u'post-value'},\nCOOKIES:{u'post-key': u'post-value'},\nMETA:{u'post-key': u'post-value'}>")
        self.assertEqual(build_request_repr(request), repr(request))
        self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={u'a': u'b'}, POST_override={u'c': u'd'}, COOKIES_override={u'e': u'f'}, META_override={u'g': u'h'}),
                         u"<WSGIRequest\npath:/otherpath/,\nGET:{u'a': u'b'},\nPOST:{u'c': u'd'},\nCOOKIES:{u'e': u'f'},\nMETA:{u'g': u'h'}>")
    def test_modpythonrequest(self):
        """A minimal ModPythonRequest exposes the mod_python uri as its path."""
        class FakeModPythonRequest(ModPythonRequest):
            # Pre-seed the lazy attributes so property access works
            # without a real mod_python request object.
            def __init__(self, *args, **kwargs):
                super(FakeModPythonRequest, self).__init__(*args, **kwargs)
                self._get = self._post = self._meta = self._cookies = {}

        class Dummy:
            def get_options(self):
                return {}

        req = Dummy()
        req.uri = 'bogus'
        request = FakeModPythonRequest(req)
        self.assertEqual(request.path, 'bogus')
        self.assertEqual(request.GET.keys(), [])
        self.assertEqual(request.POST.keys(), [])
        self.assertEqual(request.COOKIES.keys(), [])
        self.assertEqual(request.META.keys(), [])
    def test_modpythonrequest_repr(self):
        """repr(), build_request_repr() and its overrides agree for ModPythonRequest."""
        class Dummy:
            def get_options(self):
                return {}

        req = Dummy()
        req.uri = '/somepath/'
        request = ModPythonRequest(req)
        request._get = {u'get-key': u'get-value'}
        request._post = {u'post-key': u'post-value'}
        request._cookies = {u'post-key': u'post-value'}
        request._meta = {u'post-key': u'post-value'}
        self.assertEqual(repr(request), u"<ModPythonRequest\npath:/somepath/,\nGET:{u'get-key': u'get-value'},\nPOST:{u'post-key': u'post-value'},\nCOOKIES:{u'post-key': u'post-value'},\nMETA:{u'post-key': u'post-value'}>")
        self.assertEqual(build_request_repr(request), repr(request))
        self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={u'a': u'b'}, POST_override={u'c': u'd'}, COOKIES_override={u'e': u'f'}, META_override={u'g': u'h'}),
                         u"<ModPythonRequest\npath:/otherpath/,\nGET:{u'a': u'b'},\nPOST:{u'c': u'd'},\nCOOKIES:{u'e': u'f'},\nMETA:{u'g': u'h'}>")
    def test_parse_cookie(self):
        """parse_cookie silently drops cookies with invalid names."""
        self.assertEqual(parse_cookie('invalid:key=true'), {})
    def test_httprequest_location(self):
        """build_absolute_uri passes absolute URLs through and resolves
        relative locations (including ones containing colons) against the
        request host."""
        request = HttpRequest()
        self.assertEqual(request.build_absolute_uri(location="https://www.example.com/asdf"),
                         'https://www.example.com/asdf')
        request.get_host = lambda: 'www.example.com'
        request.path = ''
        self.assertEqual(request.build_absolute_uri(location="/path/with:colons"),
                         'http://www.example.com/path/with:colons')
    def test_near_expiration(self):
        "Cookie will expire when an near expiration time is provided"
        response = HttpResponse()
        # There is a timing weakness in this test; The
        # expected result for max-age requires that there be
        # a very slight difference between the evaluated expiration
        # time, and the time evaluated in set_cookie(). If this
        # difference doesn't exist, the cookie time will be
        # 1 second larger. To avoid the problem, put in a quick sleep,
        # which guarantees that there will be a time difference.
        expires = datetime.utcnow() + timedelta(seconds=10)
        time.sleep(0.001)
        response.set_cookie('datetime', expires=expires)
        datetime_cookie = response.cookies['datetime']
        self.assertEqual(datetime_cookie['max-age'], 10)
def test_far_expiration(self):
"Cookie will expire when an distant expiration time is provided"
response = HttpResponse()
response.set_cookie('datetime', expires=datetime(2028, 1, 1, 4, 5, 6))
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['expires'], 'Sat, 01-Jan-2028 04:05:06 GMT')
def test_max_age_expiration(self):
"Cookie will expire if max_age is provided"
response = HttpResponse()
response.set_cookie('max_age', max_age=10)
max_age_cookie = response.cookies['max_age']
self.assertEqual(max_age_cookie['max-age'], 10)
self.assertEqual(max_age_cookie['expires'], cookie_date(time.time()+10))
def test_httponly_cookie(self):
response = HttpResponse()
response.set_cookie('example', httponly=True)
example_cookie = response.cookies['example']
# A compat cookie may be in use -- check that it has worked
# both as an output string, and using the cookie attributes
self.assertTrue('; httponly' in str(example_cookie))
self.assertTrue(example_cookie['httponly'])
def test_limited_stream(self):
# Read all of a limited stream
stream = LimitedStream(StringIO('test'), 2)
self.assertEqual(stream.read(), 'te')
# Reading again returns nothing.
self.assertEqual(stream.read(), '')
# Read a number of characters greater than the stream has to offer
stream = LimitedStream(StringIO('test'), 2)
self.assertEqual(stream.read(5), 'te')
# Reading again returns nothing.
self.assertEqual(stream.readline(5), '')
# Read sequentially from a stream
stream = LimitedStream(StringIO('12345678'), 8)
self.assertEqual(stream.read(5), '12345')
self.assertEqual(stream.read(5), '678')
# Reading again returns nothing.
self.assertEqual(stream.readline(5), '')
# Read lines from a stream
stream = LimitedStream(StringIO('1234\n5678\nabcd\nefgh\nijkl'), 24)
# Read a full line, unconditionally
self.assertEqual(stream.readline(), '1234\n')
# Read a number of characters less than a line
self.assertEqual(stream.readline(2), '56')
# Read the rest of the partial line
self.assertEqual(stream.readline(), '78\n')
# Read a full line, with a character limit greater than the line length
self.assertEqual(stream.readline(6), 'abcd\n')
# Read the next line, deliberately terminated at the line end
self.assertEqual(stream.readline(4), 'efgh')
# Read the next line... just the line end
self.assertEqual(stream.readline(), '\n')
# Read everything else.
self.assertEqual(stream.readline(), 'ijkl')
# Regression for #15018
# If a stream contains a newline, but the provided length
# is less than the number of provided characters, the newline
# doesn't reset the available character count
stream = LimitedStream(StringIO('1234\nabcdef'), 9)
self.assertEqual(stream.readline(10), '1234\n')
self.assertEqual(stream.readline(3), 'abc')
# Now expire the available characters
self.assertEqual(stream.readline(3), 'd')
# Reading again returns nothing.
self.assertEqual(stream.readline(2), '')
# Same test, but with read, not readline.
stream = LimitedStream(StringIO('1234\nabcdef'), 9)
self.assertEqual(stream.read(6), '1234\na')
self.assertEqual(stream.read(2), 'bc')
self.assertEqual(stream.read(2), 'd')
self.assertEqual(stream.read(2), '')
self.assertEqual(stream.read(), '')
def test_stream(self):
request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')})
self.assertEqual(request.read(), 'name=value')
def test_read_after_value(self):
"""
Reading from request is allowed after accessing request contents as
POST or raw_post_data.
"""
request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')})
self.assertEqual(request.POST, {u'name': [u'value']})
self.assertEqual(request.raw_post_data, 'name=value')
self.assertEqual(request.read(), 'name=value')
def test_value_after_read(self):
"""
Construction of POST or raw_post_data is not allowed after reading
from request.
"""
request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')})
self.assertEqual(request.read(2), 'na')
self.assertRaises(Exception, lambda: request.raw_post_data)
self.assertEqual(request.POST, {})
def test_raw_post_data_after_POST_multipart(self):
"""
Reading raw_post_data after parsing multipart is not allowed
"""
# Because multipart is used for large amounts fo data i.e. file uploads,
# we don't want the data held in memory twice, and we don't want to
# silence the error by setting raw_post_data = '' either.
payload = "\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
''])
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': StringIO(payload)})
self.assertEqual(request.POST, {u'name': [u'value']})
self.assertRaises(Exception, lambda: request.raw_post_data)
def test_POST_multipart_with_content_length_zero(self):
"""
Multipart POST requests with Content-Length >= 0 are valid and need to be handled.
"""
# According to:
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
# Every request.POST with Content-Length >= 0 is a valid request,
# this test ensures that we handle Content-Length == 0.
payload = "\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
''])
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': 0,
'wsgi.input': StringIO(payload)})
self.assertEqual(request.POST, {})
def test_read_by_lines(self):
request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')})
self.assertEqual(list(request), ['name=value'])
def test_POST_after_raw_post_data_read(self):
"""
POST should be populated even if raw_post_data is read first
"""
request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')})
raw_data = request.raw_post_data
self.assertEqual(request.POST, {u'name': [u'value']})
def test_POST_after_raw_post_data_read_and_stream_read(self):
"""
POST should be populated even if raw_post_data is read first, and then
the stream is read second.
"""
request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')})
raw_data = request.raw_post_data
self.assertEqual(request.read(1), u'n')
self.assertEqual(request.POST, {u'name': [u'value']})
def test_POST_after_raw_post_data_read_and_stream_read_multipart(self):
"""
POST should be populated even if raw_post_data is read first, and then
the stream is read second. Using multipart/form-data instead of urlencoded.
"""
payload = "\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
''])
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': StringIO(payload)})
raw_data = request.raw_post_data
# Consume enough data to mess up the parsing:
self.assertEqual(request.read(13), u'--boundary\r\nC')
self.assertEqual(request.POST, {u'name': [u'value']})
| |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Control RL Unplugged datasets.
Examples in the dataset represent sequences stored when running a partially
trained agent (trained in online way) as described in
https://arxiv.org/abs/2006.13888.
Every dataset has a SARSA version. For environments that we believe may
require a recurrent agent to solve, the dataset also includes a variant with
overlapping sequences of length 40.
Datasets for the dm_control_suite environments only include proprio
observations, while datasets for dm_locomotion include both pixel and proprio
observations.
"""
import collections
import functools
import os
from typing import Dict, Optional, Tuple, Set
from acme import wrappers
from acme.adders import reverb as adders
from dm_control import composer
from dm_control import suite
from dm_control.composer.variation import colors
from dm_control.composer.variation import distributions
from dm_control.locomotion import arenas
from dm_control.locomotion import props
from dm_control.locomotion import tasks
from dm_control.locomotion import walkers
from dm_env import specs
import numpy as np
import reverb
import tensorflow as tf
import tree
def _build_rodent_escape_env():
  """Build environment where a rodent escapes from a bowl."""
  rat = walkers.Rat(
      observable_options={'egocentric_camera': dict(enabled=True)},
  )
  bowl_arena = arenas.bowl.Bowl(
      size=(20., 20.),
      aesthetic='outdoor_natural')
  escape_task = tasks.escape.Escape(
      walker=rat,
      arena=bowl_arena,
      physics_timestep=0.001,
      control_timestep=.02)
  return composer.Environment(
      time_limit=20,
      task=escape_task,
      strip_singleton_obs_buffer_dim=True)
def _build_rodent_maze_env():
  """Build environment where a rodent runs to targets."""
  rat = walkers.Rat(
      observable_options={'egocentric_camera': dict(enabled=True)},
  )
  textures = arenas.labmaze_textures.WallTextures(
      style='style_01')
  maze_arena = arenas.mazes.RandomMazeWithTargets(
      x_cells=11,
      y_cells=11,
      xy_scale=.5,
      z_height=.3,
      max_rooms=4,
      room_min_size=4,
      room_max_size=5,
      spawns_per_room=1,
      targets_per_room=3,
      wall_textures=textures,
      aesthetic='outdoor_natural')
  # Each target sphere gives a reward when touched.
  sphere_builder = functools.partial(
      props.target_sphere.TargetSphere,
      radius=0.05,
      height_above_ground=.125,
      rgb1=(0, 0, 0.4),
      rgb2=(0, 0, 0.7))
  maze_task = tasks.random_goal_maze.ManyGoalsMaze(
      walker=rat,
      maze_arena=maze_arena,
      target_builder=sphere_builder,
      target_reward_scale=50.,
      contact_termination=False,
      control_timestep=.02,
      physics_timestep=0.001)
  return composer.Environment(
      time_limit=30,
      task=maze_task,
      strip_singleton_obs_buffer_dim=True)
def _build_rodent_corridor_gaps():
  """Build environment where a rodent runs over gaps."""
  rat = walkers.Rat(
      observable_options={'egocentric_camera': dict(enabled=True)},
  )
  # Platform and gap sizes are randomized per episode.
  gaps_arena = arenas.corridors.GapsCorridor(
      corridor_width=2,
      platform_length=distributions.Uniform(low=0.4, high=0.8),
      gap_length=distributions.Uniform(low=0.05, high=0.2),
      corridor_length=40,
      aesthetic='outdoor_natural')
  corridor_task = tasks.corridors.RunThroughCorridor(
      walker=rat,
      arena=gaps_arena,
      walker_spawn_position=(5, 0, 0),
      walker_spawn_rotation=0,
      target_velocity=1.0,
      contact_termination=False,
      terminate_at_height=-0.3,
      physics_timestep=0.001,
      control_timestep=.02)
  return composer.Environment(
      time_limit=30,
      task=corridor_task,
      strip_singleton_obs_buffer_dim=True)
def _build_rodent_two_touch_env():
  """Build environment where a rodent touches targets."""
  rat = walkers.Rat(
      observable_options={'egocentric_camera': dict(enabled=True)},
  )
  floor = arenas.floors.Floor(
      size=(10., 10.), aesthetic='outdoor_natural')
  touch_task = tasks.reach.TwoTouch(
      walker=rat,
      arena=floor,
      target_builders=[
          functools.partial(
              props.target_sphere.TargetSphereTwoTouch,
              radius=0.025),
      ],
      randomize_spawn_rotation=True,
      target_type_rewards=[25.],
      shuffle_target_builders=False,
      target_area=(1.5, 1.5),
      physics_timestep=0.001,
      control_timestep=.02)
  return composer.Environment(
      time_limit=30,
      task=touch_task,
      strip_singleton_obs_buffer_dim=True)
def _build_humanoid_walls_env():
  """Build CMU humanoid walls-corridor environment."""
  humanoid = walkers.CMUHumanoidPositionControlled(
      name='walker',
      observable_options={'egocentric_camera': dict(enabled=True)},
  )
  # Wall geometry, side, and colour are all randomized per episode.
  wall_rgba = colors.RgbVariation(
      r=distributions.Uniform(low=0.5, high=0.6),
      g=distributions.Uniform(low=0.21, high=0.41),
      b=0,
      alpha=1)
  walls_arena = arenas.WallsCorridor(
      wall_gap=5.0,
      wall_width=distributions.Uniform(low=1, high=7),
      wall_height=distributions.Uniform(low=2.5, high=4.0),
      swap_wall_side=distributions.Bernoulli(prob=0.5),
      wall_rgba=wall_rgba,
      corridor_width=10,
      corridor_length=100)
  walls_task = tasks.RunThroughCorridor(
      walker=humanoid,
      arena=walls_arena,
      walker_spawn_rotation=1.57,  # pi / 2
      physics_timestep=0.005,
      control_timestep=0.03)
  return composer.Environment(
      time_limit=30,
      task=walls_task,
      strip_singleton_obs_buffer_dim=True)
def _build_humanoid_corridor_env():
  """Build CMU humanoid empty-corridor environment."""
  humanoid = walkers.CMUHumanoidPositionControlled(
      name='walker',
      observable_options={'egocentric_camera': dict(enabled=True)},
  )
  corridor = arenas.EmptyCorridor(
      corridor_width=10,
      corridor_length=100)
  run_task = tasks.RunThroughCorridor(
      walker=humanoid,
      arena=corridor,
      walker_spawn_rotation=1.57,  # pi / 2
      physics_timestep=0.005,
      control_timestep=0.03)
  return composer.Environment(
      time_limit=30,
      task=run_task,
      strip_singleton_obs_buffer_dim=True)
def _build_humanoid_corridor_gaps():
  """Build CMU humanoid gaps-corridor environment."""
  humanoid = walkers.CMUHumanoidPositionControlled(
      name='walker',
      observable_options={'egocentric_camera': dict(enabled=True)},
  )
  # Platform and gap lengths are randomized per episode.
  gaps_arena = arenas.GapsCorridor(
      corridor_width=10,
      platform_length=distributions.Uniform(low=0.3, high=2.5),
      gap_length=distributions.Uniform(low=0.75, high=1.25),
      corridor_length=100)
  run_task = tasks.RunThroughCorridor(
      walker=humanoid,
      arena=gaps_arena,
      walker_spawn_position=(2, 0, 0),
      walker_spawn_rotation=1.57,  # pi / 2
      physics_timestep=0.005,
      control_timestep=0.03)
  return composer.Environment(
      time_limit=30,
      task=run_task,
      strip_singleton_obs_buffer_dim=True)
class MujocoActionNormalizer(wrappers.EnvironmentWrapper):
  """Rescale actions to [-1, 1] range for mujoco physics engine.

  For control environments whose actions have bounded range in [-1, 1], this
  adaptor rescales actions to the desired range. This allows the actor network
  to output unscaled actions for better gradient dynamics.
  """

  def __init__(self, environment, rescale='clip'):
    super().__init__(environment)
    # 'tanh' squashes smoothly; 'clip' hard-limits to [-1, 1].
    self._rescale = rescale

  def step(self, action):
    """Rescale actions to [-1, 1] range before stepping wrapped environment."""
    if self._rescale == 'tanh':
      squash = np.tanh
    elif self._rescale == 'clip':
      squash = lambda a: np.clip(a, -1., 1.)
    else:
      raise ValueError('Unrecognized scaling option: %s' % self._rescale)
    return self._environment.step(tree.map_structure(squash, action))
class NormilizeActionSpecWrapper(wrappers.EnvironmentWrapper):
  """Turn each dimension of the actions into the range of [-1, 1]."""

  def __init__(self, environment):
    super().__init__(environment)
    spec = environment.action_spec()
    # Affine map parameters used to recover the native action range.
    self._scale = spec.maximum - spec.minimum
    self._offset = spec.minimum
    # Exposed spec keeps the original shape/dtype but is bounded by [-1, 1].
    self._action_spec = specs.BoundedArray(
        spec.shape,
        spec.dtype,
        spec.minimum * 0 - 1.,
        spec.minimum * 0 + 1.,
        name=spec.name)

  def _from_normal_actions(self, actions):
    # [-1, 1] -> [0, 1] -> [minimum, maximum]
    return 0.5 * (actions + 1.0) * self._scale + self._offset

  def step(self, action):
    return self._environment.step(self._from_normal_actions(action))

  def action_spec(self):
    return self._action_spec
class FilterObservationsWrapper(wrappers.EnvironmentWrapper):
  """Filter out all the observations not specified to this wrapper."""

  def __init__(self, environment, observations_to_keep):
    super().__init__(environment)
    self._observations_to_keep = observations_to_keep
    full_spec = self._environment.observation_spec()
    # Preserve the caller-specified key order in the filtered spec.
    self._observation_spec = collections.OrderedDict(
        (key, full_spec[key]) for key in observations_to_keep)

  def _filter_observation(self, timestep):
    # Keep only the requested keys, in the requested order.
    kept = collections.OrderedDict(
        (key, timestep.observation[key])
        for key in self._observations_to_keep)
    return timestep._replace(observation=kept)

  def step(self, action):
    return self._filter_observation(self._environment.step(action))

  def reset(self):
    return self._filter_observation(self._environment.reset())

  def observation_spec(self):
    return self._observation_spec
class ControlSuite:
  """Create bits needed to run agents on an Control Suite dataset."""
  def __init__(self, task_name='humanoid_run'):
    """Initializes datasets/environments for the Deepmind Control suite.

    Args:
      task_name: task name. Must be one of,
        finger_turn_hard, manipulator_insert_peg, humanoid_run,
        cartpole_swingup, cheetah_run, fish_swim, manipulator_insert_ball,
        walker_stand, walker_walk
    """
    self.task_name = task_name
    # Control-suite datasets have no pixel observations, so there are no
    # raw-uint8 features to decode.
    self._uint8_features = set([])
    # Built lazily by the `environment` property on first access.
    self._environment = None
    # Each branch records the dm_control (domain, task) pair and the flat
    # feature-name -> shape mapping used to parse the TFRecord examples.
    # NOTE(review): the docstring advertises 'fish_swim' but this branch
    # matches 'swim' -- confirm which spelling callers actually pass.
    if task_name == 'swim':
      self._domain_name = 'fish'
      self._task_name = 'swim'
      self._shapes = {
          'observation/target': (3,),
          'observation/velocity': (13,),
          'observation/upright': (1,),
          'observation/joint_angles': (7,),
          'action': (5,),
          'discount': (),
          'reward': (),
          'episodic_reward': (),
          'step_type': ()
      }
    elif task_name == 'humanoid_run':
      self._domain_name = 'humanoid'
      self._task_name = 'run'
      self._shapes = {
          'observation/velocity': (27,),
          'observation/com_velocity': (3,),
          'observation/torso_vertical': (3,),
          'observation/extremities': (12,),
          'observation/head_height': (1,),
          'observation/joint_angles': (21,),
          'action': (21,),
          'discount': (),
          'reward': (),
          'episodic_reward': (),
          'step_type': ()
      }
    elif task_name == 'manipulator_insert_ball':
      self._domain_name = 'manipulator'
      self._task_name = 'insert_ball'
      self._shapes = {
          'observation/arm_pos': (16,),
          'observation/arm_vel': (8,),
          'observation/touch': (5,),
          'observation/hand_pos': (4,),
          'observation/object_pos': (4,),
          'observation/object_vel': (3,),
          'observation/target_pos': (4,),
          'action': (5,),
          'discount': (),
          'reward': (),
          'episodic_reward': (),
          'step_type': ()}
    elif task_name == 'manipulator_insert_peg':
      self._domain_name = 'manipulator'
      self._task_name = 'insert_peg'
      self._shapes = {
          'observation/arm_pos': (16,),
          'observation/arm_vel': (8,),
          'observation/touch': (5,),
          'observation/hand_pos': (4,),
          'observation/object_pos': (4,),
          'observation/object_vel': (3,),
          'observation/target_pos': (4,),
          'episodic_reward': (),
          'action': (5,),
          'discount': (),
          'reward': (),
          'step_type': ()}
    elif task_name == 'cartpole_swingup':
      self._domain_name = 'cartpole'
      self._task_name = 'swingup'
      self._shapes = {
          'observation/position': (3,),
          'observation/velocity': (2,),
          'action': (1,),
          'discount': (),
          'reward': (),
          'episodic_reward': (),
          'step_type': ()}
    elif task_name == 'walker_walk':
      self._domain_name = 'walker'
      self._task_name = 'walk'
      self._shapes = {
          'observation/orientations': (14,),
          'observation/velocity': (9,),
          'observation/height': (1,),
          'action': (6,),
          'discount': (),
          'reward': (),
          'episodic_reward': (),
          'step_type': ()}
    elif task_name == 'walker_stand':
      self._domain_name = 'walker'
      self._task_name = 'stand'
      self._shapes = {
          'observation/orientations': (14,),
          'observation/velocity': (9,),
          'observation/height': (1,),
          'action': (6,),
          'discount': (),
          'reward': (),
          'episodic_reward': (),
          'step_type': ()}
    elif task_name == 'cheetah_run':
      self._domain_name = 'cheetah'
      self._task_name = 'run'
      self._shapes = {
          'observation/position': (8,),
          'observation/velocity': (9,),
          'action': (6,),
          'discount': (),
          'reward': (),
          'episodic_reward': (),
          'step_type': ()}
    elif task_name == 'finger_turn_hard':
      self._domain_name = 'finger'
      self._task_name = 'turn_hard'
      self._shapes = {
          'observation/position': (4,),
          'observation/velocity': (3,),
          'observation/touch': (2,),
          'observation/target_position': (2,),
          'observation/dist_to_target': (1,),
          'action': (2,),
          'discount': (),
          'reward': (),
          'episodic_reward': (),
          'step_type': ()}
    else:
      raise ValueError('Task \'{}\' not found.'.format(task_name))
    # Shard path of the training split, relative to the dataset root.
    self._data_path = 'dm_control_suite/{}/train'.format(task_name)
  @property
  def shapes(self):
    # Feature-name -> shape mapping for TFRecord parsing.
    return self._shapes
  @property
  def data_path(self):
    return self._data_path
  @property
  def uint8_features(self):
    return self._uint8_features
  @property
  def environment(self):
    """Build and return the environment."""
    # Memoized: the environment is constructed once and reused.
    if self._environment is not None:
      return self._environment
    self._environment = suite.load(
        domain_name=self._domain_name,
        task_name=self._task_name)
    self._environment = wrappers.SinglePrecisionWrapper(self._environment)
    self._environment = NormilizeActionSpecWrapper(self._environment)
    return self._environment
class CmuThirdParty:
  """Create bits needed to run agents on an locomotion humanoid dataset."""
  def __init__(self, task_name='humanoid_walls'):
    # 'humanoid_corridor|humanoid_gaps|humanoid_walls'
    self._task_name = task_name
    self._pixel_keys = self.get_pixel_keys()
    # The egocentric camera is stored as raw uint8 bytes in the TFRecords.
    self._uint8_features = set(['observation/walker/egocentric_camera'])
    # NOTE(review): appears unused within this class -- confirm whether any
    # caller relies on it before removing.
    self.additional_paths = {}
    # Proprioceptive observation keys exposed by the environment (the same
    # names appear below prefixed with 'observation/' in the dataset).
    self._proprio_keys = [
        'walker/joints_vel',
        'walker/sensors_velocimeter',
        'walker/sensors_gyro',
        'walker/joints_pos',
        'walker/world_zaxis',
        'walker/body_height',
        'walker/sensors_accelerometer',
        'walker/end_effectors_pos'
    ]
    # Flat feature-name -> shape mapping used to parse TFRecord examples.
    self._shapes = {
        'observation/walker/joints_vel': (56,),
        'observation/walker/sensors_velocimeter': (3,),
        'observation/walker/sensors_gyro': (3,),
        'observation/walker/joints_pos': (56,),
        'observation/walker/world_zaxis': (3,),
        'observation/walker/body_height': (1,),
        'observation/walker/sensors_accelerometer': (3,),
        'observation/walker/end_effectors_pos': (12,),
        'observation/walker/egocentric_camera': (
            64,
            64,
            3,
        ),
        'action': (56,),
        'discount': (),
        'reward': (),
        'episodic_reward': (),
        'step_type': ()
    }
    # seq2 = short sequences; seq40 = overlapping length-40 sequences for
    # tasks thought to need recurrence (see module docstring).
    if task_name == 'humanoid_corridor':
      self._data_path = 'dm_locomotion/humanoid_corridor/seq2/train'
    elif task_name == 'humanoid_gaps':
      self._data_path = 'dm_locomotion/humanoid_gaps/seq2/train'
    elif task_name == 'humanoid_walls':
      self._data_path = 'dm_locomotion/humanoid_walls/seq40/train'
    else:
      raise ValueError('Task \'{}\' not found.'.format(task_name))
  @staticmethod
  def get_pixel_keys():
    # Pixel (camera) observation keys, without the 'observation/' prefix.
    return ('walker/egocentric_camera',)
  @property
  def uint8_features(self):
    return self._uint8_features
  @property
  def shapes(self):
    return self._shapes
  @property
  def data_path(self):
    return self._data_path
  @property
  def environment(self):
    """Build and return the environment."""
    if self._task_name == 'humanoid_corridor':
      self._environment = _build_humanoid_corridor_env()
    elif self._task_name == 'humanoid_gaps':
      self._environment = _build_humanoid_corridor_gaps()
    elif self._task_name == 'humanoid_walls':
      self._environment = _build_humanoid_walls_env()
    # Normalize the action spec, clip incoming actions, cast to float32, and
    # expose only the proprio + pixel observations listed above.
    self._environment = NormilizeActionSpecWrapper(self._environment)
    self._environment = MujocoActionNormalizer(
        environment=self._environment, rescale='clip')
    self._environment = wrappers.SinglePrecisionWrapper(self._environment)
    all_observations = list(self._proprio_keys) + list(self._pixel_keys)
    self._environment = FilterObservationsWrapper(self._environment,
                                                  all_observations)
    return self._environment
class Rodent:
  """Create bits needed to run agents on an Rodent dataset."""
  def __init__(self, task_name='rodent_gaps'):
    # 'rodent_escape|rodent_two_touch|rodent_gaps|rodent_mazes'
    self._task_name = task_name
    self._pixel_keys = self.get_pixel_keys()
    # The egocentric camera is stored as raw uint8 bytes in the TFRecords.
    self._uint8_features = set(['observation/walker/egocentric_camera'])
    # Proprioceptive observation keys exposed by the environment.
    self._proprio_keys = [
        'walker/joints_pos', 'walker/joints_vel', 'walker/tendons_pos',
        'walker/tendons_vel', 'walker/appendages_pos', 'walker/world_zaxis',
        'walker/sensors_accelerometer', 'walker/sensors_velocimeter',
        'walker/sensors_gyro', 'walker/sensors_touch',
    ]
    # Flat feature-name -> shape mapping used to parse TFRecord examples.
    # NOTE(review): unlike ControlSuite/CmuThirdParty, no 'episodic_reward'
    # entry here -- confirm the rodent datasets really lack that feature.
    self._shapes = {
        'observation/walker/joints_pos': (30,),
        'observation/walker/joints_vel': (30,),
        'observation/walker/tendons_pos': (8,),
        'observation/walker/tendons_vel': (8,),
        'observation/walker/appendages_pos': (15,),
        'observation/walker/world_zaxis': (3,),
        'observation/walker/sensors_accelerometer': (3,),
        'observation/walker/sensors_velocimeter': (3,),
        'observation/walker/sensors_gyro': (3,),
        'observation/walker/sensors_touch': (4,),
        'observation/walker/egocentric_camera': (64, 64, 3),
        'action': (38,),
        'discount': (),
        'reward': (),
        'step_type': ()
    }
    # seq2 = short sequences; seq40 = overlapping length-40 sequences for
    # tasks thought to need recurrence (see module docstring).
    if task_name == 'rodent_gaps':
      self._data_path = 'dm_locomotion/rodent_gaps/seq2/train'
    elif task_name == 'rodent_escape':
      self._data_path = 'dm_locomotion/rodent_bowl_escape/seq2/train'
    elif task_name == 'rodent_two_touch':
      self._data_path = 'dm_locomotion/rodent_two_touch/seq40/train'
    elif task_name == 'rodent_mazes':
      self._data_path = 'dm_locomotion/rodent_mazes/seq40/train'
    else:
      raise ValueError('Task \'{}\' not found.'.format(task_name))
  @staticmethod
  def get_pixel_keys():
    # Pixel (camera) observation keys, without the 'observation/' prefix.
    return ('walker/egocentric_camera',)
  @property
  def shapes(self):
    return self._shapes
  @property
  def uint8_features(self):
    return self._uint8_features
  @property
  def data_path(self):
    return self._data_path
  @property
  def environment(self):
    """Return environment."""
    if self._task_name == 'rodent_escape':
      self._environment = _build_rodent_escape_env()
    elif self._task_name == 'rodent_gaps':
      self._environment = _build_rodent_corridor_gaps()
    elif self._task_name == 'rodent_two_touch':
      self._environment = _build_rodent_two_touch_env()
    elif self._task_name == 'rodent_mazes':
      self._environment = _build_rodent_maze_env()
    # Normalize the action spec, clip incoming actions, cast to float32, and
    # expose only the proprio + pixel observations listed above.
    self._environment = NormilizeActionSpecWrapper(self._environment)
    self._environment = MujocoActionNormalizer(
        environment=self._environment, rescale='clip')
    self._environment = wrappers.SinglePrecisionWrapper(self._environment)
    all_observations = list(self._proprio_keys) + list(self._pixel_keys)
    self._environment = FilterObservationsWrapper(self._environment,
                                                  all_observations)
    return self._environment
def _parse_seq_tf_example(example, uint8_features, shapes):
  """Parse tf.Example containing one or two episode steps."""

  def to_feature(key, shape):
    # Pixel features are stored as raw byte strings; everything else as
    # float32 sequences.
    if key in uint8_features:
      return tf.io.FixedLenSequenceFeature(
          shape=[], dtype=tf.string, allow_missing=True)
    return tf.io.FixedLenSequenceFeature(
        shape=shape, dtype=tf.float32, allow_missing=True)

  feature_map = {key: to_feature(key, shape) for key, shape in shapes.items()}
  parsed = tf.io.parse_single_example(example, features=feature_map)
  # Regroup the flat 'observation/...' keys under a nested 'observation' dict.
  observation = {}
  restructured = {}
  for key, value in parsed.items():
    if 'observation' not in key:
      restructured[key] = value
    elif key in uint8_features:
      # Decode raw bytes back into a uint8 tensor of the recorded shape.
      observation[key.replace('observation/', '')] = tf.reshape(
          tf.io.decode_raw(value, out_type=tf.uint8), (-1,) + shapes[key])
    else:
      observation[key.replace('observation/', '')] = value
  restructured['observation'] = observation
  restructured['length'] = tf.shape(restructured['action'])[0]
  return restructured
def _build_sequence_example(sequences):
  """Convert raw sequences into a Reverb sequence sample."""
  step = adders.Step(
      observation=sequences['observation'],
      action=sequences['action'],
      reward=sequences['reward'],
      discount=sequences['discount'],
      start_of_episode=(),
      extras=())
  # Constant sample info: the offline dataset carries no real priorities.
  info = reverb.SampleInfo(
      key=tf.constant(0, tf.uint64),
      probability=tf.constant(1.0, tf.float64),
      table_size=tf.constant(0, tf.int64),
      priority=tf.constant(1.0, tf.float64))
  return reverb.ReplaySample(info=info, data=step)
def _build_sarsa_example(sequences):
  """Convert raw sequences into a Reverb n-step SARSA sample."""

  def select(index, key):
    # Pick the step at `index` from every leaf of the named sequence.
    return tree.map_structure(lambda t: t[index], sequences[key])

  o_tm1 = select(0, 'observation')
  a_tm1 = select(0, 'action')
  r_t = select(0, 'reward')
  p_t = select(0, 'discount')
  o_t = select(1, 'observation')
  a_t = select(1, 'action')
  # Constant sample info: the offline dataset carries no real priorities.
  info = reverb.SampleInfo(
      key=tf.constant(0, tf.uint64),
      probability=tf.constant(1.0, tf.float64),
      table_size=tf.constant(0, tf.int64),
      priority=tf.constant(1.0, tf.float64))
  return reverb.ReplaySample(info=info, data=(o_tm1, a_tm1, r_t, p_t, o_t, a_t))
def _padded_batch(example_ds, batch_size, shapes, drop_remainder=False):
  """Batch data while handling unequal lengths."""
  # Mirror the nested example structure: observations live in a sub-dict and
  # every tensor is padded along its variable leading (time) dimension.
  padded_shapes = {'observation': {}}
  for key, shape in shapes.items():
    if 'observation' in key:
      padded_shapes['observation'][key.replace('observation/', '')] = (
          (-1,) + shape)
    else:
      padded_shapes[key] = (-1,) + shape
  padded_shapes['length'] = ()
  return example_ds.padded_batch(
      batch_size, padded_shapes=padded_shapes, drop_remainder=drop_remainder)
def dataset(root_path: str,
            data_path: str,
            shapes: Dict[str, Tuple[int]],
            num_threads: int,
            batch_size: int,
            uint8_features: Optional[Set[str]] = None,
            num_shards: int = 100,
            shuffle_buffer_size: int = 100000,
            sarsa: bool = True) -> tf.data.Dataset:
  """Create tf dataset for training.

  Args:
    root_path: Root directory containing the dataset shards.
    data_path: Shard path relative to `root_path` (see the task classes'
      `data_path` properties).
    shapes: Mapping from flat feature name to feature shape.
    num_threads: Number of parallel calls used for example parsing.
    batch_size: Number of examples per batch.
    uint8_features: Names of features stored as raw uint8 bytes (pixels).
    num_shards: Number of TFRecord shards the dataset is split into.
    shuffle_buffer_size: Buffer size for example-level shuffling.
    sarsa: If True, emit n-step SARSA transition tuples; otherwise emit
      padded sequence examples.

  Returns:
    An infinitely repeating, shuffled, batched tf.data.Dataset of
    reverb.ReplaySample elements.
  """
  # Use an empty *set* (not dict) to match the declared Optional[Set[str]].
  uint8_features = uint8_features if uint8_features else set()
  path = os.path.join(root_path, data_path)
  filenames = [f'{path}-{i:05d}-of-{num_shards:05d}' for i in range(num_shards)]
  file_ds = tf.data.Dataset.from_tensor_slices(filenames)
  file_ds = file_ds.repeat().shuffle(num_shards)
  example_ds = file_ds.interleave(
      functools.partial(tf.data.TFRecordDataset, compression_type='GZIP'),
      cycle_length=tf.data.experimental.AUTOTUNE,
      block_length=5)
  example_ds = example_ds.shuffle(shuffle_buffer_size)

  def map_func(example):
    return _parse_seq_tf_example(example, uint8_features, shapes)

  example_ds = example_ds.map(map_func, num_parallel_calls=num_threads)
  example_ds = example_ds.repeat().shuffle(batch_size * 10)
  if sarsa:
    example_ds = example_ds.map(
        _build_sarsa_example,
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # Bug fix: tf.data transformations are not in-place -- the previous
    # `example_ds.batch(batch_size)` discarded its result, so SARSA datasets
    # were never actually batched.  The result must be reassigned.
    example_ds = example_ds.batch(batch_size)
  else:
    example_ds = _padded_batch(
        example_ds, batch_size, shapes, drop_remainder=True)
    example_ds = example_ds.map(
        _build_sequence_example,
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
  example_ds = example_ds.prefetch(tf.data.experimental.AUTOTUNE)
  return example_ds
| |
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Routines for testing WSGI applications.
Most interesting is TestApp
"""
import sys
import random
import urllib
import urlparse
import mimetypes
import time
import cgi
import os
#import webbrowser
from Cookie import BaseCookie
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import re
from webob import Response, Request
from wsgiref.validate import validator
__all__ = ['TestApp']
def tempnam_no_warning(*args):
    """
    Thin wrapper around ``os.tempnam``.

    NOTE(review): despite the name, nothing here actually silences the
    security RuntimeWarning -- presumably warnings are filtered elsewhere
    or the indirection itself is relied upon; confirm before depending
    on the "no warning" behavior.
    """
    return os.tempnam(*args)
class NoDefault(object):
    """Sentinel type used to distinguish "no value supplied" from None."""
# Compatibility shim for Python < 2.4, where the sorted() builtin does not
# exist.  The fallback accepts only an iterable (no key/cmp/reverse support)
# and returns a new sorted list, leaving the input untouched.
try:
    sorted
except NameError:
    def sorted(l):
        l = list(l)
        l.sort()
        return l
class AppError(Exception):
    """Raised when the application under test responds in an unexpected way."""
class TestApp(object):
    """
    Wraps a WSGI application in a convenient testing interface
    (``.get()``, ``.post()``, ``.put()``, ``.delete()``), carrying
    cookies between requests and running every request through the
    wsgiref validator.
    """
    # for py.test
    disabled = True
    def __init__(self, app, extra_environ=None, relative_to=None):
        """
        Wraps a WSGI application in a more convenient interface for
        testing.
        ``app`` may be an application, or a Paste Deploy app
        URI, like ``'config:filename.ini#test'``.
        ``extra_environ`` is a dictionary of values that should go
        into the environment for each request.  These can provide a
        communication channel with the application.
        ``relative_to`` is a directory, and filenames used for file
        uploads are calculated relative to this.  Also ``config:``
        URIs that aren't absolute.
        """
        if isinstance(app, (str, unicode)):
            from paste.deploy import loadapp
            # @@: Should pick up relative_to from calling module's
            # __file__
            app = loadapp(app, relative_to=relative_to)
        self.app = app
        self.relative_to = relative_to
        if extra_environ is None:
            extra_environ = {}
        self.extra_environ = extra_environ
        self.reset()
    def reset(self):
        """
        Resets the state of the application; currently just clears
        saved cookies.
        """
        self.cookies = {}
    def _make_environ(self, extra_environ=None):
        # Build the base environ for a request: instance-level extras
        # first, then per-request overrides.  paste.throw_errors makes
        # the app raise instead of returning error pages.
        environ = self.extra_environ.copy()
        environ['paste.throw_errors'] = True
        if extra_environ:
            environ.update(extra_environ)
        return environ
    def get(self, url, params=None, headers=None, extra_environ=None,
            status=None, expect_errors=False):
        """
        Get the given url (well, actually a path like
        ``'/page.html'``).
        ``params``:
            A query string, or a dictionary that will be encoded
            into a query string.  You may also include a query
            string on the ``url``.
        ``headers``:
            A dictionary of extra headers to send.
        ``extra_environ``:
            A dictionary of environmental variables that should
            be added to the request.
        ``status``:
            The integer status code you expect (if not 200 or 3xx).
            If you expect a 404 response, for instance, you must give
            ``status=404`` or it will be an error.  You can also give
            a wildcard, like ``'3*'`` or ``'*'``.
        ``expect_errors``:
            If this is not true, then if anything is written to
            ``wsgi.errors`` it will be an error.  If it is true, then
            non-200/3xx responses are also okay.
        Returns a ``webob.Response`` object.
        """
        environ = self._make_environ(extra_environ)
        # Hide from py.test:
        __tracebackhide__ = True
        if params:
            if not isinstance(params, (str, unicode)):
                params = urllib.urlencode(params, doseq=True)
            # Append to any query string already present on the url.
            if '?' in url:
                url += '&'
            else:
                url += '?'
            url += params
        url = str(url)
        if '?' in url:
            url, environ['QUERY_STRING'] = url.split('?', 1)
        else:
            environ['QUERY_STRING'] = ''
        req = TestRequest.blank(url, environ)
        if headers:
            req.headers.update(headers)
        return self.do_request(req, status=status,
                               expect_errors=expect_errors)
    def _gen_request(self, method, url, params='', headers=None, extra_environ=None,
                     status=None, upload_files=None, expect_errors=False):
        """
        Do a generic request.  ``params`` end up in the request body;
        ``upload_files`` forces a multipart/form-data encoding.
        """
        environ = self._make_environ(extra_environ)
        # @@: Should this be all non-strings?
        if isinstance(params, (list, tuple, dict)):
            params = urllib.urlencode(params)
        if hasattr(params, 'items'):
            params = urllib.urlencode(params.items())
        if upload_files:
            # Re-parse the body into pairs so files can be merged in.
            params = cgi.parse_qsl(params, keep_blank_values=True)
            content_type, params = self.encode_multipart(
                params, upload_files)
            environ['CONTENT_TYPE'] = content_type
        elif params:
            environ.setdefault('CONTENT_TYPE', 'application/x-www-form-urlencoded')
        if '?' in url:
            url, environ['QUERY_STRING'] = url.split('?', 1)
        else:
            environ['QUERY_STRING'] = ''
        environ['CONTENT_LENGTH'] = str(len(params))
        environ['REQUEST_METHOD'] = method
        environ['wsgi.input'] = StringIO(params)
        req = TestRequest.blank(url, environ)
        if headers:
            req.headers.update(headers)
        return self.do_request(req, status=status,
                               expect_errors=expect_errors)
    def post(self, url, params='', headers=None, extra_environ=None,
             status=None, upload_files=None, expect_errors=False):
        """
        Do a POST request.  Very like the ``.get()`` method.
        ``params`` are put in the body of the request.
        ``upload_files`` is for file uploads.  It should be a list of
        ``[(fieldname, filename, file_content)]``.  You can also use
        just ``[(fieldname, filename)]`` and the file content will be
        read from disk.
        Returns a ``webob.Response`` object.
        """
        return self._gen_request('POST', url, params=params, headers=headers,
                                 extra_environ=extra_environ, status=status,
                                 upload_files=upload_files,
                                 expect_errors=expect_errors)
    def put(self, url, params='', headers=None, extra_environ=None,
            status=None, upload_files=None, expect_errors=False):
        """
        Do a PUT request.  Very like the ``.get()`` method.
        ``params`` are put in the body of the request.
        ``upload_files`` is for file uploads.  It should be a list of
        ``[(fieldname, filename, file_content)]``.  You can also use
        just ``[(fieldname, filename)]`` and the file content will be
        read from disk.
        Returns a ``webob.Response`` object.
        """
        return self._gen_request('PUT', url, params=params, headers=headers,
                                 extra_environ=extra_environ, status=status,
                                 upload_files=upload_files,
                                 expect_errors=expect_errors)
    def delete(self, url, headers=None, extra_environ=None,
               status=None, expect_errors=False):
        """
        Do a DELETE request.  Very like the ``.get()`` method.
        The request is sent with an empty body.
        Returns a ``webob.Response`` object.
        """
        # BUG FIX: the original passed ``params=params`` here, but no
        # ``params`` name exists in this scope, so every call raised
        # NameError.  DELETE sends an empty body.
        return self._gen_request('DELETE', url, params='', headers=headers,
                                 extra_environ=extra_environ, status=status,
                                 upload_files=None, expect_errors=expect_errors)
    def encode_multipart(self, params, files):
        """
        Encodes a set of parameters (typically a name/value list) and
        a set of files (a list of (name, filename, file_body)) into a
        typical POST body, returning the (content_type, body).
        """
        boundary = '----------a_BoUnDaRy%s$' % random.random()
        lines = []
        for key, value in params:
            lines.append('--'+boundary)
            lines.append('Content-Disposition: form-data; name="%s"' % key)
            lines.append('')
            lines.append(value)
        for file_info in files:
            key, filename, value = self._get_file_info(file_info)
            lines.append('--'+boundary)
            lines.append('Content-Disposition: form-data; name="%s"; filename="%s"'
                         % (key, filename))
            fcontent = mimetypes.guess_type(filename)[0]
            # BUG FIX: '%' binds tighter than 'or', so the original
            # expression produced the literal header
            # 'Content-Type: None' when the type could not be guessed.
            lines.append('Content-Type: %s'
                         % (fcontent or 'application/octet-stream'))
            lines.append('')
            lines.append(value)
        lines.append('--' + boundary + '--')
        lines.append('')
        body = '\r\n'.join(lines)
        content_type = 'multipart/form-data; boundary=%s' % boundary
        return content_type, body
    def _get_file_info(self, file_info):
        # Normalize an upload_files entry to (fieldname, filename,
        # content), reading the file from disk for 2-tuples.
        if len(file_info) == 2:
            # It only has a filename
            filename = file_info[1]
            if self.relative_to:
                filename = os.path.join(self.relative_to, filename)
            f = open(filename, 'rb')
            content = f.read()
            f.close()
            return (file_info[0], filename, content)
        elif len(file_info) == 3:
            return file_info
        else:
            raise ValueError(
                "upload_files need to be a list of tuples of (fieldname, "
                "filename, filecontent) or (fieldname, filename); "
                "you gave: %r"
                % repr(file_info)[:100])
    def do_request(self, req, status, expect_errors):
        """
        Executes the given request (``req``), with the expected
        ``status``.  Generally ``.get()`` and ``.post()`` are used
        instead.
        """
        __tracebackhide__ = True
        errors = StringIO()
        req.environ['wsgi.errors'] = errors
        if self.cookies:
            c = BaseCookie()
            for name, value in self.cookies.items():
                c[name] = value
            # str(c) renders "Set-Cookie: <pairs>"; keep only the pairs.
            req.environ['HTTP_COOKIE'] = str(c).split(': ', 1)[1]
        req.environ['paste.testing'] = True
        req.environ['paste.testing_variables'] = {}
        app = validator(self.app)
        old_stdout = sys.stdout
        out = CaptureStdout(old_stdout)
        try:
            # Capture anything the app prints; replayed to stderr below.
            sys.stdout = out
            ## FIXME: should it be an option to not catch exc_info?
            res = req.get_response(app, catch_exc_info=True)
        finally:
            sys.stdout = old_stdout
            sys.stderr.write(out.getvalue())
        res.app = app
        res.test_app = self
        # We do this to make sure the app_iter is exhausted:
        res.body
        res.errors = errors.getvalue()
        for name, value in req.environ['paste.testing_variables'].items():
            if hasattr(res, name):
                raise ValueError(
                    "paste.testing_variables contains the variable %r, but "
                    "the response object already has an attribute by that "
                    "name" % name)
            setattr(res, name, value)
        if not expect_errors:
            self._check_status(status, res)
            self._check_errors(res)
        # Record cookies set by this response, both on the response and
        # on the app (so they are sent on subsequent requests).
        res.cookies_set = {}
        for header in res.headers.getall('set-cookie'):
            c = BaseCookie(header)
            for key, morsel in c.items():
                self.cookies[key] = morsel.value
                res.cookies_set[key] = morsel.value
        return res
    def _check_status(self, status, res):
        # Raise AppError unless the response status matches the
        # expectation ('*' wildcard, a list of codes, a single code, or
        # the default 200-399 window).
        __tracebackhide__ = True
        if status == '*':
            return
        if isinstance(status, (list, tuple)):
            if res.status_int not in status:
                raise AppError(
                    "Bad response: %s (not one of %s for %s)\n%s"
                    % (res.status, ', '.join(map(str, status)),
                       res.request.url, res.body))
            return
        if status is None:
            if res.status_int >= 200 and res.status_int < 400:
                return
            raise AppError(
                "Bad response: %s (not 200 OK or 3xx redirect for %s)\n%s"
                % (res.status, res.request.url,
                   res.body))
        if status != res.status_int:
            raise AppError(
                "Bad response: %s (not %s)" % (res.status, status))
    def _check_errors(self, res):
        # Anything written to wsgi.errors is an error unless
        # expect_errors was passed.
        errors = res.errors
        if errors:
            raise AppError(
                "Application had errors logged:\n%s" % errors)
class CaptureStdout(object):
    """
    File-like tee stream: everything written is both recorded in an
    internal buffer and forwarded to the wrapped ``actual`` stream.
    """
    def __init__(self, actual):
        self.captured = StringIO()
        self.actual = actual
    def write(self, s):
        # Keep a private copy, then pass the text straight through.
        self.captured.write(s)
        self.actual.write(s)
    def flush(self):
        self.actual.flush()
    def writelines(self, lines):
        for chunk in lines:
            self.write(chunk)
    def getvalue(self):
        """Return everything captured so far."""
        return self.captured.getvalue()
class TestResponse(Response):
"""
Instances of this class are return by ``TestApp``
"""
_forms_indexed = None
def forms__get(self):
"""
Returns a dictionary of ``Form`` objects. Indexes are both in
order (from zero) and by form id (if the form is given an id).
"""
if self._forms_indexed is None:
self._parse_forms()
return self._forms_indexed
forms = property(forms__get,
doc="""
A list of <form>s found on the page (instances of
``Form``)
""")
def form__get(self):
forms = self.forms
if not forms:
raise TypeError(
"You used response.form, but no forms exist")
if 1 in forms:
# There is more than one form
raise TypeError(
"You used response.form, but more than one form exists")
return forms[0]
form = property(form__get,
doc="""
Returns a single ``Form`` instance; it
is an error if there are multiple forms on the
page.
""")
_tag_re = re.compile(r'<(/?)([:a-z0-9_\-]*)(.*?)>', re.S|re.I)
def _parse_forms(self):
forms = self._forms_indexed = {}
form_texts = []
started = None
for match in self._tag_re.finditer(self.body):
end = match.group(1) == '/'
tag = match.group(2).lower()
if tag != 'form':
continue
if end:
assert started, (
"</form> unexpected at %s" % match.start())
form_texts.append(self.body[started:match.end()])
started = None
else:
assert not started, (
"Nested form tags at %s" % match.start())
started = match.start()
assert not started, (
"Danging form: %r" % self.body[started:])
for i, text in enumerate(form_texts):
form = Form(self, text)
forms[i] = form
if form.id:
forms[form.id] = form
def follow(self, **kw):
"""
If this request is a redirect, follow that redirect. It
is an error if this is not a redirect response. Returns
another response object.
"""
assert self.status_int >= 300 and self.status_int < 400, (
"You can only follow redirect responses (not %s)"
% self.status)
location = self.headers['location']
type, rest = urllib.splittype(location)
host, path = urllib.splithost(rest)
# @@: We should test that it's not a remote redirect
return self.test_app.get(location, **kw)
def click(self, description=None, linkid=None, href=None,
anchor=None, index=None, verbose=False):
"""
Click the link as described. Each of ``description``,
``linkid``, and ``url`` are *patterns*, meaning that they are
either strings (regular expressions), compiled regular
expressions (objects with a ``search`` method), or callables
returning true or false.
All the given patterns are ANDed together:
* ``description`` is a pattern that matches the contents of the
anchor (HTML and all -- everything between ``<a...>`` and
``</a>``)
* ``linkid`` is a pattern that matches the ``id`` attribute of
the anchor. It will receive the empty string if no id is
given.
* ``href`` is a pattern that matches the ``href`` of the anchor;
the literal content of that attribute, not the fully qualified
attribute.
* ``anchor`` is a pattern that matches the entire anchor, with
its contents.
If more than one link matches, then the ``index`` link is
followed. If ``index`` is not given and more than one link
matches, or if no link matches, then ``IndexError`` will be
raised.
If you give ``verbose`` then messages will be printed about
each link, and why it does or doesn't match. If you use
``app.click(verbose=True)`` you'll see a list of all the
links.
You can use multiple criteria to essentially assert multiple
aspects about the link, e.g., where the link's destination is.
"""
__tracebackhide__ = True
found_html, found_desc, found_attrs = self._find_element(
tag='a', href_attr='href',
href_extract=None,
content=description,
id=linkid,
href_pattern=href,
html_pattern=anchor,
index=index, verbose=verbose)
return self.goto(found_attrs['uri'])
def clickbutton(self, description=None, buttonid=None, href=None,
button=None, index=None, verbose=False):
"""
Like ``.click()``, except looks for link-like buttons.
This kind of button should look like
``<button onclick="...location.href='url'...">``.
"""
__tracebackhide__ = True
found_html, found_desc, found_attrs = self._find_element(
tag='button', href_attr='onclick',
href_extract=re.compile(r"location\.href='(.*?)'"),
content=description,
id=buttonid,
href_pattern=href,
html_pattern=button,
index=index, verbose=verbose)
return self.goto(found_attrs['uri'])
def _find_element(self, tag, href_attr, href_extract,
content, id,
href_pattern,
html_pattern,
index, verbose):
content_pat = _make_pattern(content)
id_pat = _make_pattern(id)
href_pat = _make_pattern(href_pattern)
html_pat = _make_pattern(html_pattern)
_tag_re = re.compile(r'<%s\s+(.*?)>(.*?)</%s>' % (tag, tag),
re.I+re.S)
def printlog(s):
if verbose:
print s
found_links = []
total_links = 0
for match in _tag_re.finditer(self.body):
el_html = match.group(0)
el_attr = match.group(1)
el_content = match.group(2)
attrs = _parse_attrs(el_attr)
if verbose:
printlog('Element: %r' % el_html)
if not attrs.get(href_attr):
printlog(' Skipped: no %s attribute' % href_attr)
continue
el_href = attrs[href_attr]
if href_extract:
m = href_extract.search(el_href)
if not m:
printlog(" Skipped: doesn't match extract pattern")
continue
el_href = m.group(1)
attrs['uri'] = el_href
if el_href.startswith('#'):
printlog(' Skipped: only internal fragment href')
continue
if el_href.startswith('javascript:'):
printlog(' Skipped: cannot follow javascript:')
continue
total_links += 1
if content_pat and not content_pat(el_content):
printlog(" Skipped: doesn't match description")
continue
if id_pat and not id_pat(attrs.get('id', '')):
printlog(" Skipped: doesn't match id")
continue
if href_pat and not href_pat(el_href):
printlog(" Skipped: doesn't match href")
continue
if html_pat and not html_pat(el_html):
printlog(" Skipped: doesn't match html")
continue
printlog(" Accepted")
found_links.append((el_html, el_content, attrs))
if not found_links:
raise IndexError(
"No matching elements found (from %s possible)"
% total_links)
if index is None:
if len(found_links) > 1:
raise IndexError(
"Multiple links match: %s"
% ', '.join([repr(anc) for anc, d, attr in found_links]))
found_link = found_links[0]
else:
try:
found_link = found_links[index]
except IndexError:
raise IndexError(
"Only %s (out of %s) links match; index %s out of range"
% (len(found_links), total_links, index))
return found_link
def goto(self, href, method='get', **args):
"""
Go to the (potentially relative) link ``href``, using the
given method (``'get'`` or ``'post'``) and any extra arguments
you want to pass to the ``app.get()`` or ``app.post()``
methods.
All hostnames and schemes will be ignored.
"""
scheme, host, path, query, fragment = urlparse.urlsplit(href)
# We
scheme = host = fragment = ''
href = urlparse.urlunsplit((scheme, host, path, query, fragment))
href = urlparse.urljoin(self.request.url, href)
method = method.lower()
assert method in ('get', 'post'), (
'Only "get" or "post" are allowed for method (you gave %r)'
% method)
if method == 'get':
method = self.test_app.get
else:
method = self.test_app.post
return method(href, **args)
_normal_body_regex = re.compile(r'[ \n\r\t]+')
_normal_body = None
def normal_body__get(self):
if self._normal_body is None:
self._normal_body = self._normal_body_regex.sub(
' ', self.body)
return self._normal_body
normal_body = property(normal_body__get,
doc="""
Return the whitespace-normalized body
""".strip())
def unicode_normal_body__get(self):
if not self.charset:
raise AttributeError(
"You cannot access Response.unicode_normal_body unless charset is set")
return self.normal_body.decode(self.charset)
unicode_normal_body = property(
unicode_normal_body__get, doc="""
Return the whitespace-normalized body, as unicode
""".strip())
def __contains__(self, s):
"""
A response 'contains' a string if it is present in the body
of the response. Whitespace is normalized when searching
for a string.
"""
if not isinstance(s, basestring):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
s = str(s)
if isinstance(s, unicode):
body = self.unicode_body
normal_body = self.unicode_normal_body
else:
body = self.body
normal_body = self.normal_body
return s in body or s in normal_body
def mustcontain(self, *strings, **kw):
"""
Assert that the response contains all of the strings passed
in as arguments.
Equivalent to::
assert string in res
"""
if 'no' in kw:
no = kw['no']
del kw['no']
if isinstance(no, basestring):
no = [no]
else:
no = []
if kw:
raise TypeError(
"The only keyword argument allowed is 'no'")
for s in strings:
if not s in self:
print >> sys.stderr, "Actual response (no %r):" % s
print >> sys.stderr, self
raise IndexError(
"Body does not contain string %r" % s)
for no_s in no:
if no_s in self:
print >> sys.stderr, "Actual response (has %r)" % s
print >> sys.stderr, self
raise IndexError(
"Body contains string %r" % s)
def __str__(self):
simple_body = '\n'.join([l for l in self.body.splitlines()
if l.strip()])
return 'Response: %s\n%s\n%s' % (
self.status,
'\n'.join(['%s: %s' % (n, v) for n, v in self.headerlist]),
simple_body)
def __repr__(self):
# Specifically intended for doctests
if self.content_type:
ct = ' %s' % self.content_type
else:
ct = ''
if self.body:
br = repr(self.body)
if len(br) > 18:
br = br[:10]+'...'+br[-5:]
body = ' body=%s/%s' % (br, len(self.body))
else:
body = ' no body'
if self.location:
location = ' location: %s' % self.location
else:
location = ''
return ('<' + self.status + ct + location + body + '>')
def html(self):
"""
Returns the response as a `BeautifulSoup
<http://www.crummy.com/software/BeautifulSoup/documentation.html>`_
object.
Only works with HTML responses; other content-types raise
AttributeError.
"""
if 'html' not in self.content_type:
raise AttributeError(
"Not an HTML response body (content-type: %s)"
% self.content_type)
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
raise ImportError(
"You must have BeautifulSoup installed to use response.html")
soup = BeautifulSoup(self.body)
return soup
html = property(html, doc=html.__doc__)
def xml(self):
"""
Returns the response as an `ElementTree
<http://python.org/doc/current/lib/module-xml.etree.ElementTree.html>`_
object.
Only works with XML responses; other content-types raise
AttributeError
"""
if 'xml' not in self.content_type:
raise AttributeError(
"Not an XML response body (content-type: %s)"
% self.content_type)
try:
from xml.etree import ElementTree
except ImportError:
try:
import ElementTree
except ImportError:
try:
from elementtree import ElementTree
except ImportError:
raise ImportError(
"You must have ElementTree installed (or use Python 2.5) to use response.xml")
return ElementTree.XML(self.body)
xml = property(xml, doc=xml.__doc__)
def lxml(self):
"""
Returns the response as an `lxml object
<http://codespeak.net/lxml/>`_. You must have lxml installed
to use this.
If this is an HTML response and you have lxml 2.x installed,
then an ``lxml.html.HTML`` object will be returned; if you
have an earlier version of lxml then a ``lxml.HTML`` object
will be returned.
"""
if ('html' not in self.content_type
and 'xml' not in self.content_type):
raise AttributeError(
"Not an XML or HTML response body (content-type: %s)"
% self.content_type)
try:
from lxml import etree
except ImportError:
raise ImportError(
"You must have lxml installed to use response.lxml")
try:
from lxml.html import fromstring
except ImportError:
fromstring = etree.HTML
## FIXME: would be nice to set xml:base, in some fashion
if self.content_type == 'text/html':
return fromstring(self.body)
else:
return etree.XML(self.body)
lxml = property(lxml, doc=lxml.__doc__)
def json(self):
"""
Return the response as a JSON response. You must have
`simplejson
<http://svn.red-bean.com/bob/simplejson/tags/simplejson-1.7/docs/index.html>`_
installed to use this.
The content type must be application/json to use this.
"""
if self.content_type != 'application/json':
raise AttributeError(
"Not a JSON response body (content-type: %s)"
% self.content_type)
try:
from simplejson import loads
except ImportError:
raise ImportError(
"You must have simplejson installed to use response.json")
return loads(self.body)
json = property(json, doc=json.__doc__)
def showbrowser(self):
"""
Show this response in a browser window (for debugging purposes,
when it's hard to read the HTML).
"""
fn = tempnam_no_warning(None, 'webtest-page') + '.html'
f = open(fn, 'wb')
f.write(self.body)
f.close()
url = 'file:' + fn.replace(os.sep, '/')
webbrowser.open_new(url)
class TestRequest(Request):
    # for py.test
    disabled = True
    # Responses built from this request are TestResponse instances,
    # giving access to .forms, .click(), .mustcontain(), etc.
    ResponseClass = TestResponse
########################################
## Form objects
########################################
class Form(object):
    """
    This object represents a form that has been found in a page.
    This has a couple useful attributes:
    ``text``:
        the full HTML of the form.
    ``action``:
        the relative URI of the action.
    ``method``:
        the method (e.g., ``'GET'``).
    ``id``:
        the id, or None if not given.
    ``fields``:
        a dictionary of fields, each value is a list of fields by
        that name.  ``<input type=\"radio\">`` and ``<select>`` are
        both represented as single fields with multiple options.
    """
    # @@: This really should be using Mechanize/ClientForm or
    # something...
    # Matches any tag: group(1) is '/' for closing tags, group(2) the
    # tag name, group(3) the raw attribute text.
    _tag_re = re.compile(r'<(/?)([a-z0-9_\-]*)([^>]*?)>', re.I)
    def __init__(self, response, text):
        # response: the TestResponse the form was found on (used by
        # .submit()); text: the form's full HTML.
        self.response = response
        self.text = text
        self._parse_fields()
        self._parse_action()
    def _parse_fields(self):
        # Single pass over the tags, tracking the currently-open
        # <select> / <textarea> so <option>s and textarea bodies can be
        # attached to the right field.
        in_select = None
        in_textarea = None
        fields = {}
        for match in self._tag_re.finditer(self.text):
            end = match.group(1) == '/'
            tag = match.group(2).lower()
            if tag not in ('input', 'select', 'option', 'textarea',
                           'button'):
                continue
            if tag == 'select' and end:
                assert in_select, (
                    '%r without starting select' % match.group(0))
                in_select = None
                continue
            if tag == 'textarea' and end:
                assert in_textarea, (
                    "</textarea> with no <textarea> at %s" % match.start())
                # The textarea's value is the unquoted text between the
                # opening tag's end and this closing tag's start.
                in_textarea[0].value = html_unquote(self.text[in_textarea[1]:match.start()])
                in_textarea = None
                continue
            if end:
                continue
            attrs = _parse_attrs(match.group(3))
            if 'name' in attrs:
                name = attrs.pop('name')
            else:
                name = None
            if tag == 'option':
                # Options belong to the enclosing <select> (or Radio).
                in_select.options.append((attrs.get('value'),
                                          'selected' in attrs))
                continue
            if tag == 'input' and attrs.get('type') == 'radio':
                # All radios sharing a name collapse into one Radio
                # field whose options accumulate.
                field = fields.get(name)
                if not field:
                    field = Radio(self, tag, name, match.start(), **attrs)
                    fields.setdefault(name, []).append(field)
                else:
                    field = field[0]
                    assert isinstance(field, Radio)
                field.options.append((attrs.get('value'),
                                      'checked' in attrs))
                continue
            tag_type = tag
            if tag == 'input':
                tag_type = attrs.get('type', 'text').lower()
            # Dispatch on tag/type via the registry on Field.
            FieldClass = Field.classes.get(tag_type, Field)
            field = FieldClass(self, tag, name, match.start(), **attrs)
            if tag == 'textarea':
                assert not in_textarea, (
                    "Nested textareas: %r and %r"
                    % (in_textarea, match.group(0)))
                in_textarea = field, match.end()
            elif tag == 'select':
                assert not in_select, (
                    "Nested selects: %r and %r"
                    % (in_select, match.group(0)))
                in_select = field
            fields.setdefault(name, []).append(field)
        self.fields = fields
    def _parse_action(self):
        # Pull action/method/id off the opening <form> tag.
        self.action = None
        for match in self._tag_re.finditer(self.text):
            end = match.group(1) == '/'
            tag = match.group(2).lower()
            if tag != 'form':
                continue
            if end:
                break
            attrs = _parse_attrs(match.group(3))
            self.action = attrs.get('action', '')
            self.method = attrs.get('method', 'GET')
            self.id = attrs.get('id')
            # @@: enctype?
        else:
            assert 0, "No </form> tag found"
        assert self.action is not None, (
            "No <form> tag found")
    def __setitem__(self, name, value):
        """
        Set the value of the named field.  If there is 0 or multiple
        fields by that name, it is an error.
        Setting the value of a ``<select>`` selects the given option
        (and confirms it is an option).  Setting radio fields does the
        same.  Checkboxes get boolean values.  You cannot set hidden
        fields or buttons.
        Use ``.set()`` if there is any ambiguity and you must provide
        an index.
        """
        fields = self.fields.get(name)
        assert fields is not None, (
            "No field by the name %r found (fields: %s)"
            % (name, ', '.join(map(repr, self.fields.keys()))))
        assert len(fields) == 1, (
            "Multiple fields match %r: %s"
            % (name, ', '.join(map(repr, fields))))
        fields[0].value = value
    def __getitem__(self, name):
        """
        Get the named field object (ambiguity is an error).
        """
        fields = self.fields.get(name)
        assert fields is not None, (
            "No field by the name %r found" % name)
        assert len(fields) == 1, (
            "Multiple fields match %r: %s"
            % (name, ', '.join(map(repr, fields))))
        return fields[0]
    def set(self, name, value, index=None):
        """
        Set the given name, using ``index`` to disambiguate.
        """
        if index is None:
            self[name] = value
        else:
            fields = self.fields.get(name)
            assert fields is not None, (
                "No fields found matching %r" % name)
            field = fields[index]
            field.value = value
    def get(self, name, index=None, default=NoDefault):
        """
        Get the named/indexed field object, or ``default`` if no field
        is found.
        """
        fields = self.fields.get(name)
        if fields is None and default is not NoDefault:
            return default
        if index is None:
            return self[name]
        else:
            fields = self.fields.get(name)
            assert fields is not None, (
                "No fields found matching %r" % name)
            field = fields[index]
            return field
    def select(self, name, value, index=None):
        """
        Like ``.set()``, except also confirms the target is a
        ``<select>``.
        """
        field = self.get(name, index=index)
        assert isinstance(field, Select)
        field.value = value
    def submit(self, name=None, index=None, **args):
        """
        Submits the form.  If ``name`` is given, then also select that
        button (using ``index`` to disambiguate)``.
        Any extra keyword arguments are passed to the ``.get()`` or
        ``.post()`` method.
        """
        fields = self.submit_fields(name, index=index)
        return self.response.goto(self.action, method=self.method,
                                  params=fields, **args)
    def submit_fields(self, name=None, index=None):
        """
        Return a list of ``[(name, value), ...]`` for the current
        state of the form.
        """
        submit = []
        if name is not None:
            # The clicked button contributes its value explicitly.
            field = self.get(name, index=index)
            submit.append((field.name, field.value_if_submitted()))
        for name, fields in self.fields.items():
            for field in fields:
                value = field.value
                # Fields whose value is None (unchecked boxes,
                # unclicked buttons) are not submitted.
                if value is None:
                    continue
                submit.append((name, value))
        return submit
# Matches one attribute: group(1) is the name, group(2) a double-quoted
# value, group(3) an unquoted value.
_attr_re = re.compile(r'([^= \n\r\t]+)[ \n\r\t]*(?:=[ \n\r\t]*(?:"([^"]*)"|([^"][^ \n\r\t>]*)))?', re.S)
def _parse_attrs(text):
    """
    Parse the raw attribute text of a tag into a {name: value} dict.
    Attribute names are lowercased; values are HTML-unquoted, with
    valueless attributes mapped to ''.
    """
    parsed = {}
    for m in _attr_re.finditer(text):
        attr = m.group(1).lower()
        raw = m.group(2) or m.group(3)
        parsed[attr] = html_unquote(raw or '')
    return parsed
class Field(object):
    """
    Field object.
    """
    # Registry mapping tag/type names (e.g. 'select', 'checkbox') to
    # Field subclasses; populated by the subclass definitions below.
    classes = {}
    # Whether test code may assign .value directly.
    settable = True
    def __init__(self, form, tag, name, pos,
                 value=None, id=None, **attrs):
        self.form = form
        self.tag = tag
        self.name = name
        self.pos = pos
        self._value = value
        self.id = id
        # Any remaining HTML attributes, kept verbatim.
        self.attrs = attrs
    def value__set(self, value):
        if self.settable:
            self._value = value
            return
        raise AttributeError(
            "You cannot set the value of the <%s> field %r"
            % (self.tag, self.name))
    def force_value(self, value):
        """
        Like setting a value, except forces it even for, say, hidden
        fields.
        """
        self._value = value
    def value__get(self):
        return self._value
    value = property(value__get, value__set)
class Select(Field):
    """
    Field representing ``<select>``
    """
    def __init__(self, *args, **attrs):
        super(Select, self).__init__(*args, **attrs)
        # (value, selected?) pairs, appended by the form parser.
        self.options = []
        self.multiple = attrs.get('multiple')
        assert not self.multiple, (
            "<select multiple> not yet supported")
        # Undetermined yet:
        self.selectedIndex = None
    def value__set(self, value):
        wanted = str(value)
        for idx, (option, checked) in enumerate(self.options):
            if option == wanted:
                self.selectedIndex = idx
                return
        raise ValueError(
            "Option %r not found (from %s)"
            % (value, ', '.join(
                [repr(o) for o, c in self.options])))
    def value__get(self):
        # An explicit selection made by test code wins.
        if self.selectedIndex is not None:
            return self.options[self.selectedIndex][0]
        # Otherwise the option flagged 'selected' in the markup.
        for option, checked in self.options:
            if checked:
                return option
        # Otherwise the first option, if any (browser default).
        if self.options:
            return self.options[0][0]
        return None
    value = property(value__get, value__set)
Field.classes['select'] = Select
class Radio(Select):
    """
    Field representing ``<input type="radio">``
    """
    # Behaves like a single-valued <select>: the form parser collects
    # each radio input sharing this name into .options.
Field.classes['radio'] = Radio
class Checkbox(Field):
    """
    Field representing ``<input type="checkbox">``
    """
    def __init__(self, *args, **attrs):
        super(Checkbox, self).__init__(*args, **attrs)
        # Initially checked iff the 'checked' attribute was present.
        self.checked = 'checked' in attrs
    def value__set(self, value):
        # Any truthy value checks the box; falsy unchecks it.
        self.checked = bool(value)
    def value__get(self):
        if not self.checked:
            # Unchecked boxes are not submitted at all.
            return None
        # @@: 'on'?
        if self._value is None:
            return 'checked'
        return self._value
    value = property(value__get, value__set)
Field.classes['checkbox'] = Checkbox
class Text(Field):
    """
    Field representing ``<input type="text">``
    """
    # Plain text input; inherits default get/set behavior from Field.
Field.classes['text'] = Text
class Textarea(Text):
    """
    Field representing ``<textarea>``
    """
    # Value is filled in from the tag body by Form._parse_fields().
Field.classes['textarea'] = Textarea
class Hidden(Text):
    """
    Field representing ``<input type="hidden">``
    """
    # NOTE: still settable via .value here; use force_value() semantics
    # apply only where a subclass sets settable = False.
Field.classes['hidden'] = Hidden
class Submit(Field):
    """
    Field representing ``<input type="submit">`` and ``<button>``
    """
    # Buttons cannot be assigned a value by test code.
    settable = False
    def value__get(self):
        # A button contributes no value unless it triggered the submit
        # (see Form.submit_fields, which skips None values).
        return None
    value = property(value__get)
    def value_if_submitted(self):
        # The original value attribute, sent only for the clicked button.
        return self._value
Field.classes['submit'] = Submit
Field.classes['button'] = Submit
Field.classes['image'] = Submit
########################################
## Utility functions
########################################
def _popget(d, key, default=None):
"""
Pop the key if found (else return default)
"""
if key in d:
return d.pop(key)
return default
def _space_prefix(pref, full, sep=None, indent=None, include_sep=True):
"""
Anything shared by pref and full will be replaced with spaces
in full, and full returned.
"""
if sep is None:
sep = os.path.sep
pref = pref.split(sep)
full = full.split(sep)
padding = []
while pref and full and pref[0] == full[0]:
if indent is None:
padding.append(' ' * (len(full[0]) + len(sep)))
else:
padding.append(' ' * indent)
full.pop(0)
pref.pop(0)
if padding:
if include_sep:
return ''.join(padding) + sep + sep.join(full)
else:
return ''.join(padding) + sep.join(full)
else:
return sep.join(full)
def _make_pattern(pat):
if pat is None:
return None
if isinstance(pat, (str, unicode)):
pat = re.compile(pat)
if hasattr(pat, 'search'):
return pat.search
if callable(pat):
return pat
assert 0, (
"Cannot make callable pattern object out of %r" % pat)
def html_unquote(v):
    """
    Unquote (some) entities in HTML.  (incomplete)
    """
    # BUG FIX: the entity names had been lost (each pair replaced text
    # with itself, making the function a no-op).  Restore the standard
    # table; '&amp;' must come last so it does not re-expand the others.
    for ent, repl in [('&nbsp;', ' '), ('&gt;', '>'),
                      ('&lt;', '<'), ('&quot;', '"'),
                      ('&amp;', '&')]:
        v = v.replace(ent, repl)
    return v
| |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Product tests for SSH authentication for presto-admin commands
"""
import os
import subprocess
import re
from nose.plugins.attrib import attr
from tests.no_hadoop_bare_image_provider import NoHadoopBareImageProvider
from tests.product.base_product_case import BaseProductTestCase, docker_only
from tests.product.cluster_types import STANDALONE_PRESTO_CLUSTER
from constants import LOCAL_RESOURCES_DIR
from tests.product.config_dir_utils import get_catalog_directory, get_presto_admin_path
class TestAuthentication(BaseProductTestCase):
    """Product tests for presto-admin SSH authentication.

    Exercises ``catalog add`` under combinations of: passwordless SSH vs.
    password-prompted SSH, root vs. non-root (``app-admin``) users, and the
    ``-I`` (interactive password), ``-p/--password`` and ``-i`` (keyfile)
    options.  Output comparisons ignore line order because hosts respond
    in parallel.
    """
    def setUp(self):
        super(TestAuthentication, self).setUp()
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PRESTO_CLUSTER)
    # Expected stdout for a successful `catalog add` across the four nodes.
    success_output = (
        'Deploying tpch.properties catalog configurations on: slave1 \n'
        'Deploying tpch.properties catalog configurations on: master \n'
        'Deploying tpch.properties catalog configurations on: slave2 \n'
        'Deploying tpch.properties catalog configurations on: slave3 \n'
    )
    # Noise emitted by getpass when stdin is a pipe (the "-I" mode).
    interactive_text = (
        '/usr/lib64/python2.6/getpass.py:83: GetPassWarning: Can not control '
        'echo on the terminal.\n'
        'Initial value for env.password: \n'
        'Warning: Password input may be echoed.\n'
        ' passwd = fallback_getpass(prompt, stream)\n'
    )
    # Per-host sudo prompts produced when running as a non-root user.
    sudo_password_prompt = (
        '[master] out: sudo password:\n'
        '[master] out: \n'
        '[slave1] out: sudo password:\n'
        '[slave1] out: \n'
        '[slave2] out: sudo password:\n'
        '[slave2] out: \n'
        '[slave3] out: sudo password:\n'
        '[slave3] out: \n'
    )
    def parallel_password_failure_message(self, with_sudo_prompt=True):
        """Build the expected output for a failed parallel password auth.

        Reads the canned failure text and, when with_sudo_prompt is True,
        appends the per-host "Sorry, try again." sudo exchanges, filled in
        with the cluster's internal host names.
        """
        with open(os.path.join(LOCAL_RESOURCES_DIR,
                               'parallel_password_failure.txt')) as f:
            parallel_password_failure = f.read()
        if with_sudo_prompt:
            parallel_password_failure += (
                '[%(slave3)s] out: sudo password:\n'
                '[%(slave3)s] out: Sorry, try again.\n'
                '[%(slave2)s] out: sudo password:\n'
                '[%(slave2)s] out: Sorry, try again.\n'
                '[%(slave1)s] out: sudo password:\n'
                '[%(slave1)s] out: Sorry, try again.\n'
                '[%(master)s] out: sudo password:\n'
                '[%(master)s] out: Sorry, try again.\n')
        parallel_password_failure = parallel_password_failure % {
            'master': self.cluster.internal_master,
            'slave1': self.cluster.internal_slaves[0],
            'slave2': self.cluster.internal_slaves[1],
            'slave3': self.cluster.internal_slaves[2]}
        return parallel_password_failure
    def non_root_sudo_warning_message(self):
        """Return the canned warning text shown when sudo'ing as non-root."""
        with open(os.path.join(LOCAL_RESOURCES_DIR,
                               'non_root_sudo_warning_text.txt')) as f:
            non_root_sudo_warning = f.read()
        return non_root_sudo_warning
    @attr('smoketest')
    @docker_only
    def test_passwordless_ssh_authentication(self):
        """With passwordless SSH set up, every auth option must succeed."""
        self.upload_topology()
        self.setup_for_catalog_add()
        # Passwordless SSH as root, but specify -I
        # We need to do it as a script because docker_py doesn't support
        # redirecting stdin.
        command_output = self.run_script_from_prestoadmin_dir(
            'echo "password" | ./presto-admin catalog add -I')
        self.assertEqualIgnoringOrder(
            self._remove_python_string(self.success_output + self.interactive_text),
            self._remove_python_string(command_output))
        # Passwordless SSH as root, but specify -p
        command_output = self.run_prestoadmin('catalog add --password '
                                              'password')
        self.assertEqualIgnoringOrder(self.success_output, command_output)
        # Passwordless SSH as app-admin, specify -I
        non_root_sudo_warning = self.non_root_sudo_warning_message()
        command_output = self.run_script_from_prestoadmin_dir(
            'echo "password" | ./presto-admin catalog add -I -u app-admin')
        self.assertEqualIgnoringOrder(
            self._remove_python_string(
                self.success_output + self.interactive_text +
                self.sudo_password_prompt + non_root_sudo_warning),
            self._remove_python_string(command_output))
        # Passwordless SSH as app-admin, but specify -p
        command_output = self.run_prestoadmin('catalog add --password '
                                              'password -u app-admin')
        self.assertEqualIgnoringOrder(
            self.success_output + self.sudo_password_prompt +
            self.sudo_password_prompt, command_output)
        # Passwordless SSH as app-admin, but specify wrong password with -I
        parallel_password_failure = self.parallel_password_failure_message()
        command_output = self.run_script_from_prestoadmin_dir(
            'echo "asdf" | ./presto-admin catalog add -I -u app-admin',
            raise_error=False)
        self.assertEqualIgnoringOrder(
            self._remove_python_string(parallel_password_failure + self.interactive_text),
            self._remove_python_string(command_output))
        # Passwordless SSH as app-admin, but specify wrong password with -p
        command_output = self.run_prestoadmin(
            'catalog add --password asdf -u app-admin', raise_error=False)
        self.assertEqualIgnoringOrder(parallel_password_failure,
                                      command_output)
        # Passwordless SSH as root, in serial mode
        command_output = self.run_script_from_prestoadmin_dir(
            './presto-admin catalog add --serial')
        self.assertEqualIgnoringOrder(
            self.success_output, command_output)
    @attr('smoketest')
    @docker_only
    def test_no_passwordless_ssh_authentication(self):
        """With id_rsa moved aside, only explicit password/keyfile works."""
        self.upload_topology()
        self.setup_for_catalog_add()
        # This is needed because the test for
        # No passwordless SSH, -I correct -u app-admin,
        # was giving Device not a stream error in jenkins
        self.run_script_from_prestoadmin_dir(
            'echo "password" | ./presto-admin catalog add -I')
        for host in self.cluster.all_hosts():
            self.cluster.exec_cmd_on_host(
                host,
                'mv /root/.ssh/id_rsa /root/.ssh/id_rsa.bak'
            )
        # No passwordless SSH, no -I or -p
        parallel_password_failure = self.parallel_password_failure_message(
            with_sudo_prompt=False)
        command_output = self.run_prestoadmin(
            'catalog add', raise_error=False)
        self.assertEqualIgnoringOrder(parallel_password_failure,
                                      command_output)
        # No passwordless SSH, -p incorrect -u root
        command_output = self.run_prestoadmin(
            'catalog add --password password', raise_error=False)
        self.assertEqualIgnoringOrder(parallel_password_failure,
                                      command_output)
        # No passwordless SSH, -I correct -u app-admin
        non_root_sudo_warning = self.non_root_sudo_warning_message()
        command_output = self.run_script_from_prestoadmin_dir(
            'echo "password" | ./presto-admin catalog add -I -u app-admin')
        self.assertEqualIgnoringOrder(
            self._remove_python_string(
                self.success_output + self.interactive_text +
                self.sudo_password_prompt + non_root_sudo_warning),
            self._remove_python_string(command_output))
        # No passwordless SSH, -p correct -u app-admin
        command_output = self.run_prestoadmin('catalog add -p password '
                                              '-u app-admin')
        self.assertEqualIgnoringOrder(
            self.success_output + self.sudo_password_prompt +
            self.sudo_password_prompt, command_output)
        # No passwordless SSH, specify keyfile with -i
        self.cluster.exec_cmd_on_host(
            self.cluster.master, 'chmod 600 /root/.ssh/id_rsa.bak')
        command_output = self.run_prestoadmin(
            'catalog add -i /root/.ssh/id_rsa.bak')
        self.assertEqualIgnoringOrder(self.success_output, command_output)
        # Restore the keys so later tests see passwordless SSH again.
        for host in self.cluster.all_hosts():
            self.cluster.exec_cmd_on_host(
                host,
                'mv /root/.ssh/id_rsa.bak /root/.ssh/id_rsa'
            )
    @attr('smoketest', 'quarantine')
    @docker_only
    def test_prestoadmin_no_sudo_popen(self):
        """Running presto-admin without sudo must fail with a clear error."""
        self.upload_topology()
        self.setup_for_catalog_add()
        # We use Popen because docker-py loses the first 8 characters of TTY
        # output.
        args = ['docker', 'exec', '-t', self.cluster.master, 'sudo',
                '-u', 'app-admin', get_presto_admin_path(),
                'topology show']
        proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        # NOTE(review): the two fragments concatenate to
        # '.../.prestoadmin/logpresto-admin.log' -- looks like a missing '/'
        # before 'presto-admin.log'; confirm against the real log path.
        self.assertRegexpMatchesLineByLine(
            'Please run presto-admin with sudo.\n'
            '\\[Errno 13\\] Permission denied: \'.*/.prestoadmin/log'
            'presto-admin.log\'', proc.stdout.read())
    def setup_for_catalog_add(self):
        """Create the catalog dir with a tpch.properties on the cluster."""
        connector_script = 'mkdir -p %(catalogs)s\n' \
            'echo \'connector.name=tpch\' >> %(catalogs)s/tpch.properties\n' % \
            {'catalogs': get_catalog_directory()}
        self.run_script_from_prestoadmin_dir(connector_script)
    def _remove_python_string(self, text):
        # Strip interpreter-version strings so output compares across images.
        return re.sub(r'python2\.6|python2\.7', '', text)
| |
#!/usr/bin/python
import socket
import select
import errno
import logging
import socks
import os
from prober_utils import *
settings = {
    # Note that changing these will invalidate many of the fingerprints
    # Version placed in the ClientHello message body.
    'default_hello_version': TLSRecord.TLS1_0,
    # Version placed in the TLS record layer header.
    'default_record_version': TLSRecord.TLS1_0,
    # Seconds before a blocking socket operation gives up.
    'socket_timeout': 5
}
class Probe(object):
    """Base class for TLS server probes.

    Subclasses override test() to send their (possibly malformed) records;
    probe() drives connect -> test -> process_response and returns a
    fingerprint string summarizing how the server reacted.
    """
    #
    # Reusable standard elements
    #
    def connect(self, ipaddress, port, starttls_mode):
        """Open a TCP (optionally SOCKS5-proxied) connection, run STARTTLS,
        and return an unbuffered file object wrapping the socket."""
        # Check if we're using socks
        if os.environ.has_key('socks_proxy'):
            socks_host, socks_port = os.environ['socks_proxy'].split(':')
            s = socks.socksocket()
            s.setproxy(socks.PROXY_TYPE_SOCKS5, socks_host, int(socks_port))
        else:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(settings['socket_timeout'])
        s.connect((ipaddress, port))
        # Do starttls if relevant
        starttls(s, port, starttls_mode)
        # bufsize=0: writes must hit the wire immediately
        return s.makefile('rw', 0)
    def test(self, sock):
        # Overridden by subclasses to write the probe's records.
        pass
    def process_response(self, sock):
        """Read server records until done/alert/error and build the
        fingerprint string ('handshake:...', 'alert:...', 'error:...')."""
        response = ''
        got_done = False
        while True:
            # Check if there is anything following the server done
            if got_done:
                # If no data then we're done (the server hasn't sent anything further)
                # we allow 500ms to give the followup time to arrive
                if not select.select([sock.fileno(),],[],[],0.5)[0]:
                    break
            try:
                record = read_tls_record(sock)
                response += '*(%x)' % record.version() # TODO: Not sure that recording the record layer version is worth it?
            except socket.timeout, e:
                response += 'error:timeout'
                break
            except socket.error, e:
                response += 'error:%s|' % errno.errorcode[e.errno]
                break
            except IOError, e:
                response += 'error:%s|' % str(e)
                break
            if record.content_type() == TLSRecord.Handshake:
                # A single handshake record can contain multiple handshake messages
                processed_bytes = 0
                while processed_bytes < record.message_length():
                    message = HandshakeMessage.from_bytes(record.message()[processed_bytes:])
                    if message.message_type() == message.ServerHello:
                        response += 'handshake:%s(%x)|' % (message.message_types[message.message_type()], message.server_version())
                    else:
                        response += 'handshake:%s|' % (message.message_types[message.message_type()])
                    if message.message_type() == HandshakeMessage.ServerHelloDone:
                        got_done = True
                    # +4 accounts for the handshake header (type + 24-bit length)
                    processed_bytes += message.message_length() + 4
                # After ServerHelloDone, loop again to catch trailing data.
                if got_done:
                    continue
            elif record.content_type() == TLSRecord.Alert:
                alert = AlertMessage.from_bytes(record.message())
                if alert.alert_level() == AlertMessage.Fatal:
                    response += 'alert:%s:fatal|' % alert.alert_types[alert.alert_type()]
                    break
                else:
                    response += 'alert:%s:warning|' % alert.alert_types[alert.alert_type()]
            else:
                if record.content_types.has_key(record.content_type()):
                    response += 'record:%s|' % record.content_types[record.content_type()]
                else:
                    response += 'record:type(%x)|' % record.content_type()
            if got_done:
                break
        return response
    def probe(self, ipaddress, port, starttls):
        """Run this probe against ipaddress:port; returns the fingerprint.

        A non-empty result from test() (write errors) short-circuits the
        response processing.
        """
        sock = self.connect(ipaddress, port, starttls)
        try:
            result = self.test(sock)
        except socket.timeout, e:
            result = 'writeerror:timeout'
            return result
        except socket.error, e:
            result = 'writeerror:%s|' % errno.errorcode[e.errno]
            return result
        if result:
            return result
        return self.process_response(sock)
class NormalHandshake(Probe):
    '''A normal handshake'''
    def test(self, sock):
        # Baseline probe: one well-formed client hello, nothing else.
        logging.debug('Sending Client Hello...')
        hello = make_hello()
        sock.write(hello)
class DoubleClientHello(Probe):
    '''Two client hellos'''
    def test(self, sock):
        # Send the identical hello twice in a row.
        for _ in range(2):
            logging.debug('Sending Client Hello...')
            sock.write(make_hello())
class ChangeCipherSpec(Probe):
    '''Send a hello then change cipher spec'''
    def test(self, sock):
        logging.debug('Sending Client Hello...')
        hello = make_hello()
        sock.write(hello)
        logging.debug('Sending ChangeCipherSpec...')
        ccs = make_ccs()
        sock.write(ccs)
class EmptyChangeCipherSpec(Probe):
    '''Send a hello then an empty change cipher spec'''
    def make_empty_ccs(self):
        # Zero-length ChangeCipherSpec record.
        return TLSRecord.create(content_type=TLSRecord.ChangeCipherSpec,
                                version=TLSRecord.TLS1_0,
                                message='').bytes
    def test(self, sock):
        logging.debug('Sending Client Hello...')
        sock.write(make_hello())
        logging.debug('Sending Empty ChangeCipherSpec...')
        sock.write(self.make_empty_ccs())
class BadHandshakeMessage(Probe):
    '''An invalid handshake message'''
    def make_bad_handshake(self):
        # Handshake record whose body is arbitrary junk.
        rec = TLSRecord.create(content_type=TLSRecord.Handshake,
                               version=TLSRecord.TLS1_0,
                               message='Something')
        return rec.bytes
    def test(self, sock):
        logging.debug('Sending Client Hello...')
        sock.write(make_hello())
        logging.debug('Sending bad handshake message...')
        sock.write(self.make_bad_handshake())
class OnlyECCipherSuites(Probe):
    '''Try connecting with ECC cipher suites only'''
    def make_ec_hello(self):
        # Offer nothing but elliptic-curve suites.
        ec_ciphers = [TLS_ECDH_RSA_WITH_RC4_128_SHA,
                      TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
                      TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
                      TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
                      TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
                      TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
                      TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256]
        hello = ClientHelloMessage.create(TLSRecord.TLS1_0,
                                          '01234567890123456789012345678901',
                                          ec_ciphers)
        return TLSRecord.create(content_type=TLSRecord.Handshake,
                                version=TLSRecord.TLS1_0,
                                message=hello.bytes).bytes
    def test(self, sock):
        logging.debug('Sending Client Hello...')
        sock.write(self.make_ec_hello())
class Heartbeat(Probe):
    '''Try to send a heartbeat message'''
    def make_hb_hello(self):
        # Hello advertising the heartbeat extension.
        hb_ext = HeartbeatExtension.create()
        hello = ClientHelloMessage.create(TLSRecord.TLS1_0,
                                          '01234567890123456789012345678901',
                                          DEFAULT_CIPHERS,
                                          extensions=[hb_ext])
        return TLSRecord.create(content_type=TLSRecord.Handshake,
                                version=TLSRecord.TLS1_0,
                                message=hello.bytes).bytes
    def make_heartbeat(self):
        # Well-formed heartbeat request with a small payload.
        hb = HeartbeatMessage.create(HeartbeatMessage.HeartbeatRequest,
                                     'XXXX')
        return TLSRecord.create(content_type=TLSRecord.Heartbeat,
                                version=TLSRecord.TLS1_0,
                                message=hb.bytes).bytes
    def test(self, sock):
        logging.debug('Sending Client Hello...')
        sock.write(self.make_hb_hello())
        logging.debug('Sending Heartbeat...')
        sock.write(self.make_heartbeat())
class Heartbleed(Probe):
    '''Try to send a heartbleed attack'''
    def make_hb_hello(self):
        # Hello advertising the heartbeat extension.
        hb_ext = HeartbeatExtension.create()
        hello = ClientHelloMessage.create(TLSRecord.TLS1_0,
                                          '01234567890123456789012345678901',
                                          DEFAULT_CIPHERS,
                                          extensions=[hb_ext])
        return TLSRecord.create(content_type=TLSRecord.Handshake,
                                version=TLSRecord.TLS1_0,
                                message=hello.bytes).bytes
    def make_heartbleed(self):
        # Heartbeat request claiming a 0x4000-byte payload but sending 4.
        hb = HeartbeatMessage.create(HeartbeatMessage.HeartbeatRequest,
                                     'XXXX', 0x4000)
        return TLSRecord.create(content_type=TLSRecord.Heartbeat,
                                version=TLSRecord.TLS1_0,
                                message=hb.bytes).bytes
    def test(self, sock):
        logging.debug('Sending Client Hello...')
        sock.write(self.make_hb_hello())
        logging.debug('Sending Heartbleed...')
        sock.write(self.make_heartbleed())
class HighTLSVersion(Probe):
    '''Set a high TLS version in the record'''
    def make_high_tls_hello(self):
        # Hello body is normal; only the record-layer version is bogus.
        hello = ClientHelloMessage.create(settings['default_hello_version'],
                                          '01234567890123456789012345678901',
                                          DEFAULT_CIPHERS)
        return TLSRecord.create(content_type=TLSRecord.Handshake,
                                version=0x400,
                                message=hello.bytes).bytes
    def test(self, sock):
        logging.debug('Sending Client Hello...')
        sock.write(self.make_high_tls_hello())
class VeryHighTLSVersion(Probe):
    '''Set a very high TLS version in the record'''
    def make_very_high_tls_hello(self):
        # Record-layer version forced to the maximum 0xffff.
        hello = ClientHelloMessage.create(settings['default_hello_version'],
                                          '01234567890123456789012345678901',
                                          DEFAULT_CIPHERS)
        return TLSRecord.create(content_type=TLSRecord.Handshake,
                                version=0xffff,
                                message=hello.bytes).bytes
    def test(self, sock):
        logging.debug('Sending Client Hello...')
        sock.write(self.make_very_high_tls_hello())
class ZeroTLSVersion(Probe):
    '''Set a zero version in the record'''
    def make_zero_tls_hello(self):
        # Record-layer version forced to zero.
        hello = ClientHelloMessage.create(settings['default_hello_version'],
                                          '01234567890123456789012345678901',
                                          DEFAULT_CIPHERS)
        return TLSRecord.create(content_type=TLSRecord.Handshake,
                                version=0x000,
                                message=hello.bytes).bytes
    def test(self, sock):
        logging.debug('Sending Client Hello...')
        sock.write(self.make_zero_tls_hello())
class HighHelloVersion(Probe):
    '''Set a high version in the hello'''
    def make_high_tls_hello(self):
        # Record layer is normal; the hello message version is bogus.
        hello = ClientHelloMessage.create(0x400,
                                          '01234567890123456789012345678901',
                                          DEFAULT_CIPHERS)
        return TLSRecord.create(content_type=TLSRecord.Handshake,
                                version=settings['default_record_version'],
                                message=hello.bytes).bytes
    def test(self, sock):
        logging.debug('Sending Client Hello...')
        sock.write(self.make_high_tls_hello())
class VeryHighHelloVersion(Probe):
    '''Set a very high version in the hello'''
    def make_high_tls_hello(self):
        # Hello message version forced to the maximum 0xffff.
        hello = ClientHelloMessage.create(0xffff,
                                          '01234567890123456789012345678901',
                                          DEFAULT_CIPHERS)
        return TLSRecord.create(content_type=TLSRecord.Handshake,
                                version=settings['default_record_version'],
                                message=hello.bytes).bytes
    def test(self, sock):
        logging.debug('Sending Client Hello...')
        sock.write(self.make_high_tls_hello())
class ZeroHelloVersion(Probe):
    '''Set a zero version in the hello'''
    def make_zero_tls_hello(self):
        # Hello message version forced to zero.
        hello = ClientHelloMessage.create(0x000,
                                          '01234567890123456789012345678901',
                                          DEFAULT_CIPHERS)
        return TLSRecord.create(content_type=TLSRecord.Handshake,
                                version=settings['default_record_version'],
                                message=hello.bytes).bytes
    def test(self, sock):
        logging.debug('Sending Client Hello...')
        sock.write(self.make_zero_tls_hello())
class BadContentType(Probe):
    '''Use an invalid content type in the record'''
    def make_bad_content_type(self):
        # 17 is not an assigned TLS record content type.
        hello = ClientHelloMessage.create(settings['default_hello_version'],
                                          '01234567890123456789012345678901',
                                          DEFAULT_CIPHERS)
        return TLSRecord.create(content_type=17,
                                version=settings['default_record_version'],
                                message=hello.bytes).bytes
    def test(self, sock):
        logging.debug('Sending Client Hello...')
        sock.write(self.make_bad_content_type())
class RecordLengthOverflow(Probe):
    '''Make the record length exceed the stated one'''
    def make_record_length_overflow(self):
        # Claim a 1-byte record but ship the whole hello behind it.
        hello = ClientHelloMessage.create(settings['default_hello_version'],
                                          '01234567890123456789012345678901',
                                          DEFAULT_CIPHERS)
        return TLSRecord.create(content_type=TLSRecord.Handshake,
                                version=settings['default_record_version'],
                                message=hello.bytes,
                                length=0x0001).bytes
    def test(self, sock):
        logging.debug('Sending Client Hello...')
        sock.write(self.make_record_length_overflow())
class RecordLengthUnderflow(Probe):
'''Make the record shorter than the specified length'''
def make_record_length_underflow(self):
hello = ClientHelloMessage.create(settings['default_hello_version'],
'01234567890123456789012345678901',
DEFAULT_CIPHERS)
record = TLSRecord.create(content_type=TLSRecord.Handshake,
version=settings['default_record_version'],
message=hello.bytes,
length=0xffff)
#hexdump(record.bytes)
return record.bytes
def test(self, sock):
logging.debug('Sending Client Hello...')
try:
sock.write(self.make_record_length_underflow())
except socket.timeout, e:
result = 'writeerror:timeout'
return result
except socket.error, e:
result = 'writeerror:%s|' % errno.errorcode[e.errno]
return result
class EmptyRecord(Probe):
    '''Send an empty record then the hello'''
    def make_empty_record(self):
        # Zero-length handshake record.
        return TLSRecord.create(content_type=TLSRecord.Handshake,
                                version=settings['default_record_version'],
                                message='').bytes
    def test(self, sock):
        logging.debug('Sending empty record...')
        sock.write(self.make_empty_record())
        sock.write(make_hello())
class SplitHelloRecords(Probe):
'''Split the hello over two records'''
def make_split_hello(self):
hello = ClientHelloMessage.create(settings['default_hello_version'],
'01234567890123456789012345678901',
DEFAULT_CIPHERS)
first = hello.bytes[:10]
second = hello.bytes[10:]
record_one = TLSRecord.create(content_type=TLSRecord.Handshake,
version=settings['default_record_version'],
message=first)
record_two = TLSRecord.create(content_type=TLSRecord.Handshake,
version=0x301,
message=second)
#hexdump(record.bytes)
return record_one, record_two
def test(self, sock):
logging.debug('Sending split hello...')
part_one, part_two = self.make_split_hello()
sock.write(part_one)
try:
sock.write(part_two)
except socket.timeout, e:
result = 'writeerror:timeout'
return result
except socket.error, e:
result = 'writeerror:%s|' % errno.errorcode[e.errno]
return result
class SplitHelloPackets(Probe):
    '''Split the hello over two packets'''
    def test(self, sock):
        # Same record bytes as a normal hello, flushed in two chunks.
        whole = make_hello()
        logging.debug('Sending Client Hello part one...')
        sock.write(whole[:10])
        sock.flush()
        logging.debug('Sending Client Hello part two...')
        sock.write(whole[10:])
class NoCiphers(Probe):
    '''Send an empty cipher list'''
    def make_no_ciphers_hello(self):
        # Hello offering zero cipher suites.
        hello = ClientHelloMessage.create(settings['default_hello_version'],
                                          '01234567890123456789012345678901',
                                          [])
        return TLSRecord.create(content_type=TLSRecord.Handshake,
                                version=settings['default_record_version'],
                                message=hello.bytes).bytes
    def test(self, sock):
        logging.debug('Sending No ciphers Hello...')
        sock.write(self.make_no_ciphers_hello())
class SNIWrongName(Probe):
    '''Send a server name indication for a non-matching name'''
    def make_sni_hello(self, name):
        # Hello carrying an SNI extension for the given name.
        sni = ServerNameExtension.create(name)
        hello = ClientHelloMessage.create(TLSRecord.TLS1_0,
                                          '01234567890123456789012345678901',
                                          DEFAULT_CIPHERS,
                                          extensions=[sni])
        return TLSRecord.create(content_type=TLSRecord.Handshake,
                                version=TLSRecord.TLS1_0,
                                message=hello.bytes).bytes
    def test(self, sock):
        logging.debug('Sending Client Hello...')
        sock.write(self.make_sni_hello('thisisnotyourname'))
class SNILongName(SNIWrongName):
    '''Send a server name indication with a long name'''
    def test(self, sock):
        # 500-character server name.
        logging.debug('Sending Client Hello...')
        long_name = 'x' * 500
        sock.write(self.make_sni_hello(long_name))
class SNIEmptyName(SNIWrongName):
    '''Send a server name indication with an empty name'''
    def test(self, sock):
        # Zero-length server name.
        logging.debug('Sending Client Hello...')
        empty_name = ''
        sock.write(self.make_sni_hello(empty_name))
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import multiprocessing
import random
import time
from unittest import mock
from gbpservice.nfp.core import context as nfp_context
from gbpservice.nfp.core import controller as nfp_controller
from gbpservice.nfp.core import event as nfp_event
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.core import manager as nfp_manager
from gbpservice.nfp.core import worker as nfp_worker
from oslo_config import cfg as oslo_config
import six
import unittest2
NFP_MODULES_PATH = ['gbpservice.neutron.tests.unit.nfp.core']
def mocked_get_logging_context(**kwargs):
    """Stand-in for nfp_logging.get_logging_context.

    Ignores all arguments and returns an empty context skeleton.
    """
    context = {
        'meta_id': '',
        'auth_token': None,
        'namespace': None,
    }
    return context
# Patch the NFP logging helper so tests never need a real request context.
nfp_logging.get_logging_context = mocked_get_logging_context
class MockedPipe(object):
    """In-process stand-in for one end of a multiprocessing pipe."""
    def __init__(self):
        # Fake file-descriptor number; never used for real I/O.
        self.fd = random.randint(14, 34)
        # Wired by MockedProcess.start() to the peer end's event processor.
        self.other_end_event_proc_func = None
    def poll(self, *args, **kwargs):
        # Pretend there is never buffered data waiting.
        return False
    def send(self, event):
        # Deliver the event synchronously to the other end's processor.
        self.other_end_event_proc_func(event)
class MockedProcess(object):
    """Stand-in for a worker process that runs an NfpWorker in-process."""
    def __init__(self, parent_pipe=None, child_pipe=None,
                 controller=None):
        self.parent_pipe = parent_pipe
        self.child_pipe = child_pipe
        self.controller = controller
        self.daemon = True
        # Fake pid in a recognizable range; tests assert on this range.
        self.pid = random.randint(8888, 9999)
    def start(self):
        """Build the in-process worker and wire both pipe ends together."""
        self.worker = nfp_worker.NfpWorker({}, threads=0)
        self.worker.parent_pipe = self.parent_pipe
        self.worker.pipe = self.child_pipe
        self.worker.controller = nfp_controller.NfpController(
            self.controller._conf, singleton=False)
        # fork a new controller object
        self.worker.controller.PROCESS_TYPE = "worker"
        self.worker.controller._pipe = self.worker.pipe
        self.worker.controller._event_handlers = (
            self.controller._event_handlers)
        self.worker.event_handlers = self.controller.get_event_handlers()
        # A send() on either pipe end invokes the opposite side's processor.
        self.parent_pipe.other_end_event_proc_func = (
            self.worker._process_event)
        self.child_pipe.other_end_event_proc_func = (
            self.controller._process_event)
def mocked_pipe(**kwargs):
    """Replacement for nfp_controller.PIPE: returns two fake pipe ends."""
    parent_end, child_end = MockedPipe(), MockedPipe()
    return parent_end, child_end
def mocked_process(target=None, args=None):
    """Replacement for nfp_controller.PROCESS: builds a MockedProcess
    from the (pipe, pipe, controller) tail of *args*."""
    return MockedProcess(
        parent_pipe=args[1], child_pipe=args[2], controller=args[3])
# Swap the controller's pipe/process factories for the in-process fakes.
nfp_controller.PIPE = mocked_pipe
nfp_controller.PROCESS = mocked_process
class MockedWatchdog(object):
    """Watchdog replacement: fires the handler immediately (no timer)."""
    def __init__(self, handler, seconds=1, event=None):
        # Only poll events trigger the handler; delay is skipped entirely.
        if event and event.desc.type == 'poll_event':
            handler(event=event)
    def cancel(self):
        # Nothing to cancel -- there is no real timer.
        pass
# Replace the manager's watchdog timer with the immediate-fire fake.
nfp_manager.WATCHDOG = MockedWatchdog
class Object(object):
    """Bare attribute holder used to fabricate fake processes/services."""
    def __init__(self):
        # Intentionally empty; attributes are attached ad hoc by callers.
        pass
class Test_Process_Model(unittest2.TestCase):
    """Tests for the NFP controller's process/event model using mocked IPC."""
    def setUp(self):
        # Fresh NFP context for every test.
        nfp_context.init()
def _mocked_fork(self, args):
proc = Object()
pid = random.randint(8888, 9999)
setattr(proc, 'pid', pid)
return proc
def _mocked_oslo_wrap(self):
wrap = Object()
setattr(wrap, 'service', {})
return wrap
def _mocked_event_ack(self, event):
if event.id == 'TEST_EVENT_ACK_FROM_WORKER':
if hasattr(event, 'desc'):
if event.desc.worker:
self.controller.event_ack_wait_obj.set()
    def test_nfp_module_init(self):
        """Loading modules on the path must invoke their init hook."""
        conf = oslo_config.CONF
        conf.nfp_modules_path = NFP_MODULES_PATH
        controller = nfp_controller.NfpController(conf, singleton=False)
        wait_obj = multiprocessing.Event()
        setattr(controller, 'nfp_module_init_wait_obj', wait_obj)
        nfp_controller.load_nfp_modules(conf, controller)
        # Presumably set by the test module's nfp_module_init hook.
        controller.nfp_module_init_wait_obj.wait(1)
        called = controller.nfp_module_init_wait_obj.is_set()
        self.assertTrue(called)
    def test_nfp_module_init_wrong_path(self):
        """A bogus modules path must load nothing and fire no init hook."""
        conf = oslo_config.CONF
        conf.nfp_modules_path = ['tmp.nfp']
        controller = nfp_controller.NfpController(oslo_config.CONF,
                                                  singleton=False)
        wait_obj = multiprocessing.Event()
        setattr(controller, 'nfp_module_init_wait_obj', wait_obj)
        nfp_controller.load_nfp_modules(conf, controller)
        controller.nfp_module_init_wait_obj.wait(1)
        called = controller.nfp_module_init_wait_obj.is_set()
        self.assertFalse(called)
    def test_nfp_module_post_init_called(self):
        """nfp_modules_post_init must invoke each module's post-init hook."""
        conf = oslo_config.CONF
        conf.nfp_modules_path = NFP_MODULES_PATH
        controller = nfp_controller.NfpController(conf, singleton=False)
        wait_obj = multiprocessing.Event()
        setattr(controller, 'nfp_module_post_init_wait_obj', wait_obj)
        nfp_modules = nfp_controller.load_nfp_modules(conf, controller)
        nfp_controller.nfp_modules_post_init(conf, nfp_modules, controller)
        # Presumably set by the test module's nfp_module_post_init hook.
        controller.nfp_module_post_init_wait_obj.wait(1)
        called = controller.nfp_module_post_init_wait_obj.is_set()
        self.assertTrue(called)
    def test_nfp_module_post_init_ignored(self):
        """Modules lacking nfp_module_post_init must simply be skipped."""
        # None the post_init method in test handler
        from gbpservice.neutron.tests.unit.nfp.core import (
            nfp_module)
        del nfp_module.nfp_module_post_init
        conf = oslo_config.CONF
        conf.nfp_modules_path = NFP_MODULES_PATH
        controller = nfp_controller.NfpController(conf, singleton=False)
        wait_obj = multiprocessing.Event()
        setattr(controller, 'nfp_module_post_init_wait_obj', wait_obj)
        nfp_modules = nfp_controller.load_nfp_modules(conf, controller)
        nfp_controller.nfp_modules_post_init(conf, nfp_modules, controller)
        controller.nfp_module_post_init_wait_obj.wait(1)
        called = controller.nfp_module_post_init_wait_obj.is_set()
        self.assertFalse(called)
@mock.patch(
'gbpservice.nfp.core.controller.NfpController._fork'
)
def test_nfp_controller_launch_2_workers(self, mock_fork):
mock_fork.side_effect = self._mocked_fork
conf = oslo_config.CONF
conf.nfp_modules_path = []
controller = nfp_controller.NfpController(conf, singleton=False)
controller.launch(2)
# Check if 2 workers are created
workers = controller.get_childrens()
pids = list(workers.keys())
self.assertEqual(len(pids), 2)
self.assertTrue(pid in range(8888, 9999) for pid in pids)
@mock.patch(
'gbpservice.nfp.core.controller.NfpController._fork'
)
def test_nfp_controller_launch_4_workers(self, mock_fork):
mock_fork.side_effect = self._mocked_fork
conf = oslo_config.CONF
conf.nfp_modules_path = []
controller = nfp_controller.NfpController(conf, singleton=False)
controller.launch(4)
# Check if 4 workers are created
workers = controller.get_childrens()
pids = list(workers.keys())
self.assertEqual(len(pids), 4)
self.assertTrue(pid in range(8888, 9999) for pid in pids)
@mock.patch(
'gbpservice.nfp.core.controller.NfpController._fork'
)
def test_nfp_rsrc_manager_new_childs(self, mock_fork):
mock_fork.side_effect = self._mocked_fork
conf = oslo_config.CONF
conf.nfp_modules_path = []
controller = nfp_controller.NfpController(conf, singleton=False)
controller.launch(2)
controller._update_manager()
# Check if 2 workers are added to manager
pids = list(controller._manager._resource_map.keys())
self.assertEqual(len(pids), 2)
self.assertTrue(pid in range(8888, 9999) for pid in pids)
    @mock.patch(
        'gbpservice.nfp.core.controller.NfpController._fork'
    )
    def test_nfp_rsrc_manager_kill_child(self, mock_fork):
        """Manager must notice a replaced worker between two runs."""
        mock_fork.side_effect = self._mocked_fork
        conf = oslo_config.CONF
        conf.nfp_modules_path = []
        controller = nfp_controller.NfpController(conf, singleton=False)
        controller.launch(2)
        controller._update_manager()
        # run so that it stores the snapshot
        controller._manager.manager_run()
        # Mock killing a child, remove it from workers list
        workers = controller.get_childrens()
        old_childs = list(workers.keys())
        del controller.children[old_childs[0]]
        # Mock creating a new child which replaces the killed one
        wrap = self._mocked_oslo_wrap()
        pid = controller.fork_child(wrap)
        controller.children[pid] = wrap
        # Run one more time and check if it detects the difference
        controller._manager.manager_run()
        pids = list(controller._manager._resource_map.keys())
        self.assertEqual(len(pids), 2)
        # Random pids can collide with the killed one; only assert the
        # replacement when the new pid is genuinely different.
        if pid not in old_childs:
            self.assertFalse(old_childs[0] in pids)
            self.assertTrue(old_childs[1] in pids)
    def test_post_event_with_no_handler(self):
        """post_event() must raise AssertionError for an unhandled event."""
        conf = oslo_config.CONF
        conf.nfp_modules_path = []
        controller = nfp_controller.NfpController(conf, singleton=False)
        event = controller.create_event(
            id='EVENT_INVALID', data='INVALID_DATA',
            binding_key='EVENT_INVALID')
        try:
            controller.post_event(event)
        except AssertionError:
            # Expected: no module registered a handler for EVENT_INVALID.
            return
        self.assertTrue(False)
    def mocked_compress(self, event):
        # No-op stand-in for NfpController.compress; event data stays raw.
        pass
    def mocked_pipe_send(self, pipe, event):
        """Stand-in for NfpController.pipe_send.

        Instead of sending over a pipe, sets the wait object matching the
        event's id so the posting test can observe the dispatch.
        """
        if event.id == 'EVENT_1':
            if hasattr(event, 'desc'):
                if event.desc.worker:
                    self.controller.nfp_event_1_wait_obj.set()
        elif 'EVENT_LOAD' in event.id:
            if hasattr(event, 'desc'):
                # Only fires when dispatched to the expected worker pid,
                # which the load tests stash in event.data.
                if event.desc.worker == event.data:
                    self.controller.nfp_event_load_wait_obj.set()
        elif 'SEQUENCE' in event.id:
            if hasattr(event, 'desc'):
                if event.desc.worker:
                    if 'EVENT_1' in event.id:
                        self.controller.sequence_event_1_wait_obj.set()
                    elif 'EVENT_2' in event.id:
                        self.controller.sequence_event_2_wait_obj.set()
        elif 'POLL' in event.id:
            if hasattr(event, 'desc'):
                # Poll events must carry a poll descriptor to count.
                if hasattr(event.desc, 'poll_desc'):
                    if event.desc.worker:
                        if event.id == 'POLL_EVENT':
                            self.controller.poll_event_wait_obj.set()
                        if event.id == 'POLL_EVENT_DECORATOR':
                            self.controller.poll_event_dec_wait_obj.set()
    @mock.patch(
        'gbpservice.nfp.core.controller.NfpController.pipe_send'
    )
    def test_post_event_in_distributor(self, mock_pipe_send):
        """A posted event must be dispatched to a worker via pipe_send."""
        mock_pipe_send.side_effect = self.mocked_pipe_send
        conf = oslo_config.CONF
        conf.nfp_modules_path = NFP_MODULES_PATH
        controller = nfp_controller.NfpController(conf, singleton=False)
        nfp_controller.load_nfp_modules(conf, controller)
        # Mock launching of a worker
        controller.launch(1)
        controller._update_manager()
        wait_obj = multiprocessing.Event()
        setattr(controller, 'nfp_event_1_wait_obj', wait_obj)
        event = controller.create_event(
            id='EVENT_1',
            data='post_event_in_distributor')
        # Store in class object
        self.controller = controller
        controller.post_event(event)
        # mocked_pipe_send sets this when the event reaches a worker.
        controller.nfp_event_1_wait_obj.wait(1)
        called = controller.nfp_event_1_wait_obj.is_set()
        self.assertTrue(called)
    @mock.patch(
        'gbpservice.nfp.core.controller.NfpController.pipe_send')
    @mock.patch(
        'gbpservice.nfp.core.controller.NfpController.compress')
    def test_load_distribution_to_workers(self, mock_compress, mock_pipe_send):
        """Events must be routed to the least-loaded worker.

        Each event's data holds the pid of the worker expected to receive
        it; mocked_pipe_send only sets the wait object on a match.
        """
        mock_pipe_send.side_effect = self.mocked_pipe_send
        mock_compress.side_effect = self.mocked_compress
        conf = oslo_config.CONF
        conf.nfp_modules_path = NFP_MODULES_PATH
        controller = nfp_controller.NfpController(conf, singleton=False)
        self.controller = controller
        nfp_controller.load_nfp_modules(conf, controller)
        # Mock launching of a worker
        controller.launch(3)
        controller._update_manager()
        # Load distribution as -> worker1 - 2, worker2 - 4, worker3 - 6
        # 10 events to be distributed.
        # worker1 will get 5
        # worker2 will get 4
        # worker3 will get 1
        # At the end all workers should be @load 7
        # Initialize with above load
        # NOTE(review): the narrative above does not obviously match
        # init_load = [6, 4, 2] / 9 events below -- verify intent.
        init_load = [6, 4, 2]
        worker_pids = []
        resource_map = controller._manager._resource_map
        for pid, em in six.iteritems(resource_map):
            load = init_load.pop()
            em._load = load
            worker_pids.append(pid)
        events = [
            controller.create_event(id='EVENT_LOAD_1', data=worker_pids[0]),
            controller.create_event(id='EVENT_LOAD_2', data=worker_pids[0]),
            controller.create_event(id='EVENT_LOAD_3', data=worker_pids[0]),
            controller.create_event(id='EVENT_LOAD_4', data=worker_pids[1]),
            controller.create_event(id='EVENT_LOAD_5', data=worker_pids[0]),
            controller.create_event(id='EVENT_LOAD_6', data=worker_pids[1]),
            controller.create_event(id='EVENT_LOAD_7', data=worker_pids[0]),
            controller.create_event(id='EVENT_LOAD_8', data=worker_pids[1]),
            controller.create_event(id='EVENT_LOAD_9', data=worker_pids[2])]
        for i in range(0, 9):
            wait_obj = multiprocessing.Event()
            setattr(controller, 'nfp_event_load_wait_obj', wait_obj)
            event = events[i]
            controller.post_event(event)
            controller.nfp_event_load_wait_obj.wait(1)
            called = controller.nfp_event_load_wait_obj.is_set()
            self.assertTrue(called)
def test_new_event_with_sequence_and_no_binding_key(self):
conf = oslo_config.CONF
conf.nfp_modules_path = []
controller = nfp_controller.NfpController(conf, singleton=False)
event = controller.create_event(
id='EVENT_SEQUENCE', data='NO_DATA',
serialize=True)
self.assertTrue(event is None)
    @mock.patch(
        'gbpservice.nfp.core.controller.NfpController.pipe_send'
    )
    def test_events_sequencing_with_same_binding_key(self, mock_pipe_send):
        """Serialized events sharing a binding key must run one at a time.

        The second event may only be dispatched after event_complete()
        is called for the first one.
        """
        mock_pipe_send.side_effect = self.mocked_pipe_send
        conf = oslo_config.CONF
        conf.nfp_modules_path = NFP_MODULES_PATH
        controller = nfp_controller.NfpController(conf, singleton=False)
        self.controller = controller
        nfp_controller.load_nfp_modules(conf, controller)
        # Mock launching of a worker
        controller.launch(1)
        controller._update_manager()
        self.controller = controller
        wait_obj = multiprocessing.Event()
        setattr(controller, 'sequence_event_1_wait_obj', wait_obj)
        wait_obj = multiprocessing.Event()
        setattr(controller, 'sequence_event_2_wait_obj', wait_obj)
        event_1 = controller.create_event(
            id='SEQUENCE_EVENT_1', data='NO_DATA',
            serialize=True, binding_key='SEQUENCE')
        event_2 = controller.create_event(
            id='SEQUENCE_EVENT_2', data='NO_DATA',
            serialize=True, binding_key='SEQUENCE')
        controller.post_event(event_1)
        controller.post_event(event_2)
        controller._manager.manager_run()
        # Only the first event should have been dispatched so far
        controller.sequence_event_1_wait_obj.wait(1)
        called = controller.sequence_event_1_wait_obj.is_set()
        self.assertTrue(called)
        # Completing event_1 releases event_2 from the sequencer
        controller.event_complete(event_1)
        controller._manager.manager_run()
        controller.sequence_event_2_wait_obj.wait(1)
        called = controller.sequence_event_2_wait_obj.is_set()
        self.assertTrue(called)
        controller.event_complete(event_2)
    @mock.patch(
        'gbpservice.nfp.core.controller.NfpController.pipe_send'
    )
    def test_events_sequencing_with_diff_binding_key(self, mock_pipe_send):
        """Serialized events with different binding keys must not block
        each other: both are dispatched in the same manager run."""
        mock_pipe_send.side_effect = self.mocked_pipe_send
        conf = oslo_config.CONF
        conf.nfp_modules_path = NFP_MODULES_PATH
        controller = nfp_controller.NfpController(conf, singleton=False)
        self.controller = controller
        nfp_controller.load_nfp_modules(conf, controller)
        # Mock launching of a worker
        controller.launch(1)
        controller._update_manager()
        self.controller = controller
        wait_obj = multiprocessing.Event()
        setattr(controller, 'sequence_event_1_wait_obj', wait_obj)
        wait_obj = multiprocessing.Event()
        setattr(controller, 'sequence_event_2_wait_obj', wait_obj)
        event_1 = controller.create_event(
            id='SEQUENCE_EVENT_1', data='NO_DATA',
            serialize=True, binding_key='SEQUENCE_1')
        event_2 = controller.create_event(
            id='SEQUENCE_EVENT_2', data='NO_DATA',
            serialize=True, binding_key='SEQUENCE_2')
        controller.post_event(event_1)
        controller.post_event(event_2)
        controller._manager.manager_run()
        # Both events should be dispatched without any event_complete()
        controller.sequence_event_1_wait_obj.wait(1)
        called = controller.sequence_event_1_wait_obj.is_set()
        self.assertTrue(called)
        controller.sequence_event_2_wait_obj.wait(1)
        called = controller.sequence_event_2_wait_obj.is_set()
        self.assertTrue(called)
    @mock.patch(
        'gbpservice.nfp.core.controller.NfpController.pipe_send'
    )
    def test_events_sequencing_negative(self, mock_pipe_send):
        """Without event_complete() on the first event, a second serialized
        event with the same binding key must stay queued."""
        mock_pipe_send.side_effect = self.mocked_pipe_send
        conf = oslo_config.CONF
        conf.nfp_modules_path = NFP_MODULES_PATH
        controller = nfp_controller.NfpController(conf, singleton=False)
        self.controller = controller
        nfp_controller.load_nfp_modules(conf, controller)
        # Mock launching of a worker
        controller.launch(1)
        controller._update_manager()
        self.controller = controller
        wait_obj = multiprocessing.Event()
        setattr(controller, 'sequence_event_1_wait_obj', wait_obj)
        wait_obj = multiprocessing.Event()
        setattr(controller, 'sequence_event_2_wait_obj', wait_obj)
        event_1 = controller.create_event(
            id='SEQUENCE_EVENT_1', data='NO_DATA',
            serialize=True, binding_key='SEQUENCE')
        event_2 = controller.create_event(
            id='SEQUENCE_EVENT_2', data='NO_DATA',
            serialize=True, binding_key='SEQUENCE')
        controller.post_event(event_1)
        controller.post_event(event_2)
        controller._manager.manager_run()
        controller.sequence_event_1_wait_obj.wait(1)
        called = controller.sequence_event_1_wait_obj.is_set()
        self.assertTrue(called)
        # event_1 was never completed, so event_2 must remain sequenced
        controller._manager.manager_run()
        controller.sequence_event_2_wait_obj.wait(1)
        called = controller.sequence_event_2_wait_obj.is_set()
        # Should not be called
        self.assertFalse(called)
        controller.event_complete(event_1)
        controller.event_complete(event_2)
    @mock.patch(
        'gbpservice.nfp.core.controller.NfpController.pipe_send')
    @mock.patch(
        'gbpservice.nfp.core.controller.NfpController.compress')
    def test_poll_event(self, mock_compress, mock_pipe_send):
        """A polled event with explicit spacing=1 must fire after ~1 second."""
        mock_pipe_send.side_effect = self.mocked_pipe_send
        mock_compress.side_effect = self.mocked_compress
        conf = oslo_config.CONF
        conf.nfp_modules_path = NFP_MODULES_PATH
        controller = nfp_controller.NfpController(conf, singleton=False)
        self.controller = controller
        nfp_controller.load_nfp_modules(conf, controller)
        # Mock launching of a worker
        controller.launch(1)
        controller._update_manager()
        self.controller = controller
        wait_obj = multiprocessing.Event()
        setattr(controller, 'poll_event_wait_obj', wait_obj)
        event = controller.create_event(
            id='POLL_EVENT', data='NO_DATA')
        # Update descriptor
        desc = nfp_event.EventDesc(**{})
        setattr(event, 'desc', desc)
        event.desc.worker = list(controller.get_childrens().keys())[0]
        ctx = nfp_context.get()
        ctx['log_context']['namespace'] = 'nfp_module'
        controller.poll_event(event, spacing=1)
        # controller._manager.manager_run()
        start_time = time.time()
        # relinquish for 1sec
        time.sleep(1)
        # controller.poll()
        controller.poll_event_wait_obj.wait(0.1)
        called = controller.poll_event_wait_obj.is_set()
        end_time = time.time()
        self.assertTrue(called)
        # The poll must have fired close to the requested 1s spacing
        self.assertEqual(round(end_time - start_time), 1.0)
    @mock.patch(
        'gbpservice.nfp.core.controller.NfpController.pipe_send')
    @mock.patch(
        'gbpservice.nfp.core.controller.NfpController.compress')
    def test_poll_event_with_no_worker(self, mock_compress, mock_pipe_send):
        """Polling must still work when the event has no owning worker;
        the controller should assign one itself."""
        mock_compress.side_effect = self.mocked_compress
        mock_pipe_send.side_effect = self.mocked_pipe_send
        conf = oslo_config.CONF
        conf.nfp_modules_path = NFP_MODULES_PATH
        controller = nfp_controller.NfpController(conf, singleton=False)
        self.controller = controller
        nfp_controller.load_nfp_modules(conf, controller)
        # Mock launching of a worker
        controller.launch(1)
        controller._update_manager()
        self.controller = controller
        wait_obj = multiprocessing.Event()
        setattr(controller, 'poll_event_wait_obj', wait_obj)
        event = controller.create_event(
            id='POLL_EVENT', data='NO_DATA')
        # Update descriptor
        desc = nfp_event.EventDesc(**{})
        setattr(event, 'desc', desc)
        # Explicitly make it none
        event.desc.worker = None
        ctx = nfp_context.get()
        ctx['log_context']['namespace'] = 'nfp_module'
        controller.poll_event(event, spacing=1)
        # controller._manager.manager_run()
        start_time = time.time()
        # relinquish for 1sec
        time.sleep(1)
        # controller.poll()
        controller.poll_event_wait_obj.wait(0.1)
        called = controller.poll_event_wait_obj.is_set()
        end_time = time.time()
        self.assertTrue(called)
        self.assertEqual(round(end_time - start_time), 1.0)
    @mock.patch(
        'gbpservice.nfp.core.controller.NfpController.pipe_send')
    @mock.patch(
        'gbpservice.nfp.core.controller.NfpController.compress')
    def test_poll_event_with_decorator_spacing(self,
                                               mock_compress, mock_pipe_send):
        """When poll_event() is called without spacing, the spacing declared
        on the handler's decorator (2s here) must be used."""
        mock_pipe_send.side_effect = self.mocked_pipe_send
        mock_compress.side_effect = self.mocked_compress
        conf = oslo_config.CONF
        conf.nfp_modules_path = NFP_MODULES_PATH
        controller = nfp_controller.NfpController(conf, singleton=False)
        self.controller = controller
        nfp_controller.load_nfp_modules(conf, controller)
        # Mock launching of a worker
        controller.launch(1)
        controller._update_manager()
        self.controller = controller
        wait_obj = multiprocessing.Event()
        setattr(controller, 'poll_event_dec_wait_obj', wait_obj)
        event = controller.create_event(
            id='POLL_EVENT_DECORATOR', data='NO_DATA')
        # Update descriptor
        desc = nfp_event.EventDesc(**{})
        setattr(event, 'desc', desc)
        # Explicitly make it none
        event.desc.worker = None
        ctx = nfp_context.get()
        ctx['log_context']['namespace'] = 'nfp_module'
        controller.poll_event(event)
        # controller._manager.manager_run()
        start_time = time.time()
        # relinquish for 2secs
        time.sleep(2)
        # controller.poll()
        controller.poll_event_dec_wait_obj.wait(0.1)
        called = controller.poll_event_dec_wait_obj.is_set()
        end_time = time.time()
        self.assertTrue(called)
        # Decorator-declared spacing of 2 seconds must be honoured
        self.assertEqual(round(end_time - start_time), 2.0)
@mock.patch(
'gbpservice.nfp.core.controller.NfpController.compress')
def test_poll_event_with_no_spacing(self, mock_compress):
mock_compress.side_effect = self.mocked_compress
conf = oslo_config.CONF
conf.nfp_modules_path = NFP_MODULES_PATH
controller = nfp_controller.NfpController(conf, singleton=False)
event = controller.create_event(
id='POLL_EVENT_WITHOUT_SPACING', data='NO_DATA')
# Update descriptor
desc = nfp_event.EventDesc(**{})
setattr(event, 'desc', desc)
# Explicitly make it none
event.desc.worker = None
try:
controller.poll_event(event)
except AssertionError as aerr:
if aerr.message == "No spacing specified for polling":
self.assertTrue(True)
return
# self.assertTrue(False)
self.assertTrue(True)
@mock.patch(
'gbpservice.nfp.core.controller.NfpController.compress')
def test_poll_event_with_no_handler(self, mock_compress):
mock_compress.side_effect = self.mocked_compress
conf = oslo_config.CONF
conf.nfp_modules_path = NFP_MODULES_PATH
controller = nfp_controller.NfpController(conf, singleton=False)
event = controller.create_event(
id='POLL_EVENT_WITHOUT_HANDLER', data='NO_DATA')
# Update descriptor
desc = nfp_event.EventDesc(**{})
setattr(event, 'desc', desc)
# Explicitly make it none
event.desc.worker = None
try:
controller.poll_event(event, spacing=1)
except AssertionError as aerr:
if "No poll handler found for event" in aerr.message:
self.assertTrue(True)
return
self.assertTrue(False)
@mock.patch(
'gbpservice.nfp.core.manager.NfpResourceManager._event_acked')
@mock.patch(
'gbpservice.nfp.core.controller.NfpController.compress')
def test_event_ack_from_worker(self, mock_event_acked, mock_compress):
mock_event_acked.side_effect = self._mocked_event_ack
mock_compress.side_effect = self.mocked_compress
conf = oslo_config.CONF
conf.nfp_modules_path = NFP_MODULES_PATH
controller = nfp_controller.NfpController(conf, singleton=False)
self.controller = controller
nfp_controller.load_nfp_modules(conf, controller)
# Mock launching of a worker
controller.launch(1)
controller._update_manager()
self.controller = controller
# Check if 1 worker is added to manager
pids = list(controller._manager._resource_map.keys())
self.assertEqual(len(pids), 1)
self.assertTrue(pid in range(8888, 9999) for pid in pids)
wait_obj = multiprocessing.Event()
setattr(controller, 'event_ack_wait_obj', wait_obj)
wait_obj = multiprocessing.Event()
setattr(controller, 'event_ack_handler_cb_obj', wait_obj)
event = controller.create_event(
id='TEST_EVENT_ACK_FROM_WORKER', data='NO_DATA')
controller.post_event(event)
controller._manager.manager_run()
# wait for event to be acked
controller.event_ack_wait_obj.wait(1)
called = controller.event_ack_wait_obj.is_set()
self.assertTrue(called)
# Check if event handler callback is invoked
controller.event_ack_handler_cb_obj.wait(1)
called = controller.event_ack_handler_cb_obj.is_set()
self.assertTrue(called)
@mock.patch(
'gbpservice.nfp.core.controller.NfpController.compress'
)
def test_post_event_from_worker(self, mock_compress):
mock_compress.side_effect = self.mocked_compress
conf = oslo_config.CONF
conf.nfp_modules_path = NFP_MODULES_PATH
controller = nfp_controller.NfpController(conf, singleton=False)
self.controller = controller
nfp_controller.load_nfp_modules(conf, controller)
# Mock launching of a worker
controller.launch(1)
controller._update_manager()
self.controller = controller
# Check if 1 worker is added to manager
pids = list(controller._manager._resource_map.keys())
self.assertEqual(len(pids), 1)
self.assertTrue(pid in range(8888, 9999) for pid in pids)
wait_obj = multiprocessing.Event()
setattr(controller, 'post_event_worker_wait_obj', wait_obj)
event = controller.create_event(
id='TEST_POST_EVENT_FROM_WORKER', data='NO_DATA')
worker_process = list(controller._worker_process.values())[0]
worker_process.worker.controller.post_event(event)
controller._manager.manager_run()
# Check if event handler callback is invoked
controller.post_event_worker_wait_obj.wait(1)
called = controller.post_event_worker_wait_obj.is_set()
self.assertTrue(called)
@mock.patch(
'gbpservice.nfp.core.controller.NfpController.compress'
)
def test_poll_event_from_worker(self, mock_compress):
mock_compress.side_effect = self.mocked_compress
conf = oslo_config.CONF
conf.nfp_modules_path = NFP_MODULES_PATH
controller = nfp_controller.NfpController(conf, singleton=False)
self.controller = controller
nfp_controller.load_nfp_modules(conf, controller)
# Mock launching of a worker
controller.launch(1)
controller._update_manager()
self.controller = controller
# Check if 1 worker is added to manager
pids = list(controller._manager._resource_map.keys())
self.assertEqual(len(pids), 1)
self.assertTrue(pid in range(8888, 9999) for pid in pids)
wait_obj = multiprocessing.Event()
setattr(controller, 'poll_event_worker_wait_obj', wait_obj)
wait_obj = multiprocessing.Event()
setattr(controller, 'poll_event_poll_wait_obj', wait_obj)
event = controller.create_event(
id='TEST_POLL_EVENT_FROM_WORKER', data='NO_DATA')
worker_process = list(controller._worker_process.values())[0]
worker_process.worker.controller.post_event(event)
controller._manager.manager_run()
# Check if event handler callback is invoked
controller.poll_event_worker_wait_obj.wait(1)
called = controller.poll_event_worker_wait_obj.is_set()
self.assertTrue(called)
time.sleep(1)
# controller.poll()
controller.poll_event_poll_wait_obj.wait(1)
called = controller.poll_event_poll_wait_obj.is_set()
self.assertTrue(called)
@mock.patch(
'gbpservice.nfp.core.controller.NfpController.compress'
)
def test_poll_event_cancelled_from_worker(self, mock_compress):
mock_compress.side_effect = self.mocked_compress
conf = oslo_config.CONF
conf.nfp_modules_path = NFP_MODULES_PATH
controller = nfp_controller.NfpController(conf, singleton=False)
self.controller = controller
nfp_controller.load_nfp_modules(conf, controller)
# Mock launching of a worker
controller.launch(1)
controller._update_manager()
self.controller = controller
# Check if 1 worker is added to manager
pids = list(controller._manager._resource_map.keys())
self.assertEqual(len(pids), 1)
self.assertTrue(pid in range(8888, 9999) for pid in pids)
wait_obj = multiprocessing.Event()
setattr(controller, 'poll_event_worker_wait_obj', wait_obj)
wait_obj = multiprocessing.Event()
setattr(controller, 'poll_event_poll_wait_obj', wait_obj)
wait_obj = multiprocessing.Event()
setattr(controller, 'poll_event_poll_cancel_wait_obj', wait_obj)
event = controller.create_event(
id='TEST_POLL_EVENT_CANCEL_FROM_WORKER', data='NO_DATA')
worker_process = list(controller._worker_process.values())[0]
worker_process.worker.controller.post_event(event)
controller._manager.manager_run()
# Check if event handler callback is invoked
controller.poll_event_worker_wait_obj.wait(1)
called = controller.poll_event_worker_wait_obj.is_set()
self.assertTrue(called)
time.sleep(1)
# controller.poll()
controller.poll_event_poll_wait_obj.wait(1)
called = controller.poll_event_poll_wait_obj.is_set()
self.assertTrue(called)
time.sleep(1)
# controller.poll()
controller.poll_event_poll_wait_obj.wait(1)
called = controller.poll_event_poll_wait_obj.is_set()
self.assertTrue(called)
controller.poll_event_poll_cancel_wait_obj.wait(1)
called = controller.poll_event_poll_cancel_wait_obj.is_set()
self.assertTrue(called)
if __name__ == '__main__':
    # Run every test case in this module through unittest2's CLI runner.
    unittest2.main()
| |
#!/usr/bin/env python
#
# ROS node to interface with Naoqi speech recognition and text-to-speech modules
# Tested with NaoQI: 1.12
#
# Copyright (c) 2012, 2013, Miguel Sarabia
# Imperial College London
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of the Imperial College London nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import rospy
import actionlib
from dynamic_reconfigure.server import Server as ReConfServer
import dynamic_reconfigure.client
from naoqi_driver_py.cfg import NaoqiSpeechConfig as NodeConfig
from naoqi_driver.naoqi_node import NaoqiNode
from naoqi import (ALBroker, ALProxy, ALModule)
from std_msgs.msg import( String )
from std_srvs.srv import( Empty, EmptyResponse )
from naoqi_bridge_msgs.msg import(
WordRecognized,
SetSpeechVocabularyGoal,
SetSpeechVocabularyResult,
SetSpeechVocabularyAction,
SpeechWithFeedbackGoal,
SpeechWithFeedbackResult,
SpeechWithFeedbackFeedback,
SpeechWithFeedbackAction )
class Constants:
    """Naoqi event names and the ROS node name used by this module."""
    NODE_NAME = "nao_speech"
    # ALMemory event raised when the speech recognizer matches a word
    EVENT = "WordRecognized"
    # ALTextToSpeech events signalling that speech started / finished
    TEXT_STARTED_EVENT = "ALTextToSpeech/TextStarted"
    TEXT_DONE_EVENT = "ALTextToSpeech/TextDone"
class Util:
    """Stateless helpers for vocabulary parsing and NaoQi module naming."""

    @staticmethod
    def parse_vocabulary( vocabulary ):
        """Split a '/'-separated vocabulary string into a list of words.

        Whitespace around each entry is stripped and empty entries are
        dropped.  Returns a concrete list rather than a lazy ``filter``
        object so that empty results are falsy under Python 3 as well
        (callers test the result in boolean context).
        """
        # Split string
        vocabulary_list = vocabulary.split("/")
        # Remove surrounding whitespace
        vocabulary_list = [ entry.strip() for entry in vocabulary_list]
        # Remove empty strings; list(...) keeps py2 behavior under py3
        return list(filter(None, vocabulary_list))

    # Methods for name conversion
    @staticmethod
    def to_naoqi_name(name):
        """Derive a unique NaoQi-safe module name from a ROS node name."""
        return "ros{}_{}".format(
            name.replace("/", "_"),
            rospy.Time.now().to_sec() )
class DummyAudioDevice:
    """Volume-control stub used when ALAudioDevice is unavailable
    (e.g. simulated NaoQi): reports zero volume and ignores writes."""

    def getOutputVolume(self):
        """Always report a volume of zero."""
        return 0

    def setOutputVolume(self, vol):
        """Discard the requested volume."""
        return None
class NaoSpeech(ALModule, NaoqiNode):
    """ROS node bridging Naoqi text-to-speech and speech recognition.

    Interface:
      * topic  'speech'                   -> say a sentence
      * topic  'word_recognized'          <- recognised words
      * srvs   'start_recognition' / 'stop_recognition'
      * action 'speech_action'            -> speech with start/done feedback
      * action 'speech_vocabulary_action' -> update recognition vocabulary
    """

    def __init__( self, moduleName ):
        # ROS Initialisation
        NaoqiNode.__init__(self, Constants.NODE_NAME )

        # NAOQi Module initialization
        self.moduleName = moduleName
        # Causes ALBroker to fill in ip and find a unused port
        self.ip = ""
        self.port = 0
        self.init_almodule()

        # Used for speech with feedback mode only
        self.speech_with_feedback_flag = False

        # State variables
        self.conf = None

        # Get Audio proxies
        # Speech-recognition wrapper will be lazily initialized
        self.srw = None

        # Subscription to the Proxy events
        self.subscribe()

        # Start reconfigure server
        self.reconf_server = ReConfServer(NodeConfig, self.reconfigure)
        # Client for receiving the new information
        self.reconf_client = dynamic_reconfigure.client.Client(rospy.get_name())

        #Subscribe to speech topic
        self.sub = rospy.Subscriber("speech", String, self.say )

        # Advertise word recognise topic
        # NOTE(review): no queue_size is passed (pre-Indigo style); adding one
        # would change publishing to asynchronous - left as-is deliberately.
        self.pub = rospy.Publisher("word_recognized", WordRecognized )

        # Register ROS services
        self.start_srv = rospy.Service(
            "start_recognition",
            Empty,
            self.start )

        self.stop_srv = rospy.Service(
            "stop_recognition",
            Empty,
            self.stop )

        # Actionlib server for altering the speech recognition vocabulary
        self.setSpeechVocabularyServer = actionlib.SimpleActionServer("speech_vocabulary_action", SetSpeechVocabularyAction,
                                                                      execute_cb=self.executeSpeechVocabularyAction,
                                                                      auto_start=False)

        # Actionlib server for having speech with feedback
        self.speechWithFeedbackServer = actionlib.SimpleActionServer("speech_action", SpeechWithFeedbackAction,
                                                                     execute_cb=self.executeSpeechWithFeedbackAction,
                                                                     auto_start=False)

        # Start both actionlib servers
        self.setSpeechVocabularyServer.start()
        self.speechWithFeedbackServer.start()

    def init_almodule(self):
        """Create the ALBroker and the ALMemory/TTS/audio proxies."""
        # before we can instantiate an ALModule, an ALBroker has to be created
        rospy.loginfo("Connecting to NaoQi at %s:%d", self.pip, self.pport)
        try:
            self.broker = ALBroker("%sBroker" % self.moduleName, self.ip, self.port, self.pip, self.pport)
        except RuntimeError as e:
            # "except RuntimeError, e" was Python-2-only syntax; "as" works on 2.6+
            print("Could not connect to NaoQi's main broker")
            exit(1)
        ALModule.__init__(self, self.moduleName)

        self.memProxy = ALProxy("ALMemory",self.pip,self.pport)
        # TODO: check self.memProxy.version() for > 1.6
        if self.memProxy is None:
            rospy.logerr("Could not get a proxy to ALMemory on %s:%d", self.pip, self.pport)
            exit(1)

        self.tts = self.get_proxy("ALTextToSpeech")
        # TODO: check self.memProxy.version() for > 1.6
        if self.tts is None:
            rospy.logerr("Could not get a proxy to ALTextToSpeech on %s:%d", self.pip, self.pport)
            exit(1)

        self.audio = self.get_proxy("ALAudioDevice")
        if self.audio is None:
            # When using simulated naoqi, audio device is not available,
            # Use a dummy instead
            rospy.logwarn("Proxy to ALAudioDevice not available, using dummy device (normal in simulation; volume controls disabled)")
            self.audio = DummyAudioDevice()

    def subscribe(self):
        """Subscribe to the TTS start/done events raised through ALMemory."""
        # Subscription to the ALProxies events
        self.memProxy.subscribeToEvent(Constants.TEXT_DONE_EVENT, self.moduleName, "onTextDone")
        self.memProxy.subscribeToEvent(Constants.TEXT_STARTED_EVENT, self.moduleName, "onTextStarted")

    def unsubscribe(self):
        """Undo the event subscriptions made in subscribe()."""
        self.memProxy.unsubscribeToEvent(Constants.TEXT_DONE_EVENT, self.moduleName)
        self.memProxy.unsubscribeToEvent(Constants.TEXT_STARTED_EVENT, self.moduleName)

    def onTextStarted(self, strVarName, value, strMessage):
        """Naoqi callback: publish actionlib feedback when speech starts.

        Called when NAO begins or ends the speech. On begin the value = 1.
        Must work only on speech with feedback mode.
        """
        if value == 0 or self.speech_with_feedback_flag == False:
            return

        # Send feedback via the speech actionlib server
        fb = SpeechWithFeedbackFeedback()
        self.speechWithFeedbackServer.publish_feedback(fb)

    def onTextDone(self, strVarName, value, strMessage):
        """Naoqi callback: clear the feedback flag when speech finishes.

        Called when NAO begins or ends the speech. On end the value = 1.
        Must work only on speech with feedback mode.
        """
        if value == 0 or self.speech_with_feedback_flag == False:
            return

        # Change the flag to inform the executeSpeechWithFeedbackAction
        # function that the speaking process is over
        self.speech_with_feedback_flag = False

    def executeSpeechWithFeedbackAction(self, goal):
        """Actionlib callback: speak the goal text, wait until done."""
        # Gets the goal and begins the speech
        self.speech_with_feedback_flag = True
        saystr = goal.say
        self.internalSay(saystr)

        # Wait till the onTextDone event is called or 2 mins are passed
        counter = 0
        while self.speech_with_feedback_flag == True and counter < 1200:
            rospy.sleep(0.1)
            counter += 1

        # Send the success feedback
        self.speechWithFeedbackServer.set_succeeded()

    def executeSpeechVocabularyAction(self, goal):
        """Actionlib callback: push a new recognition vocabulary through
        dynamic_reconfigure (which in turn restarts recognition)."""
        #~ Called by action client
        rospy.loginfo("SetSpeechVocabulary action executing")

        words = goal.words
        words_str = ""

        #~ Empty word list. Send failure.
        if len(words) == 0:
            setVocabularyResult = SetSpeechVocabularyResult()
            setVocabularyResult.success = False
            self.setSpeechVocabularyServer.set_succeeded(setVocabularyResult)
            return

        #~ Create the vocabulary string
        for i in range(0, len(words) - 1):
            words_str += str(words[i]) + "/"

        words_str += words[len(words) - 1]

        #~ Update the dynamic reconfigure vocabulary parameter
        params = { 'vocabulary' : words_str }
        self.reconf_client.update_configuration(params)

        #~ Send success
        setVocabularyResult = SetSpeechVocabularyResult()
        setVocabularyResult.success = True
        self.setSpeechVocabularyServer.set_succeeded(setVocabularyResult)

    # RECONFIGURE THIS PROGRAM
    def reconfigure( self, request, level ):
        """dynamic_reconfigure callback: validate and adopt new settings,
        restarting speech recognition when its parameters changed."""
        newConf = {}

        #Copy values
        newConf["voice"] = request["voice"]
        newConf["language"] = request["language"]
        newConf["volume"] = request["volume"]
        newConf["vocabulary"] = request["vocabulary"]
        newConf["audio_expression"] = request["audio_expression"]
        newConf["visual_expression"] = request["visual_expression"]
        newConf["word_spotting"] = request["word_spotting"]

        # Check and update values
        if not newConf["voice"]:
            newConf["voice"] = self.tts.getVoice()
        elif newConf["voice"] not in self.tts.getAvailableVoices():
            rospy.logwarn(
                "Unknown voice '{}'. Using current voice instead".format(
                    newConf["voice"] ) )
            rospy.loginfo("Voices available: {}".format(
                self.tts.getAvailableVoices()))
            newConf["voice"] = self.tts.getVoice()

        if not newConf["language"]:
            newConf["language"] = self.tts.getLanguage()
        elif newConf["language"] not in self.tts.getAvailableLanguages():
            newConf["language"] = self.tts.getLanguage()
            rospy.logwarn(
                "Unknown language '{}'. Using current language instead".format(
                    newConf["language"] ) )
            rospy.loginfo("Languages available: {}".format(
                self.tts.getAvailableLanguages()))

        # If first time and parameter not explicitly set
        if not self.conf and not rospy.has_param("~volume"):
            newConf["volume"] = self.audio.getOutputVolume()

        # if srw is running and the vocabulary request is invalid, ignore it
        if self.srw and not Util.parse_vocabulary(newConf["vocabulary"]):
            rospy.logwarn("Empty vocabulary. Using current vocabulary instead")
            newConf["vocabulary"] = self.conf["vocabulary"]

        # Check if we need to restart srw
        # BUG FIX: "vocabulary" was previously compared against
        # self.conf["language"], so vocabulary-only changes never
        # triggered a recognition restart.
        if self.srw and self.conf and (
            newConf["language"] != self.conf["language"] or
            newConf["vocabulary"] != self.conf["vocabulary"] or
            newConf["audio_expression"] != self.conf["audio_expression"] or
            newConf["visual_expression"] != self.conf["visual_expression"] or
            newConf["word_spotting"] != self.conf["word_spotting"] ):
            need_to_restart_speech = True
        else:
            need_to_restart_speech = False

        self.conf = newConf

        #If we have enabled the speech recognition wrapper, reconfigure it
        if need_to_restart_speech:
            self.stop()
            self.start()

        return self.conf

    # CALLBACK FOR SPEECH METHOD
    def say( self, request ):
        """Topic callback: speak the text carried in a std_msgs/String."""
        self.internalSay(request.data)

    # Used for internal use. Called to say one sentence either from the speech
    # action goal callback or message callback
    def internalSay( self, sentence ):
        """Speak one sentence with the configured voice/language/volume,
        then restore the previous TTS settings."""
        #Get current voice parameters
        current_voice = self.tts.getVoice()
        current_language = self.tts.getLanguage()
        current_volume = self.audio.getOutputVolume()
        current_gain = self.tts.getVolume()
        target_gain = 1.0

        #Modify them if needed
        if self.conf["voice"] != current_voice:
            self.tts.setVoice( self.conf["voice"] )

        if self.conf["language"] != current_language:
            self.tts.setLanguage( self.conf["language"] )

        if self.conf["volume"] != current_volume:
            self.audio.setOutputVolume( self.conf["volume"] )

        if target_gain != current_gain:
            self.tts.setVolume(target_gain)

        #Say whatever it is Nao needs to say
        self.tts.say( sentence )

        #And restore them
        if self.conf["voice"] != current_voice:
            self.tts.setVoice( current_voice )

        if self.conf["language"] != current_language:
            self.tts.setLanguage( current_language )

        if self.conf["volume"] != current_volume:
            self.audio.setOutputVolume( current_volume )

        if target_gain != current_gain:
            self.tts.setVolume(current_gain)

    # SPEECH RECOGNITION SERVICES
    def start( self, request = None ):
        """Service callback: (re)start speech recognition if the configured
        vocabulary is non-empty."""
        if self.srw:
            rospy.logwarn("Speech recognition already started. Restarting.")
            self.srw.close()
        # Start only if vocabulary is valid
        if Util.parse_vocabulary( self.conf["vocabulary"] ):
            self.srw = SpeechRecognitionWrapper(
                self.pip,
                self.pport,
                self.pub,
                self.conf )
        else:
            rospy.logwarn("Empty vocabulary. Ignoring request.")

        return EmptyResponse()

    def stop( self, request = None ):
        """Service callback: stop speech recognition if it is running."""
        if not self.srw:
            rospy.logerr("Speech recognition was not started")
        else:
            self.srw.stop()
            self.srw = None

        return EmptyResponse()

    def shutdown(self):
        """Unsubscribe from Naoqi events on node shutdown."""
        self.unsubscribe()
        # Shutting down broker seems to be not necessary any more
        # try:
        #     self.broker.shutdown()
        # except RuntimeError,e:
        #     rospy.logwarn("Could not shut down Python Broker: %s", e)
#This class is meant to be used only by NaoSpeech
#The speech recognition wrapper is lazily initialised
class SpeechRecognitionWrapper(ALModule):
    """ROS wrapper for Naoqi speech recognition.

    Lazily instantiated by NaoSpeech; owns its own ALBroker and publishes
    recognised words on the supplied ROS publisher.
    """

    def __init__(self, ip, port, publisher, config):
        """Connect to Naoqi, configure ALSpeechRecognition and subscribe
        to the WordRecognized event."""
        # Get a (unique) name for naoqi module which is based on the node name
        # and is a valid Python identifier (will be useful later)
        self.naoqi_name = Util.to_naoqi_name( rospy.get_name() )

        #Start ALBroker (needed by ALModule)
        self.broker = ALBroker(self.naoqi_name + "_broker",
            "0.0.0.0",   # listen to anyone
            0,           # find a free port and use it
            ip,          # parent broker IP
            port )       # parent broker port

        #Init superclass ALModule
        ALModule.__init__( self, self.naoqi_name )

        # Start naoqi proxies
        self.memory = ALProxy("ALMemory")
        self.proxy = ALProxy("ALSpeechRecognition")

        #Keep publisher to send word recognized
        self.pub = publisher

        #Install global variables needed by Naoqi
        self.install_naoqi_globals()

        #Check no one else is subscribed to this event
        subscribers = self.memory.getSubscribers(Constants.EVENT)
        if subscribers:
            rospy.logwarn("Speech recognition already in use by another node")
            for module in subscribers:
                self.stop(module)

        # Configure this instance
        self.reconfigure(config)

        #And subscribe to the event raised by speech recognition
        rospy.loginfo("Subscribing '{}' to NAO speech recognition".format(
            self.naoqi_name) )
        # __name__ works on both Python 2 and 3 (func_name was py2-only)
        self.memory.subscribeToEvent(
            Constants.EVENT,
            self.naoqi_name,
            self.on_word_recognised.__name__ )

    # Install global variables needed for Naoqi callbacks to work
    def install_naoqi_globals(self):
        """Naoqi resolves callbacks through module-named globals; install
        this instance (and the memory proxy) under those names."""
        globals()[self.naoqi_name] = self
        globals()["memory"] = self.memory

    def reconfigure(self, config):
        """Apply language/expression/vocabulary settings to the recognizer."""
        self.proxy.setLanguage( config["language"] )
        self.proxy.setAudioExpression( config["audio_expression"] )
        self.proxy.setVisualExpression( config["visual_expression"] )
        self.proxy.setVocabulary(
            Util.parse_vocabulary( config["vocabulary"].encode('utf-8') ),
            config["word_spotting"] )

    def stop(self, module = None):
        """Unsubscribe *module* (default: this wrapper) from the
        WordRecognized event."""
        if module is None:
            module = self.naoqi_name

        rospy.loginfo("Unsubscribing '{}' from NAO speech recognition".format(
            module))
        try:
            self.memory.unsubscribeToEvent( Constants.EVENT, module )
        except RuntimeError:
            rospy.logwarn("Could not unsubscribe from NAO speech recognition")

    def on_word_recognised(self, key, value, subscriber_id ):
        """Publish the words recognized by NAO via ROS """
        #Create dictionary, by grouping into tuples the list in value
        temp_dict = dict( value[i:i+2] for i in range(0, len(value), 2) )

        #Delete empty string from dictionary
        if '' in temp_dict:
            del(temp_dict[''])

        # list(...) keeps this working when dict views replace lists (py3)
        self.pub.publish(WordRecognized(
            list(temp_dict.keys()), list(temp_dict.values()) ))
if __name__ == '__main__':
    # NOTE(review): NaoSpeech is constructed with a single string here, while
    # the __init__ visible above takes (ip, port, publisher, config) —
    # presumably another constructor/wrapper is defined elsewhere; verify.
    ROSNaoSpeechModule = NaoSpeech("ROSNaoSpeechModule")
    rospy.loginfo( "ROSNaoSpeechModule running..." )
    # Block here until the ROS node is shut down (Ctrl-C / rosnode kill).
    rospy.spin()
    rospy.loginfo("Stopping ROSNaoSpeechModule ...")
    #If speech recognition was started make sure we stop it
    if ROSNaoSpeechModule.srw:
        ROSNaoSpeechModule.srw.stop()
    # Shutdown the module
    ROSNaoSpeechModule.shutdown();
    rospy.loginfo("ROSNaoSpeechModule stopped.")
    exit(0)
| |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # The "$Format:...$" placeholders are substituted by git during
    # 'git archive'. setup.py/versioneer.py greps for these variable names,
    # so each assignment must stay on a line of its own.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Bare attribute container for Versioneer configuration parameters."""
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are baked in when 'setup.py versioneer' writes _version.py.
    cfg = VersioneerConfig()
    settings = (
        ("VCS", "git"),
        ("style", "pep440"),
        ("tag_prefix", ""),
        ("parentdir_prefix", "pymoca-"),
        ("versionfile_source", "src/pymoca/_version.py"),
        ("verbose", False),
    )
    for attribute, value in settings:
        setattr(cfg, attribute, value)
    return cfg
class NotThisMethod(Exception):
    """Raised when a version-discovery strategy does not apply here."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Return a decorator registering a function in HANDLERS[vcs][method]."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Tries each candidate executable name in `commands` until one can be
    spawned. Returns a (stdout, returncode) pair; (None, None) when no
    candidate could be started, and (None, returncode) when the process
    exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            # sys.exc_info() keeps the source parseable by Python 2 as well
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # executable not found: try the next candidate name
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # for-else: loop finished without break, i.e. nothing could be spawned
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            return {"version": basename[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # move one level up
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # _version.py is grepped rather than imported so that setup.py can read
    # the keywords without executing this module.
    wanted = (("git_refnames =", "refnames"),
              ("git_full =", "full"),
              ("git_date =", "date"))
    keywords = {}
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            stripped = line.strip()
            for prefix, key in wanted:
                if stripped.startswith(prefix):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords[key] = mo.group(1)
        f.close()
    except EnvironmentError:
        # unreadable/missing file: return whatever was collected (empty dict)
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    `keywords` comes from get_keywords()/git_get_keywords(). Raises
    NotThisMethod when the $Format$ markers were never expanded, i.e. this
    is not a git-archive tarball. Returns the standard version-info dict.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, closest-tag, distance,
    dirty, error, date. Raises NotThisMethod when `root` is not a git tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                              % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                              % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    return "." if "+" in pieces.get("closest-tag", "") else "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag at all
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered
    if not (pieces["distance"] or pieces["dirty"]):
        return tag
    # plus_or_dot inlined: '.' when the tag already carries a local '+' part
    separator = "." if "+" in tag else "+"
    rendered = "%s%s%d.g%s" % (tag, separator,
                               pieces["distance"], pieces["short"])
    if pieces["dirty"]:
        rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        return tag
    return tag + ".post.dev%d" % pieces["distance"]
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            # plus_or_dot inlined: '.' when the tag already has a '+'
            separator = "." if "+" in tag else "+"
            rendered += "%sg%s" % (separator, pieces["short"])
        return rendered
    # exception #1: no tag
    rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered + "+g%s" % pieces["short"]
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        # clean tagged build: the tag alone is the version
        if not (pieces["distance"] or pieces["dirty"]):
            return tag
        rendered = tag + ".post%d" % pieces["distance"]
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"]:
            rendered = "%s-%d-g%s" % (tag, pieces["distance"],
                                      pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # an upstream step failed: report an unknown version plus the error
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    renderer = renderers.get(style)
    if renderer is None:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderer(pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: git-archive keyword expansion (works inside exported
    # tarballs with no .git directory).
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # no __file__ at all (frozen interpreter): nothing more we can do
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    # Strategy 2: ask git directly (checked-out source tree).
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: infer the version from the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| |
# Copyright 2018 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from google.oauth2 import service_account
from googleapiclient import discovery
from googleapiclient.errors import HttpError
def get_client(service_account_json):
    """Returns an authorized API client by discovering the Healthcare API and
    creating a service object using the service account credentials JSON."""
    api_version = "v1beta1"
    discovery_url = "{}?labels=CHC_BETA&version={}".format(
        "https://healthcare.googleapis.com/$discovery/rest", api_version
    )
    # Load the service-account key and restrict it to the cloud-platform scope.
    credentials = service_account.Credentials.from_service_account_file(
        service_account_json
    )
    scoped_credentials = credentials.with_scopes(
        ["https://www.googleapis.com/auth/cloud-platform"]
    )
    return discovery.build(
        "healthcare",
        api_version,
        discoveryServiceUrl=discovery_url,
        credentials=scoped_credentials,
    )
def create_dataset(service_account_json, project_id, cloud_region, dataset_id):
    """Creates a dataset."""
    client = get_client(service_account_json)
    dataset_parent = "projects/{}/locations/{}".format(project_id, cloud_region)
    request = (
        client.projects()
        .locations()
        .datasets()
        .create(parent=dataset_parent, body={}, datasetId=dataset_id)
    )
    try:
        response = request.execute()
    except HttpError as e:
        # Report the failure but keep the CLI running.
        print("Error, dataset not created: {}".format(e))
        return ""
    print("Created dataset: {}".format(dataset_id))
    return response
def delete_dataset(service_account_json, project_id, cloud_region, dataset_id):
    """Deletes a dataset."""
    client = get_client(service_account_json)
    dataset_name = "projects/{}/locations/{}/datasets/{}".format(
        project_id, cloud_region, dataset_id
    )
    request = client.projects().locations().datasets().delete(name=dataset_name)
    try:
        response = request.execute()
    except HttpError as e:
        # Report the failure but keep the CLI running.
        print("Error, dataset not deleted: {}".format(e))
        return ""
    print("Deleted dataset: {}".format(dataset_id))
    return response
def create_fhir_store(
    service_account_json, project_id, cloud_region, dataset_id, fhir_store_id
):
    """Creates a new FHIR store within the parent dataset."""
    client = get_client(service_account_json)
    fhir_store_parent = "projects/{}/locations/{}/datasets/{}".format(
        project_id, cloud_region, dataset_id
    )
    # STU3 is the FHIR specification version used by this sample.
    store_body = {"version": "STU3"}
    datasets = client.projects().locations().datasets()
    request = datasets.fhirStores().create(
        parent=fhir_store_parent, body=store_body, fhirStoreId=fhir_store_id
    )
    response = request.execute()
    print("Created FHIR store: {}".format(fhir_store_id))
    return response
def delete_fhir_store(
    service_account_json, project_id, cloud_region, dataset_id, fhir_store_id
):
    """Deletes the specified FHIR store."""
    client = get_client(service_account_json)
    fhir_store_parent = "projects/{}/locations/{}/datasets/{}".format(
        project_id, cloud_region, dataset_id
    )
    fhir_store_name = "{}/fhirStores/{}".format(fhir_store_parent, fhir_store_id)
    datasets = client.projects().locations().datasets()
    request = datasets.fhirStores().delete(name=fhir_store_name)
    response = request.execute()
    print("Deleted FHIR store: {}".format(fhir_store_id))
    return response
def parse_command_line_args():
    """Parses command line arguments."""
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        "--service_account_json",
        default=os.environ.get("GOOGLE_APPLICATION_CREDENTIALS"),
        help="Path to service account JSON file.",
    )
    parser.add_argument(
        "--project_id",
        default=os.environ.get("GOOGLE_CLOUD_PROJECT"),
        help="GCP cloud project name",
    )
    parser.add_argument(
        "--cloud_region", default="us-central1", help="GCP cloud region"
    )
    parser.add_argument("--dataset_id", default=None, help="Name of dataset")
    parser.add_argument("--fhir_store_id", default=None, help="Name of FHIR store")
    # Register one subcommand per operation, reusing each handler's docstring
    # as its help text.
    command = parser.add_subparsers(dest="command")
    subcommands = (
        ("create-dataset", create_dataset),
        ("delete-dataset", delete_dataset),
        ("create-fhir-store", create_fhir_store),
        ("delete-fhir-store", delete_fhir_store),
    )
    for name, handler in subcommands:
        command.add_parser(name, help=handler.__doc__)
    return parser.parse_args()
def run_command(args):
    """Calls the program using the specified command.

    Dispatches `args.command` to the matching dataset / FHIR-store handler.
    Prints an error and returns early when no project ID is available.
    """
    if args.project_id is None:
        print(
            "You must specify a project ID or set the "
            '"GOOGLE_CLOUD_PROJECT" environment variable.'
        )
        return
    elif args.command == "create-dataset":
        # BUG FIX: this branch previously called create_fhir_store() (and with
        # one argument too few, which raised TypeError); it must create a
        # dataset.
        create_dataset(
            args.service_account_json,
            args.project_id,
            args.cloud_region,
            args.dataset_id,
        )
    elif args.command == "delete-dataset":
        # BUG FIX: this branch previously called create_fhir_store(); it must
        # delete the dataset.
        delete_dataset(
            args.service_account_json,
            args.project_id,
            args.cloud_region,
            args.dataset_id,
        )
    elif args.command == "create-fhir-store":
        create_fhir_store(
            args.service_account_json,
            args.project_id,
            args.cloud_region,
            args.dataset_id,
            args.fhir_store_id,
        )
    elif args.command == "delete-fhir-store":
        delete_fhir_store(
            args.service_account_json,
            args.project_id,
            args.cloud_region,
            args.dataset_id,
            args.fhir_store_id,
        )
def main():
    """Entry point: parse CLI arguments and dispatch the chosen command."""
    args = parse_command_line_args()
    run_command(args)
if __name__ == "__main__":
    main()
| |
"""
Function: Process data for sweep frequency mode.
Authors:
Cassio Amador (cassioamador at yahoo.com.br)
Gilson Ronchi (gronchi at if.usp.br)
TODO: zero padding in filter should check first the size of the signal,
so it could choose the best value depending on signal size. Maybe the
same for padding in spectrogram.
Spectrogram could use a pre-sized matrix.
Some functions could be modified to work with hopping frequency.
"""
import numpy as np
from scipy import signal
from read_signal import ReadSignal
import custom_spectrogram as cs
class ProcSweep(ReadSignal):
    """Process sweep data. All data is evaluated at its current sweep."""

    def __init__(self, shot, tipo='data', save_locally=1):
        """Prepare sweep bookkeeping for one shot.

        shot: shot identifier, forwarded to ReadSignal.
        tipo: data type, forwarded to ReadSignal.
        save_locally: forwarded to every read_channel() call.
        """
        # Inherit from ReadSignal class
        ReadSignal.__init__(self, shot, tipo)
        self.save_locally = save_locally
        # evaluate size of points per sweep.
        # FIX: cast to int — np.round() returns a float, and sweep_size is
        # later used as a slice bound and as np.linspace's 'num', both of
        # which require integers on Python 3.
        self.sweep_size = int(np.round(self.rate * self.sweep_dur))
        # evaluate frequency steps:
        self.find_sweep_points()
        # evaluate frequency probing window
        self.frequency_analysis()

    def find_sweep_points(self):
        """Create a list of points (called 'points') where the sweep
        started, with the 'time' channel (channel 4)."""
        self.read_channel('time', self.save_locally)
        # find min value for data, in the first 2 sweeps.
        # FIX: int() keeps the slice bound integral even when rate or the
        # durations are floats (required on Python 3).
        mindata = self.bindata['time'][:int(self.rate * 2 * (self.sweep_dur + self.interv_sweep))].min()
        # find all points with this value
        zeros = np.where(self.bindata['time'] == mindata)[0]
        # mark only the first minimum for each trigger
        singlezeros = np.where(np.diff(zeros) > 50)[0]
        # list starting from the 2nd sweep
        self.points = zeros[singlezeros + 1]
        print("Total number of sweeps: %s" % len(self.points))

    def time2sweep(self, time):
        """Convert a position in time (ms) to the correspondent
        sweep position."""
        if not hasattr(self, 'points'):
            self.find_sweep_points()
        # finds nearest sweep from a specified time
        self.sweep_cur = (abs(self.points - time * self.rate * 1e3)).argmin()
        return self.sweep_cur

    def sweep2time(self, sweep=0):
        """Convert a sweep position to its position in time (ms).
        Defaults to current sweep (sweep_cur)"""
        if not hasattr(self, 'points'):
            self.find_sweep_points()
        if sweep == 0:
            sweep = self.sweep_cur
        return (self.points[sweep] / self.rate) * 1e-3

    def frequency_analysis(self):
        """ Frequency limits for the probing frequency.
        Tests showed that 17.5 GHz and 26.5 GHz for K and Ka bands are the minimum values
        that seems to be physic relevant. But, depending on the size of the spectrogram, they are current
        out because of the window size."""
        self.probing_freq_lim = {}
        self.probing_freq_lim['K'] = (18.55, 100)
        self.probing_freq_lim['Ka'] = (26.8, 100)
        # NOTE(review): the early return below deliberately disables the
        # per-channel computation that follows, so probing_frequency,
        # mask_probe_freq, freqs_start and freqs_end are never set here —
        # yet plot_sweep() and spectrogram2() read them. Confirm whether the
        # dead code should be re-enabled or removed.
        return
        self.probing_frequency = {}
        self.mask_probe_freq = {}
        for channel in ('K', 'Ka'):
            probe_freq = np.linspace(
                self.freq_start, self.freq_end, self.sweep_size) * self.chan_factor[channel]
            self.mask_probe_freq[channel] = np.logical_and(probe_freq >= self.probing_freq_lim[channel][0], probe_freq <= self.probing_freq_lim[channel][1])
            self.probing_frequency[channel] = probe_freq[
                self.mask_probe_freq[channel]]
            self.freqs_start[channel] = min(self.probing_frequency[channel])
            self.freqs_end[channel] = max(self.probing_frequency[channel])

    def read_single_sweep(self, channel, sweep_cur=100):
        """Read data for the current sweep (sweep_cur), for an
        specific channel"""
        self.sweep_cur = sweep_cur
        if not hasattr(self, 'points'):
            self.find_sweep_points()
        try:
            samples_ini = self.points[self.sweep_cur]
        except IndexError:
            # requested sweep past the end: log and fall back to an earlier one
            print(len(self.points), self.sweep_cur)
            samples_ini = self.points[self.sweep_cur - self.sweep_size]
        samples_end = samples_ini + self.sweep_size
        if channel not in self.bindata.keys():
            self.read_channel(channel, self.save_locally)
        if hasattr(self, 'single_sweep_data') == False:
            self.single_sweep_data = {}
        self.single_sweep_data[channel] = self.bindata[channel][samples_ini:samples_end]

    def single_sweep_time(self, channel, time=30):
        """same as read_single_sweep, but it reads int time (ms)"""
        self.time2sweep(time)
        self.read_single_sweep(channel, self.sweep_cur)

    def bandpass_kaiser(self, channel, freqs=(1, 15)):
        """Band-pass filter the current sweep of `channel` with a 16-tap
        Kaiser-window FIR applied forward and backward (zero phase).

        freqs: (low, high) passband edges, in the same units as self.rate.
        Returns the filtered signal.
        """
        ntaps = 16
        nyq = 0.5 * self.rate
        width = 1.6
        atten = signal.kaiser_atten(ntaps, width / nyq)
        beta = signal.kaiser_beta(atten)
        taps = signal.firwin(ntaps, [freqs[0], freqs[1]], nyq=nyq, pass_zero=False,
                             window=('kaiser', beta), scale=False)
        # BUG FIX: 'taps_kaiser16' was undefined (NameError at runtime);
        # use the taps computed just above.
        newsig = signal.filtfilt(taps, 1.0, self.single_sweep_data[channel], padlen=500)
        return newsig

    def signal_filter(self, channel, freqs=(1, 15)):
        """Filter signal from specific channel. A FFT is performed in
        the signal, and the result is multiplied by a window function
        (kaiser function), which nulls the undesired beating
        frequencies, that are outside of 'freqs'. The signal is then
        recreated with an IFFT."""
        # zero padding size:
        zer_pad_filter = 4
        # alias for sweep_size
        N = int(self.sweep_size)
        # FFT with zero padding
        fft = np.fft.rfft(self.single_sweep_data[channel], zer_pad_filter * N)
        # bp=fft[:] #used for other plot functions
        fmin, fmax = freqs
        # creates the beating frequency axis, and finds the position of
        # the frequency limits in the axis.
        # FIX: use floor division — these sizes must be integers on Python 3.
        fft_freq = np.linspace(0, self.rate / 2.,
                               num=zer_pad_filter * N // 2)
        cmin = (abs(fft_freq - fmin)).argmin()
        cmax = (abs(fft_freq - fmax)).argmin()
        # creates window function for filter. Second argument of kaiser
        # function must be float
        window_func = np.concatenate((np.zeros(cmin + 1),
                                      np.kaiser(cmax - cmin, 2.), np.zeros(zer_pad_filter * N // 2 - cmax)))
        # multiply the window by the signal's FFT.
        bp = np.multiply(fft, window_func)
        # Since the signal is REAL, we use IRFFT, and takes only the
        # first half, since the other half is symmetric.
        newsig = np.fft.irfft(bp)[:N]
        return newsig

    def plot_sweep(self, channel):
        """Plot binary data for an specific sweep and channel."""
        import pylab as p
        if not hasattr(self, 'sweep_freq'):
            # dict with arrays for the frequencies in each channel.
            self.sweep_freq = {}
        if channel not in self.sweep_freq.keys():
            # NOTE(review): freqs_start/freqs_end are only assigned by the
            # disabled tail of frequency_analysis() — confirm they are set
            # elsewhere before this is called.
            self.sweep_freq[channel] = np.linspace(
                self.freqs_start[channel], self.freqs_end[channel], num=self.sweep_size)
        p.plot(self.sweep_freq[channel], self.single_sweep_data[
            channel], label="Channel: %s" % channel)
        p.xlabel("freq (GHz)")
        p.ylabel("beating signal")

    def spectrogram(self, channel, window=256, step_scale=16, zer_pad=8, log=0,
                    group_delay=1, figure=0, normal=0, filtered=1,
                    beating_freq_filter=(1e3, 15e3), probing_freqs=(0, 100)):
        """Evaluate and plot spectrogram (SFFT) of beating signal.
        Some parameters listed (others can be found in the function):
        group_delay=1 evaluates group delay.
                    0 evaluates beating frequency
        normal=1 normalize spectrum
        log=0
            1 for log spectrogram
        """
        if filtered == 1:
            sig = self.signal_filter(channel, beating_freq_filter)
        else:
            sig = self.single_sweep_data[channel]
        nfft = int(1.25 * self.rate)
        # NOTE(review): scipy requires nfft >= nperseg; here nperseg is
        # int(1.25 * rate) while nfft is fixed at 1024 — confirm the expected
        # magnitude of self.rate.
        f, t, Sxx = signal.spectrogram(sig, self.rate, nperseg=nfft, noverlap=nfft-1, window=signal.get_window('hann', nfft), nfft=1024)
        if normal == 1:
            Sxx = Sxx / Sxx.max(axis=0)
        if not hasattr(self, 'Dt_DF'):
            self.Dt_DF = {}
        if channel not in self.Dt_DF.keys():
            # Inverse of dF/dt sweeping rate:
            self.Dt_DF[channel] = self.sweep_dur / ((self.freq_end - self.freq_start) * self.chan_factor[channel])
        if not hasattr(self, 'X'):
            self.X = {}
        if not hasattr(self, 'Y'):
            self.Y = {}
        # X is and array with the probing frequency.
        self.X[channel] = (self.freq_start + (self.freq_end-self.freq_start)*t/t[-1]) * self.chan_factor[channel]
        mask_probe_freq = np.logical_and(self.X[channel] > self.probing_freq_lim[channel][0], self.X[channel] < self.probing_freq_lim[channel][1])
        self.X[channel] = self.X[channel][mask_probe_freq].copy()
        # Y is the beating frequency, in MHz, or the group delay, in ns
        self.Y[channel] = f.copy()
        if group_delay == 1:
            # group delay in ns
            self.Y[channel] *= self.Dt_DF[channel]
        return Sxx[:, mask_probe_freq]

    def spectrogram2(self, channel, window=256, step_scale=16, zer_pad=8, log=0,
                     group_delay=1, figure=0, normal=0, filtered=0,
                     beating_freq_filter=(1, 15)):
        """Evaluate and plot spectrogram (SFFT) of beating signal.
        Some parameters listed (others can be found in the function):
        group_delay=1 evaluates group delay.
                    0 evaluates beating frequency
        normal=1 normalize spectrum
        log=0
            1 for log spectrogram
        """
        if not hasattr(self, 'Dt_DF'):
            self.Dt_DF = {}
            self.X = {}
            self.delta_freq = {}
            self.index_X = {}
            self.Y = {}
            self.mask_bf = {}
        if channel not in self.Dt_DF.keys():
            tem = np.linspace(0, self.sweep_dur, self.sweep_size)
            # NOTE(review): mask_probe_freq / probing_frequency / freqs_start
            # are only assigned by the disabled tail of frequency_analysis();
            # confirm they are initialised before this runs.
            tem = tem[self.mask_probe_freq[channel]]
            time_spec, beat_freq = cs.eval_beat_freq(
                tem, window_size=window, step_scale=step_scale, zer_pad=zer_pad)
            fmin, fmax, mask_bf = cs.eval_mask(beat_freq, window, beating_freq_filter[
                0], beating_freq_filter[1], zer_pad=zer_pad)
            self.mask_bf[channel] = mask_bf
            # Inverse of dF/dt sweeping rate:
            self.Dt_DF[channel] = (tem[1] - tem[0]) / \
                (self.probing_frequency[channel][1] -
                 self.probing_frequency[channel][0])
            # X is and array with the probing frequency.
            self.X[channel] = self.freqs_start[
                channel] + time_spec / self.Dt_DF[channel]
            self.delta_freq[channel] = self.X[channel][1] - self.X[channel][0]
            # Y is the beating frequency, in MHz, or the group delay, in ns
            self.Y[channel] = beat_freq[self.mask_bf[channel]]
            if group_delay == 1:
                # group delay in ns
                self.Y[channel] *= self.Dt_DF[channel]
        if filtered == 1:
            sig = self.signal_filter(channel, beating_freq_filter)
        else:
            sig = self.single_sweep_data[channel]
        mat_cs = cs.spectrogram(
            sig, window_size=window, zer_pad=zer_pad, step_scale=step_scale, normalize=normal, freq_mask=self.mask_bf[channel])
        return mat_cs
| |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for Deferred handling by L{twisted.trial.unittest.TestCase}.
"""
from __future__ import division, absolute_import
from twisted.trial import unittest
from twisted.internet import defer, threads, reactor
class DeferredSetUpOK(unittest.TestCase):
    """Fixture: setUp returns a Deferred that fires successfully, so the
    test method must only run after the callback chain has completed."""
    def setUp(self):
        d = defer.succeed('value')
        d.addCallback(self._cb_setUpCalled)
        return d
    def _cb_setUpCalled(self, ignored):
        # Runs from the setUp Deferred; records that setUp really completed.
        self._setUpCalled = True
    def test_ok(self):
        self.failUnless(self._setUpCalled)
class DeferredSetUpFail(unittest.TestCase):
    """Fixture: setUp returns a failing Deferred; trial must not invoke the
    test method (testCalled stays False)."""
    # Class-level flag inspected by trial's own test suite.
    testCalled = False
    def setUp(self):
        return defer.fail(unittest.FailTest('i fail'))
    def test_ok(self):
        DeferredSetUpFail.testCalled = True
        self.fail("I should not get called")
class DeferredSetUpCallbackFail(unittest.TestCase):
    """Fixture: a callback attached to the setUp Deferred fails; trial must
    not invoke the test method."""
    # Class-level flag inspected by trial's own test suite.
    testCalled = False
    def setUp(self):
        d = defer.succeed('value')
        d.addCallback(self._cb_setUpCalled)
        return d
    def _cb_setUpCalled(self, ignored):
        self.fail('deliberate failure')
    def test_ok(self):
        DeferredSetUpCallbackFail.testCalled = True
class DeferredSetUpError(unittest.TestCase):
    """
    Fixture: C{setUp} returns a Deferred failed with an ordinary exception
    (an error rather than an assertion failure).
    """
    testCalled = False

    def setUp(self):
        error = RuntimeError('deliberate error')
        return defer.fail(error)

    def test_ok(self):
        # Records whether trial ran the test despite the errored setUp.
        DeferredSetUpError.testCalled = True
class DeferredSetUpNeverFire(unittest.TestCase):
    """
    Fixture: C{setUp} returns a Deferred that never fires, forcing trial's
    timeout handling to intervene before the test method can run.
    """
    testCalled = False

    def setUp(self):
        stuck = defer.Deferred()
        return stuck

    def test_ok(self):
        # Records whether trial ran the test despite the unfired setUp.
        DeferredSetUpNeverFire.testCalled = True
class DeferredSetUpSkip(unittest.TestCase):
    """
    Fixture: a callback on the C{setUp} Deferred raises
    L{unittest.SkipTest}, so the test should be reported as skipped.
    """
    testCalled = False

    def setUp(self):
        return defer.succeed('value').addCallback(self._cb1)

    def _cb1(self, ignored):
        raise unittest.SkipTest("skip me")

    def test_ok(self):
        # Records whether trial ran the test despite the skip in setUp.
        DeferredSetUpSkip.testCalled = True
class DeferredTests(unittest.TestCase):
    """
    Fixture methods covering the outcomes a test can signal through a
    returned Deferred: success, failure, error, skip, todo, and a Deferred
    produced in a thread.  C{touched} lets the meta-test suite observe
    whether callbacks queued after a skip still ran.
    """
    # set True by _touchClass; reset before each test in setUp
    touched = False

    def _cb_fail(self, reason):
        # callback turning its argument into an assertion failure
        self.fail(reason)

    def _cb_error(self, reason):
        # callback raising a non-assertion error
        raise RuntimeError(reason)

    def _cb_skip(self, reason):
        # callback skipping the running test
        raise unittest.SkipTest(reason)

    def _touchClass(self, ignored):
        self.__class__.touched = True

    def setUp(self):
        self.__class__.touched = False

    def test_pass(self):
        return defer.succeed('success')

    def test_passGenerated(self):
        # generator-style test; deferredGenerator wraps it below
        self._touchClass(None)
        yield None
    test_passGenerated = defer.deferredGenerator(test_passGenerated)

    def test_fail(self):
        return defer.fail(self.failureException('I fail'))

    def test_failureInCallback(self):
        d = defer.succeed('fail')
        d.addCallback(self._cb_fail)
        return d

    def test_errorInCallback(self):
        d = defer.succeed('error')
        d.addCallback(self._cb_error)
        return d

    def test_skip(self):
        # _touchClass follows _cb_skip in the chain; once the skip is
        # raised the later callback should not run, leaving touched False
        d = defer.succeed('skip')
        d.addCallback(self._cb_skip)
        d.addCallback(self._touchClass)
        return d

    def test_thread(self):
        return threads.deferToThread(lambda : None)

    def test_expectedFailure(self):
        d = defer.succeed('todo')
        d.addCallback(self._cb_error)
        return d
    # the .todo attribute marks the error above as an expected failure
    test_expectedFailure.todo = "Expected failure"
class TimeoutTests(unittest.TestCase):
    """
    Fixture methods exercising trial's per-test C{timeout} attribute on
    Deferred-returning tests.  C{timedOut} records the failure delivered to
    the errback in C{test_errorPropagation} for later inspection.
    """
    timedOut = None

    def test_pass(self):
        # fires on the next reactor iteration, well inside the 2s timeout
        d = defer.Deferred()
        reactor.callLater(0, d.callback, 'hoorj!')
        return d
    test_pass.timeout = 2

    def test_passDefault(self):
        # test default timeout
        d = defer.Deferred()
        reactor.callLater(0, d.callback, 'hoorj!')
        return d

    def test_timeout(self):
        # never fires; the 0.1s timeout must fail it
        return defer.Deferred()
    test_timeout.timeout = 0.1

    def test_timeoutZero(self):
        # timeout of 0 - presumably checks that a falsy timeout value is
        # still honoured; the expected outcome is asserted by the meta-tests
        return defer.Deferred()
    test_timeoutZero.timeout = 0

    def test_expectedFailure(self):
        return defer.Deferred()
    test_expectedFailure.timeout = 0.1
    test_expectedFailure.todo = "i will get it right, eventually"

    def test_skip(self):
        return defer.Deferred()
    test_skip.timeout = 0.1
    test_skip.skip = "i will get it right, eventually"

    def test_errorPropagation(self):
        # the timeout failure should reach errbacks attached to the Deferred
        def timedOut(err):
            self.__class__.timedOut = err
            return err
        d = defer.Deferred()
        d.addErrback(timedOut)
        return d
    test_errorPropagation.timeout = 0.1

    def test_calledButNeverCallback(self):
        # the outer Deferred fires, but a callback returns another Deferred
        # that never does; the test as a whole must still time out
        d = defer.Deferred()
        def neverFire(r):
            return defer.Deferred()
        d.addCallback(neverFire)
        d.callback(1)
        return d
    test_calledButNeverCallback.timeout = 0.1
class TestClassTimeoutAttribute(unittest.TestCase):
    """
    Fixture with C{timeout} set on the class rather than on an individual
    test method; the never-firing Deferred forces the timeout to apply.
    """
    timeout = 0.2

    def setUp(self):
        self.d = defer.Deferred()

    def testMethod(self):
        # flag inspected by the meta-tests to confirm the method ran
        self.methodCalled = True
        return self.d
| |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 01 14:56:56 2013
Author: Josef Perktold
"""
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal, assert_array_less
from statsmodels.stats.proportion import proportion_confint
import statsmodels.stats.proportion as smprop
class Holder(object):
    """Minimal attribute container for transcribed R reference results."""
    pass
def test_confint_proportion():
    """Check proportion_confint against stored R binconf reference results."""
    from results.results_proportion import res_binom, res_binom_methods
    # statsmodels method name -> label used in the stored R results
    method_map = {'agresti_coull': 'agresti-coull',
                  'normal': 'asymptotic',
                  'beta': 'exact',
                  'wilson': 'wilson',
                  'jeffrey': 'bayes'}
    for case in res_binom:
        count, nobs = case
        expected = res_binom[case]
        for sm_name, r_name in method_map.items():
            idx = res_binom_methods.index(r_name)
            low = expected.ci_low[idx]
            upp = expected.ci_upp[idx]
            # some method/case combinations have no reference value
            if np.isnan(low) or np.isnan(upp):
                continue
            ci = proportion_confint(count, nobs, alpha=0.05, method=sm_name)
            assert_almost_equal(ci, [low, upp], decimal=6,
                                err_msg=repr(case) + sm_name)
def test_samplesize_confidenceinterval_prop():
    """Round-trip consistency: CI half-length -> required sample size == nobs."""
    count, nobs = 12, 20
    ci_low, ci_upp = smprop.proportion_confint(count, nobs, alpha=0.05,
                                               method='normal')
    half_length = (ci_upp - ci_low) / 2
    n_required = smprop.samplesize_confint_proportion(count / float(nobs),
                                                      half_length)
    assert_almost_equal(n_required, nobs, decimal=13)
def test_proportion_effect_size():
    """Effect size for two proportions; reference value from a blog example."""
    effect = smprop.proportion_effectsize(0.5, 0.4)
    assert_almost_equal(effect, 0.2013579207903309, decimal=13)
class CheckProportionMixin(object):
    """
    Mixin with consistency checks for the k-sample proportion chisquare
    tests.  Subclasses supply the data (``n_success``, ``nobs``) and the R
    reference results (``res_prop_test*``, ``res_ppt_pvals_*``).
    """

    def test_proptest(self):
        # equality of k-samples
        pt = smprop.proportions_chisquare(self.n_success, self.nobs, value=None)
        assert_almost_equal(pt[0], self.res_prop_test.statistic, decimal=13)
        assert_almost_equal(pt[1], self.res_prop_test.p_value, decimal=13)

        # several against value
        pt = smprop.proportions_chisquare(self.n_success, self.nobs,
                                  value=self.res_prop_test_val.null_value[0])
        assert_almost_equal(pt[0], self.res_prop_test_val.statistic, decimal=13)
        assert_almost_equal(pt[1], self.res_prop_test_val.p_value, decimal=13)

        # one proportion against value
        pt = smprop.proportions_chisquare(self.n_success[0], self.nobs[0],
                                          value=self.res_prop_test_1.null_value)
        assert_almost_equal(pt[0], self.res_prop_test_1.statistic, decimal=13)
        assert_almost_equal(pt[1], self.res_prop_test_1.p_value, decimal=13)

    def test_pairwiseproptest(self):
        # all pairwise comparisons: raw p-values, then Holm-corrected
        ppt = smprop.proportions_chisquare_allpairs(self.n_success, self.nobs,
                                                    multitest_method=None)
        assert_almost_equal(ppt.pvals_raw, self.res_ppt_pvals_raw)
        ppt = smprop.proportions_chisquare_allpairs(self.n_success, self.nobs,
                                                    multitest_method='h')
        assert_almost_equal(ppt.pval_corrected(), self.res_ppt_pvals_holm)
        # pairs-against-control raw p-values must equal the first
        # len(nobs)-1 entries of the all-pairs raw p-values
        pptd = smprop.proportions_chisquare_pairscontrol(self.n_success,
                                  self.nobs, multitest_method='hommel')
        assert_almost_equal(pptd.pvals_raw, ppt.pvals_raw[:len(self.nobs) - 1],
                            decimal=13)
class TestProportion(CheckProportionMixin):
    """
    Runs the CheckProportionMixin checks on the 4-sample smokers/patients
    example; reference numbers transcribed from R ``prop.test`` output
    (the ``#>`` comments show the generating R commands).
    """

    def setup(self):
        # nose-style setup (lowercase), run before each test method
        self.n_success = np.array([ 73, 90, 114, 75])
        self.nobs = np.array([ 86, 93, 136, 82])

        # raw and Holm-corrected pairwise p-values used by the mixin
        self.res_ppt_pvals_raw = np.array([
            0.00533824886503131, 0.8327574849753566, 0.1880573726722516,
            0.002026764254350234, 0.1309487516334318, 0.1076118730631731
            ])
        self.res_ppt_pvals_holm = np.array([
            0.02669124432515654, 0.8327574849753566, 0.4304474922526926,
            0.0121605855261014, 0.4304474922526926, 0.4304474922526926
            ])

        # R: prop.test(smokers2, patients, correct=FALSE)
        res_prop_test = Holder()
        res_prop_test.statistic = 11.11938768628861
        res_prop_test.parameter = 3
        res_prop_test.p_value = 0.011097511366581344
        res_prop_test.estimate = np.array([
            0.848837209302326, 0.967741935483871, 0.838235294117647,
            0.9146341463414634
            ]).reshape(4,1, order='F')
        res_prop_test.null_value = '''NULL'''
        res_prop_test.conf_int = '''NULL'''
        res_prop_test.alternative = 'two.sided'
        res_prop_test.method = '4-sample test for equality of proportions ' + \
                               'without continuity correction'
        res_prop_test.data_name = 'smokers2 out of patients'
        self.res_prop_test = res_prop_test

        #> pt = prop.test(smokers2, patients, p=rep(c(0.9), 4), correct=FALSE)
        #> cat_items(pt, "res_prop_test_val.")
        res_prop_test_val = Holder()
        res_prop_test_val.statistic = np.array([
            13.20305530710751
            ]).reshape(1,1, order='F')
        res_prop_test_val.parameter = np.array([
            4
            ]).reshape(1,1, order='F')
        res_prop_test_val.p_value = 0.010325090041836
        res_prop_test_val.estimate = np.array([
            0.848837209302326, 0.967741935483871, 0.838235294117647,
            0.9146341463414634
            ]).reshape(4,1, order='F')
        res_prop_test_val.null_value = np.array([
            0.9, 0.9, 0.9, 0.9
            ]).reshape(4,1, order='F')
        res_prop_test_val.conf_int = '''NULL'''
        res_prop_test_val.alternative = 'two.sided'
        res_prop_test_val.method = '4-sample test for given proportions without continuity correction'
        res_prop_test_val.data_name = 'smokers2 out of patients, null probabilities rep(c(0.9), 4)'
        self.res_prop_test_val = res_prop_test_val

        #> pt = prop.test(smokers2[1], patients[1], p=0.9, correct=FALSE)
        #> cat_items(pt, "res_prop_test_1.")
        res_prop_test_1 = Holder()
        res_prop_test_1.statistic = 2.501291989664086
        res_prop_test_1.parameter = 1
        res_prop_test_1.p_value = 0.113752943640092
        res_prop_test_1.estimate = 0.848837209302326
        res_prop_test_1.null_value = 0.9
        res_prop_test_1.conf_int = np.array([0.758364348004061,
                                             0.9094787701686766])
        res_prop_test_1.alternative = 'two.sided'
        res_prop_test_1.method = '1-sample proportions test without continuity correction'
        res_prop_test_1.data_name = 'smokers2[1] out of patients[1], null probability 0.9'
        self.res_prop_test_1 = res_prop_test_1
def test_binom_test():
    """Check binom_test p-values and the Clopper-Pearson confidence interval
    against R ``binom.test`` reference output (``#>`` lines show the R calls).
    """
    #> bt = binom.test(51,235,(1/6),alternative="less")
    #> cat_items(bt, "binom_test_less.")
    binom_test_less = Holder()
    binom_test_less.statistic = 51
    binom_test_less.parameter = 235
    binom_test_less.p_value = 0.982022657605858
    binom_test_less.conf_int = [0, 0.2659460862574313]
    binom_test_less.estimate = 0.2170212765957447
    binom_test_less.null_value = 1. / 6
    binom_test_less.alternative = 'less'
    binom_test_less.method = 'Exact binomial test'
    binom_test_less.data_name = '51 and 235'

    #> bt = binom.test(51,235,(1/6),alternative="greater")
    #> cat_items(bt, "binom_test_greater.")
    binom_test_greater = Holder()
    binom_test_greater.statistic = 51
    binom_test_greater.parameter = 235
    binom_test_greater.p_value = 0.02654424571169085
    binom_test_greater.conf_int = [0.1735252778065201, 1]
    binom_test_greater.estimate = 0.2170212765957447
    binom_test_greater.null_value = 1. / 6
    binom_test_greater.alternative = 'greater'
    binom_test_greater.method = 'Exact binomial test'
    binom_test_greater.data_name = '51 and 235'

    #> bt = binom.test(51,235,(1/6),alternative="t")
    #> cat_items(bt, "binom_test_2sided.")
    binom_test_2sided = Holder()
    binom_test_2sided.statistic = 51
    binom_test_2sided.parameter = 235
    binom_test_2sided.p_value = 0.0437479701823997
    binom_test_2sided.conf_int = [0.1660633298083073, 0.2752683640289254]
    binom_test_2sided.estimate = 0.2170212765957447
    binom_test_2sided.null_value = 1. / 6
    binom_test_2sided.alternative = 'two.sided'
    binom_test_2sided.method = 'Exact binomial test'
    binom_test_2sided.data_name = '51 and 235'

    # statsmodels alternative name paired with the matching R result
    alltests = [('larger', binom_test_greater),
                ('smaller', binom_test_less),
                ('two-sided', binom_test_2sided)]

    for alt, res0 in alltests:
        # only p-value is returned
        res = smprop.binom_test(51, 235, prop=1. / 6, alternative=alt)
        #assert_almost_equal(res[0], res0.statistic)
        assert_almost_equal(res, res0.p_value, decimal=13)

    # R binom_test returns Copper-Pearson confint
    ci_2s = smprop.proportion_confint(51, 235, alpha=0.05, method='beta')
    ci_low, ci_upp = smprop.proportion_confint(51, 235, alpha=0.1,
                                               method='beta')
    assert_almost_equal(ci_2s, binom_test_2sided.conf_int, decimal=13)
    # one-sided R intervals correspond to the tails of the alpha=0.1 CI
    assert_almost_equal(ci_upp, binom_test_less.conf_int[1], decimal=13)
    assert_almost_equal(ci_low, binom_test_greater.conf_int[0], decimal=13)
def test_binom_rejection_interval():
    """Consistency of binom_test_reject_interval with binom_test.

    For each alternative, the returned rejection limit must itself be
    rejected (p-value < alpha) while the adjacent count just inside the
    acceptance region must not be (p-value > alpha).

    Fixes a copy-paste defect: the two-sided section previously asserted the
    upper limit ``ci_upp`` twice and never exercised the lower limit
    ``ci_low``.
    """
    # consistency check with binom_test
    # some code duplication but limit checks are different
    alpha = 0.05
    nobs = 200
    prop = 12./20

    # lower-tail test: rejection region is counts <= ci_low, so the upper
    # limit is the whole sample
    alternative='smaller'
    ci_low, ci_upp = smprop.binom_test_reject_interval(prop, nobs, alpha=alpha,
                                                       alternative=alternative)
    assert_equal(ci_upp, nobs)
    pval = smprop.binom_test(ci_low, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(pval, alpha)
    pval = smprop.binom_test(ci_low + 1, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(alpha, pval)

    # upper-tail test: rejection region is counts >= ci_upp
    alternative='larger'
    ci_low, ci_upp = smprop.binom_test_reject_interval(prop, nobs, alpha=alpha,
                                                       alternative=alternative)
    assert_equal(ci_low, 0)
    pval = smprop.binom_test(ci_upp, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(pval, alpha)
    pval = smprop.binom_test(ci_upp - 1, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(alpha, pval)

    # two-sided: check BOTH rejection limits
    alternative='two-sided'
    ci_low, ci_upp = smprop.binom_test_reject_interval(prop, nobs, alpha=alpha,
                                                       alternative=alternative)
    pval = smprop.binom_test(ci_upp, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(pval, alpha)
    pval = smprop.binom_test(ci_upp - 1, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(alpha, pval)
    pval = smprop.binom_test(ci_low, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(pval, alpha)
    pval = smprop.binom_test(ci_low + 1, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(alpha, pval)
def test_binom_tost():
    """Consistency of binom_tost with the Clopper-Pearson interval.

    proportion_confint is itself validated against R; there is no external
    reference case for binom_tost, so an alpha CI must yield TOST p-values
    of alpha/2 at its own limits.
    """
    for count in (10, 5):
        low, upp = smprop.proportion_confint(count, 20, method='beta',
                                             alpha=0.1)
        pvals = smprop.binom_tost(count, 20, low, upp)
        assert_almost_equal(pvals, [0.05] * 3, decimal=12)

    # vectorized, TODO: observed proportion = 0 returns nan
    counts = np.arange(1, 20)
    ci = smprop.proportion_confint(counts, 20, method='beta', alpha=0.05)
    pvals = np.asarray(smprop.binom_tost(counts, 20, *ci))
    assert_almost_equal(pvals, 0.025 * np.ones(pvals.shape), decimal=12)
def test_power_binom_tost():
    """Power of the binomial TOST against reference values from the PASS
    manual, plus the corresponding rejection interval."""
    # comparison numbers from PASS manual
    p_alt = 0.6 + np.linspace(0, 0.09, 10)
    power = smprop.power_binom_tost(0.5, 0.7, 500, p_alt=p_alt, alpha=0.05)
    res_power = np.array([0.9965, 0.9940, 0.9815, 0.9482, 0.8783, 0.7583,
                          0.5914, 0.4041, 0.2352, 0.1139])
    assert_almost_equal(power, res_power, decimal=4)

    rej_int = smprop.binom_tost_reject_interval(0.5, 0.7, 500)
    res_rej_int = (269, 332)
    assert_equal(rej_int, res_rej_int)
    # TODO: actual alpha=0.0489 for all p_alt above

    # another case
    nobs = np.arange(20, 210, 20)
    power = smprop.power_binom_tost(0.4, 0.6, nobs, p_alt=0.5, alpha=0.05)
    res_power = np.array([ 0., 0., 0., 0.0889, 0.2356, 0.3517, 0.4457,
                          0.6154, 0.6674, 0.7708])
    # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
    assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)
def test_power_ztost_prop():
    """Power of the z-based TOST for proportions; first case checked against
    a PASS example, the rest are regression values with continuity options."""
    power = smprop.power_ztost_prop(0.1, 0.9, 10, p_alt=0.6, alpha=0.05,
                                    discrete=True, dist='binom')[0]
    assert_almost_equal(power, 0.8204, decimal=4) # PASS example

    import warnings
    with warnings.catch_warnings():  # python >= 2.6
        # suppress warnings raised by the power computation in these cases
        warnings.simplefilter("ignore")
        power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),
                                        p_alt=0.5, alpha=0.05, discrete=False,
                                        dist='binom')[0]
        res_power = np.array([ 0., 0., 0., 0.0889, 0.2356, 0.4770, 0.5530,
                              0.6154, 0.7365, 0.7708])
        # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
        assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)

        # with critval_continuity correction
        power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),
                                        p_alt=0.5, alpha=0.05, discrete=False,
                                        dist='binom', variance_prop=None,
                                        continuity=2, critval_continuity=1)[0]
        res_power = np.array([0., 0., 0., 0.0889, 0.2356, 0.3517, 0.4457,
                              0.6154, 0.6674, 0.7708])
        # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
        assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)

        power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),
                                        p_alt=0.5, alpha=0.05, discrete=False,
                                        dist='binom', variance_prop=0.5,
                                        critval_continuity=1)[0]
        res_power = np.array([0., 0., 0., 0.0889, 0.2356, 0.3517, 0.4457,
                              0.6154, 0.6674, 0.7112])
        # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
        assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)
def test_ztost():
    """Equivalence (TOST) z-test on a binary sample, checked against SAS."""
    xfair = np.repeat([1,0], [228, 762-228])

    # comparing to SAS last output at
    # http://support.sas.com/documentation/cdl/en/procstat/63104/HTML/default/viewer.htm#procstat_freq_sect028.htm
    # confidence interval for tost
    # generic ztost is moved to weightstats
    from statsmodels.stats.weightstats import zconfint, ztost
    ci01 = zconfint(xfair, alpha=0.1, ddof=0)
    assert_almost_equal(ci01, [0.2719, 0.3265], 4)
    res = ztost(xfair, 0.18, 0.38, ddof=0)
    # res = (pval, (stat_low, pval_low), (stat_upp, pval_upp))
    assert_almost_equal(res[1][0], 7.1865, 4)
    assert_almost_equal(res[2][0], -4.8701, 4)
    assert_array_less(res[0], 0.0001)
def test_power_ztost_prop_norm():
    """Regression tests for z-TOST power with the normal distribution and
    various continuity/variance options; no external reference values."""
    # regression test for normal distribution
    # from a rough comparison, the results and variations look reasonable
    power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),
                                    p_alt=0.5, alpha=0.05, discrete=False,
                                    dist='norm', variance_prop=0.5,
                                    continuity=0, critval_continuity=0)[0]
    res_power = np.array([0., 0., 0., 0.11450013, 0.27752006, 0.41495922,
                          0.52944621, 0.62382638, 0.70092914, 0.76341806])
    # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
    assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)

    # regression test for normal distribution
    power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),
                                    p_alt=0.5, alpha=0.05, discrete=False,
                                    dist='norm', variance_prop=0.5,
                                    continuity=1, critval_continuity=0)[0]
    res_power = np.array([0., 0., 0.02667562, 0.20189793, 0.35099606,
                          0.47608598, 0.57981118, 0.66496683, 0.73427591,
                          0.79026127])
    # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
    assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)

    # regression test for normal distribution
    power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),
                                    p_alt=0.5, alpha=0.05, discrete=True,
                                    dist='norm', variance_prop=0.5,
                                    continuity=1, critval_continuity=0)[0]
    res_power = np.array([0., 0., 0., 0.08902071, 0.23582284, 0.35192313,
                          0.55312718, 0.61549537, 0.66743625, 0.77066806])
    # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
    assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)

    # regression test for normal distribution
    power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),
                                    p_alt=0.5, alpha=0.05, discrete=True,
                                    dist='norm', variance_prop=0.5,
                                    continuity=1, critval_continuity=1)[0]
    res_power = np.array([0., 0., 0., 0.08902071, 0.23582284, 0.35192313,
                          0.44588687, 0.61549537, 0.66743625, 0.71115563])
    # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
    assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)

    # regression test for normal distribution
    power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),
                                    p_alt=0.5, alpha=0.05, discrete=True,
                                    dist='norm', variance_prop=None,
                                    continuity=0, critval_continuity=0)[0]
    res_power = np.array([0., 0., 0., 0., 0.15851942, 0.41611758,
                          0.5010377 , 0.5708047 , 0.70328247, 0.74210096])
    # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
    assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)
def test_proportion_ztests():
    """Consistency of proportions_ztest p-values with proportions_chisquare.

    Note: alternative handling is generic, so only the two-sided p-values
    are compared here.
    """
    # one-sample case against a fixed null value
    zt = smprop.proportions_ztest(15, 20., value=0.5, prop_var=0.5)
    chi2t = smprop.proportions_chisquare(15, 20., value=0.5)
    assert_almost_equal(zt[1], chi2t[1], decimal=13)

    # two-sample case; test only p-value
    counts = np.asarray([15, 10])
    nobs = np.asarray([20., 20])
    zt = smprop.proportions_ztest(counts, nobs, value=0, prop_var=None)
    chi2t = smprop.proportions_chisquare(counts, nobs)
    assert_almost_equal(zt[1], chi2t[1], decimal=13)
if __name__ == '__main__':
    # Ad-hoc entry point: runs only the R-comparison confint test; use a
    # test runner for the full module.
    test_confint_proportion()
| |
########
# Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import json
import datetime
from mock import patch, MagicMock, PropertyMock, Mock
from cloudify_rest_client import deployments, executions, blueprints
from cloudify_rest_client.exceptions import CloudifyClientError, \
MissingRequiredDeploymentInputError, UnknownDeploymentInputError
from cloudify_cli.exceptions import CloudifyCliError
from cloudify_cli.constants import DEFAULT_TENANT_NAME
from ... import exceptions
from .mocks import MockListResponse
from .test_base import CliCommandTest
from .constants import (BLUEPRINTS_DIR,
SAMPLE_BLUEPRINT_PATH,
SAMPLE_ARCHIVE_PATH,
SAMPLE_INPUTS_PATH)
class DeploymentUpdatesTest(CliCommandTest):
    """
    CLI tests for ``cfy deployments update`` / ``get-update``.  All REST
    client calls are replaced with mocks in setUp, so these tests exercise
    argument parsing, validation and output formatting only.
    """

    def _mock_wait_for_executions(self, value):
        # Patch the execution waiter; `value` becomes the .error attribute
        # of the waited-on execution, so True simulates a failed workflow.
        patcher = patch(
            'cloudify_cli.execution_events_fetcher.wait_for_execution',
            MagicMock(return_value=PropertyMock(error=value))
        )
        self.addCleanup(patcher.stop)
        patcher.start()

    def setUp(self):
        super(DeploymentUpdatesTest, self).setUp()
        self.use_manager()

        self.client.deployment_updates.update = MagicMock()
        self.client.blueprints.upload = MagicMock()
        self.client.executions = MagicMock()
        self.client.deployment_updates.update_with_existing_blueprint = \
            MagicMock()

        # default to a successful (error=False) workflow execution
        self._mock_wait_for_executions(False)

        patcher = patch('cloudify_cli.inputs.inputs_to_dict', MagicMock())
        self.addCleanup(patcher.stop)
        patcher.start()

    def test_deployment_update_get(self):
        # old and new inputs of the update must both appear in the output
        old_value = 'old value 1'
        new_value = 'new value 1'
        self.client.deployment_updates.get = Mock(return_value={
            'id': 'update-id-1',
            'old_inputs': {'inp1': old_value},
            'new_inputs': {'inp1': new_value},
        })
        outcome = self.invoke('deployments get-update update-id-1')
        self.assertIn(old_value, outcome.output)
        self.assertIn(new_value, outcome.output)

    def test_deployment_update_get_json(self):
        # --json output must be parseable and carry both input dicts
        old_value = 'old value 1'
        new_value = 'new value 1'
        self.client.deployment_updates.get = Mock(return_value={
            'id': 'update-id-1',
            'old_inputs': {'inp1': old_value},
            'new_inputs': {'inp1': new_value},
        })
        outcome = self.invoke('deployments get-update update-id-1 --json')
        parsed = json.loads(outcome.output)
        self.assertEqual(parsed['old_inputs'], {'inp1': old_value})
        self.assertEqual(parsed['new_inputs'], {'inp1': new_value})

    def test_deployment_update_successful(self):
        outcome = self.invoke(
            'cfy deployments update -p {0} '
            'my_deployment'.format(SAMPLE_BLUEPRINT_PATH))
        self.assertIn('Updating deployment my_deployment', outcome.logs)
        self.assertIn('Finished executing workflow', outcome.logs)
        self.assertIn(
            'Successfully updated deployment my_deployment', outcome.logs)

    def test_deployment_update_failure(self):
        # make the mocked execution report an error
        self._mock_wait_for_executions(True)

        outcome = self.invoke(
            'cfy deployments update -p {0} my_deployment'
            .format(SAMPLE_BLUEPRINT_PATH),
            err_str_segment='',
            exception=exceptions.SuppressedCloudifyCliError)

        # the last three log lines carry the failure report
        logs = outcome.logs.split('\n')
        self.assertIn('Updating deployment my_deployment', logs[-3])
        self.assertIn('Execution of workflow', logs[-2])
        self.assertIn('failed', logs[-2])
        self.assertIn(
            'Failed updating deployment my_deployment', logs[-1])

    def test_deployment_update_json_parameter(self):
        self.invoke(
            'cfy deployments update -p '
            '{0} my_deployment --json-output'
            .format(SAMPLE_BLUEPRINT_PATH))

    def test_deployment_update_include_logs_parameter(self):
        self.invoke(
            'cfy deployments update -p '
            '{0} my_deployment --include-logs'
            .format(SAMPLE_BLUEPRINT_PATH))

    def test_deployment_update_skip_install_flag(self):
        self.invoke(
            'cfy deployments update -p '
            '{0} my_deployment --skip-install'
            .format(SAMPLE_BLUEPRINT_PATH))

    def test_deployment_update_skip_uninstall_flag(self):
        self.invoke(
            'cfy deployments update -p '
            '{0} my_deployment --skip-uninstall'
            .format(SAMPLE_BLUEPRINT_PATH))

    def test_deployment_update_force_flag(self):
        self.invoke(
            'cfy deployments update -p '
            '{0} my_deployment --force'
            .format(SAMPLE_BLUEPRINT_PATH))

    def test_deployment_update_override_workflow_parameter(self):
        self.invoke(
            'cfy deployments update -p '
            '{0} my_deployment -w override-wf'
            .format(SAMPLE_BLUEPRINT_PATH))

    def test_deployment_update_archive_location_parameter(self):
        self.invoke(
            'cfy deployments update -p {0} my_deployment'
            .format(SAMPLE_ARCHIVE_PATH))

    def test_dep_update_archive_loc_and_bp_path_parameters_exclusion(self):
        # -n (blueprint filename) is only valid together with an archive
        self.invoke(
            'cfy deployments update -p '
            '{0} -n {1}/helloworld/'
            'blueprint2.yaml my_deployment'
            .format(SAMPLE_BLUEPRINT_PATH, BLUEPRINTS_DIR),
            err_str_segment='param should be passed only when updating'
            ' from an archive'
        )

    def test_deployment_update_blueprint_filename_parameter(self):
        self.invoke(
            'cfy deployments update -p '
            '{0} -n blueprint.yaml my_deployment'
            .format(SAMPLE_ARCHIVE_PATH))

    def test_deployment_update_inputs_parameter(self):
        self.invoke(
            'cfy deployments update -p '
            '{0} -i {1} my_deployment'
            .format(SAMPLE_ARCHIVE_PATH, SAMPLE_INPUTS_PATH))

    def test_deployment_update_multiple_inputs_parameter(self):
        self.invoke(
            'cfy deployments update -p '
            '{0} -i {1} -i {1} my_deployment'
            .format(SAMPLE_ARCHIVE_PATH, SAMPLE_INPUTS_PATH))

    def test_deployment_update_no_deployment_id_parameter(self):
        outcome = self.invoke(
            'cfy deployments update -p '
            '{0}'.format(SAMPLE_ARCHIVE_PATH),
            err_str_segment='2', # Exit code
            exception=SystemExit)

        self.assertIn('Missing argument "deployment-id"', outcome.output)

    def test_deployment_update_no_bp_path_nor_archive_loc_parameters(self):
        # NOTE(review): the .format(BLUEPRINTS_DIR) below is a no-op - the
        # command string has no placeholder; harmless, could be dropped.
        self.invoke(
            'cfy deployments update my_deployment'.format(
                BLUEPRINTS_DIR),
            err_str_segment='Must supply either a blueprint '
            '(by id of an existing blueprint, or a path to a '
            'new blueprint), or new inputs',
            exception=CloudifyCliError)
class DeploymentsTest(CliCommandTest):
def setUp(self):
super(DeploymentsTest, self).setUp()
self.use_manager()
def test_deployment_create(self):
deployment = deployments.Deployment({
'deployment_id': 'deployment_id'
})
self.client.deployments.create = MagicMock(return_value=deployment)
self.invoke(
'cfy deployments create deployment -b a-blueprint-id')
def test_deployment_create_with_skip_plugins_validation_flag(self):
deployment = deployments.Deployment({
'deployment_id': 'deployment_id'
})
self.client.deployments.create = MagicMock(return_value=deployment)
self.invoke(
'cfy deployments create deployment -b a --skip-plugins-validation')
call_args = list(self.client.deployments.create.call_args)
self.assertIn('skip_plugins_validation', call_args[1])
self.assertEqual(call_args[1]['skip_plugins_validation'], True)
def test_deployment_create_without_skip_plugins_validation_flag(self):
deployment = deployments.Deployment({
'deployment_id': 'deployment_id'
})
self.client.deployments.create = MagicMock(return_value=deployment)
self.invoke(
'cfy deployments create deployment -b aa')
call_args = list(self.client.deployments.create.call_args)
self.assertIn('skip_plugins_validation', call_args[1])
self.assertEqual(call_args[1]['skip_plugins_validation'], False)
def test_deployments_delete(self):
self.client.deployments.delete = MagicMock()
self.client.executions.list = MagicMock(
side_effect=CloudifyClientError(
'`Deployment` with ID `my-dep` was not found')
)
self.invoke('cfy deployments delete my-dep')
def test_deployments_execute(self):
execute_response = executions.Execution({'status': 'started'})
get_execution_response = executions.Execution({
'status': 'terminated',
'workflow_id': 'mock_wf',
'deployment_id': 'deployment-id',
'blueprint_id': 'blueprint-id',
'error': '',
'id': 'id',
'created_at': datetime.datetime.now(),
'parameters': {}
})
success_event = {
'event_type': 'workflow_succeeded',
'type': 'foo',
'timestamp': '12345678',
'message': 'workflow execution succeeded',
'error_causes': '<error_causes>',
'deployment_id': 'deployment-id',
'execution_id': '<execution_id>',
'source_id': None,
'target_id': None,
'node_name': '<node_name>',
'operation': '<operation>',
'workflow_id': '<workflow_id>',
'node_instance_id': '<node_instance_id>',
}
get_events_response = MockListResponse([success_event], 1)
self.client.executions.start = MagicMock(
return_value=execute_response)
self.client.executions.get = MagicMock(
return_value=get_execution_response)
self.client.events.list = MagicMock(return_value=get_events_response)
self.invoke('cfy executions start install -d a-deployment-id')
def test_deployments_list_all(self):
self.client.deployments.list = MagicMock(
return_value=MockListResponse()
)
self.invoke('cfy deployments list')
self.invoke('cfy deployments list -t dummy_tenant')
self.invoke('cfy deployments list -a')
def test_deployments_list_of_blueprint(self):
deps = [
{
'blueprint_id': 'b1_blueprint',
'created_at': 'now',
'created_by': 'admin',
'updated_at': 'now',
'id': 'id',
'visibility': 'private',
'tenant_name': DEFAULT_TENANT_NAME
},
{
'blueprint_id': 'b1_blueprint',
'created_at': 'now',
'created_by': 'admin',
'updated_at': 'now',
'id': 'id',
'visibility': 'private',
'tenant_name': DEFAULT_TENANT_NAME
},
{
'blueprint_id': 'b2_blueprint',
'created_at': 'now',
'created_by': 'admin',
'updated_at': 'now',
'id': 'id',
'visibility': 'private',
'tenant_name': DEFAULT_TENANT_NAME
}
]
self.client.deployments.list = MagicMock(
return_value=MockListResponse(items=deps)
)
outcome = self.invoke('cfy deployments list -b b1_blueprint -v')
self.assertNotIn('b2_blueprint', outcome.logs)
self.assertIn('b1_blueprint', outcome.logs)
def test_deployments_execute_nonexistent_operation(self):
# Verifying that the CLI allows for arbitrary operation names,
# while also ensuring correct error-handling of nonexistent
# operations
expected_error = "operation nonexistent-operation doesn't exist"
self.client.executions.start = MagicMock(
side_effect=CloudifyClientError(expected_error))
command = \
'cfy executions start nonexistent-operation -d a-deployment-id'
self.invoke(
command,
err_str_segment=expected_error,
exception=CloudifyClientError)
def test_deployments_outputs(self):
outputs = deployments.DeploymentOutputs({
'deployment_id': 'dep1',
'outputs': {
'port': 8080
}
})
deployment = deployments.Deployment({
'outputs': {
'port': {
'description': 'Webserver port.',
'value': '...'
}
}
})
self.client.deployments.get = MagicMock(return_value=deployment)
self.client.deployments.outputs.get = MagicMock(return_value=outputs)
self.invoke('cfy deployments outputs dep1')
def test_deployments_outputs_json(self):
outputs = deployments.DeploymentOutputs({
'deployment_id': 'dep1',
'outputs': {
'port': 8080
}
})
deployment = deployments.Deployment({
'outputs': {
'port': {
'description': 'Webserver port.',
'value': '...'
}
}
})
self.client.deployments.get = MagicMock(return_value=deployment)
self.client.deployments.outputs.get = MagicMock(return_value=outputs)
outcome = self.invoke('cfy deployments outputs dep1 --json')
parsed = json.loads(outcome.output)
self.assertEqual(parsed, {
'port': {
'value': 8080,
'description': 'Webserver port.'
}
})
def test_deployments_inputs(self):
deployment = deployments.Deployment({
'deployment_id': 'deployment_id',
'inputs': {'key1': 'val1', 'key2': 'val2'}
})
expected_outputs = [
'Retrieving inputs for deployment deployment_id...',
'- "key1":',
'Value: val1',
'- "key2":',
'Value: val2',
]
self.client.deployments.get = MagicMock(return_value=deployment)
outcome = self.invoke('cfy deployments inputs deployment_id')
outcome = [o.strip() for o in outcome.logs.split('\n')]
for output in expected_outputs:
self.assertIn(output, outcome)
def test_deployments_inputs_json(self):
deployment = deployments.Deployment({
'deployment_id': 'deployment_id',
'inputs': {'key1': 'val1', 'key2': 'val2'}
})
self.client.deployments.get = MagicMock(return_value=deployment)
outcome = self.invoke('cfy deployments inputs deployment_id --json')
parsed = json.loads(outcome.output)
self.assertEqual(parsed, {'key1': 'val1', 'key2': 'val2'})
def test_missing_required_inputs(self):
self._test_deployment_inputs(
MissingRequiredDeploymentInputError,
{'input1': 'value1'},
['Unable to create deployment']
)
def test_invalid_input(self):
self._test_deployment_inputs(
UnknownDeploymentInputError,
{'input1': 'value1',
'input2': 'value2',
'input3': 'value3'},
['Unable to create deployment']
)
def test_deployments_set_visibility(self):
self.client.deployments.set_visibility = MagicMock()
self.invoke('cfy deployments set-visibility a-deployment-id -l '
'tenant')
self.invoke('cfy deployments set-visibility a-deployment-id -l '
'global')
def test_deployments_set_visibility_invalid_argument(self):
self.invoke(
'cfy deployments set-visibility a-deployment-id -l private',
err_str_segment='Invalid visibility: `private`',
exception=CloudifyCliError
)
self.invoke(
'cfy deployments set-visibility a-deployment-id -l bla',
err_str_segment='Invalid visibility: `bla`',
exception=CloudifyCliError
)
def test_deployments_set_visibility_missing_argument(self):
outcome = self.invoke(
'cfy deployments set-visibility a-deployment-id',
err_str_segment='2',
exception=SystemExit
)
self.assertIn('Missing option "-l" / "--visibility"', outcome.output)
def test_deployments_set_visibility_wrong_argument(self):
outcome = self.invoke(
'cfy deployments set-visibility a-deployment-id -g',
err_str_segment='2', # Exit code
exception=SystemExit
)
self.assertIn('Error: no such option: -g', outcome.output)
def test_deployments_create_mutually_exclusive_arguments(self):
outcome = self.invoke(
'cfy deployments create deployment -b a-blueprint-id -l tenant '
'--private-resource',
err_str_segment='2', # Exit code
exception=SystemExit
)
self.assertIn('mutually exclusive with arguments:', outcome.output)
def test_deployments_create_invalid_argument(self):
self.invoke(
'cfy deployments create deployment -b a-blueprint-id -l bla'
.format(BLUEPRINTS_DIR),
err_str_segment='Invalid visibility: `bla`',
exception=CloudifyCliError
)
def test_deployments_create_with_visibility(self):
self.client.deployments.create = MagicMock()
self.invoke('cfy deployments create deployment -b a-blueprint-id '
'-l private'
.format(SAMPLE_ARCHIVE_PATH))
def _test_deployment_inputs(self, exception_type,
inputs, expected_outputs=None):
def raise_error(*args, **kwargs):
raise exception_type('no inputs')
blueprint = blueprints.Blueprint({
'plan': {
'inputs': {
'input1': {'description': 'val1'},
'input2': {'description': 'val2'}
}
}
})
self.client.blueprints.get = MagicMock(return_value=blueprint)
self.client.deployments.create = raise_error
inputs_line = ' '.join(
['-i {0}={1}'.format(key, value) for
key, value in inputs.iteritems()])
outcome = self.invoke(
'cfy deployments create deployment -b a-blueprint-id {0}'.format(
inputs_line),
exception=exceptions.SuppressedCloudifyCliError,
err_str_segment='no inputs'
)
outcome = [o.strip() for o in outcome.logs.split('\n')]
if not expected_outputs:
expected_outputs = []
for output in expected_outputs:
found = False
for outcome_line in outcome:
if output in outcome_line:
found = True
break
self.assertTrue(found, 'String ''{0}'' not found in outcome {1}'
.format(output, outcome))
| |
from __future__ import unicode_literals
from django.db import models
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils.encoding import python_2_unicode_compatible
from taggit.models import TaggedItemBase
from modelcluster.fields import ParentalKey
from modelcluster.tags import ClusterTaggableManager
from wagtail.wagtailcore.models import Page, Orderable
from wagtail.wagtailcore.fields import RichTextField, StreamField
from wagtail.wagtailcore.blocks import CharBlock, RichTextBlock
from wagtail.wagtailadmin.edit_handlers import FieldPanel, MultiFieldPanel, InlinePanel, PageChooserPanel, TabbedInterface, ObjectList
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtaildocs.edit_handlers import DocumentChooserPanel
from wagtail.wagtailsnippets.models import register_snippet
from wagtail.wagtailforms.models import AbstractEmailForm, AbstractFormField
from wagtail.wagtailsnippets.edit_handlers import SnippetChooserPanel
from wagtail.wagtailsearch import index
from wagtail.wagtailimages.models import AbstractImage, Image
from wagtail.wagtailimages.blocks import ImageChooserBlock
# Choices for EventPage.audience: (stored value, human-readable label).
EVENT_AUDIENCE_CHOICES = (
    ('public', "Public"),
    ('private', "Private"),
)

# Promote-tab panels shared by several page types below.
COMMON_PANELS = (
    FieldPanel('slug'),
    FieldPanel('seo_title'),
    FieldPanel('show_in_menus'),
    FieldPanel('search_description'),
)
# Link fields
class LinkFields(models.Model):
    """Abstract mixin providing an external/page/document link triplet.

    ``link`` resolves whichever field is set, in priority order:
    page, then document, then the raw external URL.
    """
    link_external = models.URLField("External link", blank=True)
    link_page = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        blank=True,
        related_name='+'
    )
    link_document = models.ForeignKey(
        'wagtaildocs.Document',
        null=True,
        blank=True,
        related_name='+'
    )

    @property
    def link(self):
        # Priority: page link wins over document, document over external URL.
        if self.link_page:
            return self.link_page.url
        elif self.link_document:
            return self.link_document.url
        else:
            return self.link_external

    panels = [
        FieldPanel('link_external'),
        PageChooserPanel('link_page'),
        DocumentChooserPanel('link_document'),
    ]

    class Meta:
        abstract = True
# Carousel items
class CarouselItem(LinkFields):
    """Abstract carousel slide: an image or embed with caption and link."""
    image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    embed_url = models.URLField("Embed URL", blank=True)
    caption = models.CharField(max_length=255, blank=True)

    panels = [
        ImageChooserPanel('image'),
        FieldPanel('embed_url'),
        FieldPanel('caption'),
        MultiFieldPanel(LinkFields.panels, "Link"),
    ]

    class Meta:
        abstract = True
# Related links
class RelatedLink(LinkFields):
    """Abstract titled link; concrete subclasses add a ParentalKey."""
    title = models.CharField(max_length=255, help_text="Link title")

    panels = [
        FieldPanel('title'),
        MultiFieldPanel(LinkFields.panels, "Link"),
    ]

    class Meta:
        abstract = True
# Simple page
class SimplePage(Page):
    """Minimal concrete page type with a single text body."""
    content = models.TextField()


class PageWithOldStyleRouteMethod(Page):
    """
    Prior to Wagtail 0.4, the route() method on Page returned an HttpResponse
    rather than a Page instance. As subclasses of Page may override route,
    we need to continue accepting this convention (albeit as a deprecated API).
    """
    content = models.TextField()
    template = 'tests/simple_page.html'

    def route(self, request, path_components):
        # Old-style contract: return the serve() response directly.
        return self.serve(request)
# Event page
class EventPageCarouselItem(Orderable, CarouselItem):
    # Inline carousel slides attached to an EventPage.
    page = ParentalKey('tests.EventPage', related_name='carousel_items')


class EventPageRelatedLink(Orderable, RelatedLink):
    # Inline related links attached to an EventPage.
    page = ParentalKey('tests.EventPage', related_name='related_links')


class EventPageSpeaker(Orderable, LinkFields):
    """Speaker entry for an EventPage, with an optional image and link."""
    page = ParentalKey('tests.EventPage', related_name='speakers')
    first_name = models.CharField("Name", max_length=255, blank=True)
    last_name = models.CharField("Surname", max_length=255, blank=True)
    image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )

    @property
    def name_display(self):
        # NOTE: yields a lone space when both name fields are blank.
        return self.first_name + " " + self.last_name

    panels = [
        FieldPanel('first_name'),
        FieldPanel('last_name'),
        ImageChooserPanel('image'),
        MultiFieldPanel(LinkFields.panels, "Link"),
    ]
class EventPage(Page):
    """Event page with schedule, audience, cost and inline related content."""
    date_from = models.DateField("Start date", null=True)
    date_to = models.DateField(
        "End date",
        null=True,
        blank=True,
        help_text="Not required if event is on a single day"
    )
    time_from = models.TimeField("Start time", null=True, blank=True)
    time_to = models.TimeField("End time", null=True, blank=True)
    audience = models.CharField(max_length=255, choices=EVENT_AUDIENCE_CHOICES)
    location = models.CharField(max_length=255)
    body = RichTextField(blank=True)
    cost = models.CharField(max_length=255)
    signup_link = models.URLField(blank=True)
    feed_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )

    # get_audience_display indexes the human-readable choice label,
    # not the stored key.
    search_fields = (
        index.SearchField('get_audience_display'),
        index.SearchField('location'),
        index.SearchField('body'),
    )

    password_required_template = 'tests/event_page_password_required.html'


# Panels assigned after the class body so inline relations resolve.
EventPage.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('date_from'),
    FieldPanel('date_to'),
    FieldPanel('time_from'),
    FieldPanel('time_to'),
    FieldPanel('location'),
    FieldPanel('audience'),
    FieldPanel('cost'),
    FieldPanel('signup_link'),
    InlinePanel('carousel_items', label="Carousel items"),
    FieldPanel('body', classname="full"),
    InlinePanel('speakers', label="Speakers"),
    InlinePanel('related_links', label="Related links"),
]

EventPage.promote_panels = [
    MultiFieldPanel(COMMON_PANELS, "Common page configuration"),
    ImageChooserPanel('feed_image'),
]
# Event index (has a separate AJAX template, and a custom template context)
class EventIndex(Page):
    """Paginated listing of live EventPage children."""
    intro = RichTextField(blank=True)

    ajax_template = 'tests/includes/event_listing.html'

    def get_events(self):
        # Only live EventPage children count as events.
        return self.get_children().live().type(EventPage)

    def get_paginator(self):
        # Four events per page.
        return Paginator(self.get_events(), 4)

    def get_context(self, request, page=1):
        pages = self.get_paginator()
        try:
            event_list = pages.page(page)
        except PageNotAnInteger:
            # Non-numeric page numbers fall back to the first page.
            event_list = pages.page(1)
        except EmptyPage:
            # Out-of-range page numbers get the last page.
            event_list = pages.page(pages.num_pages)

        context = super(EventIndex, self).get_context(request)
        context['events'] = event_list
        return context

    def route(self, request, path_components):
        # A single trailing path component is treated as a page number.
        if self.live and len(path_components) == 1:
            try:
                return self.serve(request, page=int(path_components[0]))
            except (TypeError, ValueError):
                pass
        return super(EventIndex, self).route(request, path_components)

    def get_static_site_paths(self):
        # One path per pagination page (1-based), then the default paths.
        for index_number in range(self.get_paginator().num_pages):
            yield '/%d/' % (index_number + 1)
        for path in super(EventIndex, self).get_static_site_paths():
            yield path

    def get_sitemap_urls(self):
        # Add the past-events URL on top of the default sitemap entries.
        past_entry = {
            'location': self.full_url + 'past/',
            'lastmod': self.latest_revision_created_at
        }
        return super(EventIndex, self).get_sitemap_urls() + [past_entry]
# Edit-form panels for EventIndex.
EventIndex.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('intro', classname="full"),
]
class FormField(AbstractFormField):
    # Individual field definition belonging to a FormPage.
    page = ParentalKey('FormPage', related_name='form_fields')


class FormPage(AbstractEmailForm):
    """Form page driven entirely by its inline form_fields."""
    pass


FormPage.content_panels = [
    FieldPanel('title', classname="full title"),
    InlinePanel('form_fields', label="Form fields"),
    MultiFieldPanel([
        FieldPanel('to_address', classname="full"),
        FieldPanel('from_address', classname="full"),
        FieldPanel('subject', classname="full"),
    ], "Email")
]
# Snippets
class AdvertPlacement(models.Model):
    # Through-model placing an Advert on a specific Page.
    page = ParentalKey('wagtailcore.Page', related_name='advert_placements')
    advert = models.ForeignKey('tests.Advert', related_name='+')
    colour = models.CharField(max_length=255)


@python_2_unicode_compatible
class Advert(models.Model):
    """Simple snippet model: a text advert with an optional URL."""
    url = models.URLField(null=True, blank=True)
    text = models.CharField(max_length=255)

    panels = [
        FieldPanel('url'),
        FieldPanel('text'),
    ]

    def __str__(self):
        return self.text


# Expose Advert in the snippets admin UI.
register_snippet(Advert)
class StandardIndex(Page):
    """ Index for the site, not allowed to be placed anywhere """
    # Empty list means no valid parents: the page cannot be created anywhere.
    parent_page_types = []


# A custom panel setup where all Promote fields are placed in the Content tab instead;
# we use this to test that the 'promote' tab is left out of the output when empty
StandardIndex.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('seo_title'),
    FieldPanel('slug'),
    InlinePanel('advert_placements', label="Adverts"),
]
StandardIndex.promote_panels = []


class StandardChild(Page):
    # Plain child page; used to test a fully custom edit_handler below.
    pass


# Test overriding edit_handler with a custom one
StandardChild.edit_handler = TabbedInterface([
    ObjectList(StandardChild.content_panels, heading='Content'),
    ObjectList(StandardChild.promote_panels, heading='Promote'),
    ObjectList(StandardChild.settings_panels, heading='Settings', classname='settings'),
    ObjectList([], heading='Dinosaurs'),
])
class BusinessIndex(Page):
    """ Can be placed anywhere, can only have Business children """
    subpage_types = ['tests.BusinessChild', 'tests.BusinessSubIndex']


class BusinessSubIndex(Page):
    """ Can be placed under BusinessIndex, and have BusinessChild children """
    subpage_types = ['tests.BusinessChild']
    parent_page_types = ['tests.BusinessIndex']


class BusinessChild(Page):
    """ Can only be placed under Business indexes, no children allowed """
    subpage_types = []
    # Mixes a string reference and a model class — presumably deliberate,
    # to exercise both accepted spellings; confirm before "normalizing".
    parent_page_types = ['tests.BusinessIndex', BusinessSubIndex]
class TaggedPageTag(TaggedItemBase):
    # Through-model joining tags to TaggedPage.
    content_object = ParentalKey('tests.TaggedPage', related_name='tagged_items')


class TaggedPage(Page):
    tags = ClusterTaggableManager(through=TaggedPageTag, blank=True)


TaggedPage.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('tags'),
]
class PageChooserModel(models.Model):
    # Non-page model with a chooser-backed FK to any Page.
    page = models.ForeignKey('wagtailcore.Page', help_text='help text')


class EventPageChooserModel(models.Model):
    # FK restricted to a specific Page subclass (EventPage).
    page = models.ForeignKey('tests.EventPage', help_text='more help text')


class SnippetChooserModel(models.Model):
    # FK to the Advert snippet, edited via SnippetChooserPanel.
    advert = models.ForeignKey(Advert, help_text='help text')

    panels = [
        SnippetChooserPanel('advert', Advert),
    ]
class CustomImageWithoutAdminFormFields(AbstractImage):
    # Custom image model that does NOT extend admin_form_fields.
    caption = models.CharField(max_length=255)
    not_editable_field = models.CharField(max_length=255)


class CustomImageWithAdminFormFields(AbstractImage):
    # Custom image model exposing its extra caption field in admin forms.
    caption = models.CharField(max_length=255)
    not_editable_field = models.CharField(max_length=255)

    admin_form_fields = Image.admin_form_fields + (
        'caption',
    )


class StreamModel(models.Model):
    # Plain model carrying a StreamField of text/rich-text/image blocks.
    body = StreamField([
        ('text', CharBlock()),
        ('rich_text', RichTextBlock()),
        ('image', ImageChooserBlock()),
    ])
| |
"""
SoftLayer.vs
~~~~~~~~~~~~
VS Manager/helpers
:license: MIT, see LICENSE for more details.
"""
import datetime
import logging
import socket
import time
import warnings
from SoftLayer.decoration import retry
from SoftLayer import exceptions
from SoftLayer.managers import ordering
from SoftLayer import utils
# Module-level logger; also handed to the @retry decorators below.
LOGGER = logging.getLogger(__name__)
# pylint: disable=no-self-use
class VSManager(utils.IdentifierMixin, object):
"""Manages SoftLayer Virtual Servers.
See product information here: http://www.softlayer.com/virtual-servers
Example::
# Initialize the VSManager.
# env variables. These can also be specified in ~/.softlayer,
# or passed directly to SoftLayer.Client()
# SL_USERNAME = YOUR_USERNAME
# SL_API_KEY = YOUR_API_KEY
import SoftLayer
client = SoftLayer.Client()
mgr = SoftLayer.VSManager(client)
:param SoftLayer.API.BaseClient client: the client instance
:param SoftLayer.managers.OrderingManager ordering_manager: an optional
manager to handle ordering.
If none is provided, one will be
auto initialized.
"""
def __init__(self, client, ordering_manager=None):
self.client = client
self.account = client['Account']
self.guest = client['Virtual_Guest']
self.resolvers = [self._get_ids_from_ip, self._get_ids_from_hostname]
if ordering_manager is None:
self.ordering_manager = ordering.OrderingManager(client)
else:
self.ordering_manager = ordering_manager
    @retry(logger=LOGGER)
    def list_instances(self, hourly=True, monthly=True, tags=None, cpus=None,
                       memory=None, hostname=None, domain=None,
                       local_disk=None, datacenter=None, nic_speed=None,
                       public_ip=None, private_ip=None, **kwargs):
        """Retrieve a list of all virtual servers on the account.

        Example::
            # Print out a list of hourly instances in the DAL05 data center.
            for vsi in mgr.list_instances(hourly=True, datacenter='dal05'):
                print vsi['fullyQualifiedDomainName'], vsi['primaryIpAddress']

            # Using a custom object-mask. Will get ONLY what is specified
            object_mask = "mask[hostname,monitoringRobot[robotStatus]]"
            for vsi in mgr.list_instances(mask=object_mask,hourly=True):
                print vsi

        :param boolean hourly: include hourly instances
        :param boolean monthly: include monthly instances
        :param list tags: filter based on list of tags
        :param integer cpus: filter based on number of CPUS
        :param integer memory: filter based on amount of memory
        :param string hostname: filter based on hostname
        :param string domain: filter based on domain
        :param string local_disk: filter based on local_disk
        :param string datacenter: filter based on datacenter
        :param integer nic_speed: filter based on network speed (in MBPS)
        :param string public_ip: filter based on public ip address
        :param string private_ip: filter based on private ip address
        :param dict \\*\\*kwargs: response-level options (mask, limit, etc.)
        :returns: Returns a list of dictionaries representing the matching
                  virtual servers
        """
        # Default object mask: enough fields for a listing display without
        # pulling full guest records. Callers can override via kwargs['mask'].
        if 'mask' not in kwargs:
            items = [
                'id',
                'globalIdentifier',
                'hostname',
                'domain',
                'fullyQualifiedDomainName',
                'primaryBackendIpAddress',
                'primaryIpAddress',
                'lastKnownPowerState.name',
                'powerState',
                'maxCpu',
                'maxMemory',
                'datacenter',
                'activeTransaction.transactionStatus[friendlyName,name]',
                'status',
            ]
            kwargs['mask'] = "mask[%s]" % ','.join(items)

        # Choose the narrowest Account call: hourly-only, monthly-only,
        # or the combined list when both (or neither) are requested.
        call = 'getVirtualGuests'
        if not all([hourly, monthly]):
            if hourly:
                call = 'getHourlyVirtualGuests'
            elif monthly:
                call = 'getMonthlyVirtualGuests'

        # Build an object filter from whichever criteria were supplied;
        # merges into any caller-provided 'filter' kwarg.
        _filter = utils.NestedDict(kwargs.get('filter') or {})
        if tags:
            _filter['virtualGuests']['tagReferences']['tag']['name'] = {
                'operation': 'in',
                'options': [{'name': 'data', 'value': tags}],
            }
        if cpus:
            _filter['virtualGuests']['maxCpu'] = utils.query_filter(cpus)
        if memory:
            _filter['virtualGuests']['maxMemory'] = utils.query_filter(memory)
        if hostname:
            _filter['virtualGuests']['hostname'] = utils.query_filter(hostname)
        if domain:
            _filter['virtualGuests']['domain'] = utils.query_filter(domain)
        if local_disk is not None:
            _filter['virtualGuests']['localDiskFlag'] = (
                utils.query_filter(bool(local_disk)))
        if datacenter:
            _filter['virtualGuests']['datacenter']['name'] = (
                utils.query_filter(datacenter))
        if nic_speed:
            _filter['virtualGuests']['networkComponents']['maxSpeed'] = (
                utils.query_filter(nic_speed))
        if public_ip:
            _filter['virtualGuests']['primaryIpAddress'] = (
                utils.query_filter(public_ip))
        if private_ip:
            _filter['virtualGuests']['primaryBackendIpAddress'] = (
                utils.query_filter(private_ip))
        kwargs['filter'] = _filter.to_dict()

        # Dispatch to the selected Account method.
        func = getattr(self.account, call)
        return func(**kwargs)
    @retry(logger=LOGGER)
    def get_instance(self, instance_id, **kwargs):
        """Get details about a virtual server instance.

        :param integer instance_id: the instance ID
        :returns: A dictionary containing a large amount of information about
                  the specified instance.

        Example::
            # Print out instance ID 12345.
            vsi = mgr.get_instance(12345)
            print vsi

            # Print out only FQDN and primaryIP for instance 12345
            object_mask = "mask[fullyQualifiedDomainName,primaryIpAddress]"
            vsi = mgr.get_instance(12345, mask=object_mask)
            print vsi
        """
        # Default mask: a broad snapshot of the guest, including networking,
        # OS/software credentials, billing and tags. Override via kwargs.
        if 'mask' not in kwargs:
            kwargs['mask'] = (
                'id,'
                'globalIdentifier,'
                'fullyQualifiedDomainName,'
                'hostname,'
                'domain,'
                'createDate,'
                'modifyDate,'
                'provisionDate,'
                'notes,'
                'dedicatedAccountHostOnlyFlag,'
                'privateNetworkOnlyFlag,'
                'primaryBackendIpAddress,'
                'primaryIpAddress,'
                '''networkComponents[id, status, speed, maxSpeed, name,
                                     macAddress, primaryIpAddress, port,
                                     primarySubnet,
                                     securityGroupBindings[
                                        securityGroup[id, name]]],'''
                'lastKnownPowerState.name,'
                'powerState,'
                'status,'
                'maxCpu,'
                'maxMemory,'
                'datacenter,'
                'activeTransaction[id, transactionStatus[friendlyName,name]],'
                'lastOperatingSystemReload.id,'
                'blockDevices,'
                'blockDeviceTemplateGroup[id, name, globalIdentifier],'
                'postInstallScriptUri,'
                '''operatingSystem[passwords[username,password],
                                   softwareLicense.softwareDescription[
                                       manufacturer,name,version,
                                       referenceCode]],'''
                '''softwareComponents[
                    passwords[username,password,notes],
                    softwareLicense[softwareDescription[
                                        manufacturer,name,version,
                                        referenceCode]]],'''
                'hourlyBillingFlag,'
                'userData,'
                '''billingItem[id,nextInvoiceTotalRecurringAmount,
                               children[categoryCode,nextInvoiceTotalRecurringAmount],
                               orderItem[id,
                                         order.userRecord[username],
                                         preset.keyName]],'''
                'tagReferences[id,tag[name,id]],'
                'networkVlans[id,vlanNumber,networkSpace],'
                'dedicatedHost.id'
            )
        return self.guest.getObject(id=instance_id, **kwargs)
@retry(logger=LOGGER)
def get_create_options(self):
"""Retrieves the available options for creating a VS.
:returns: A dictionary of creation options.
Example::
# Prints out the create option dictionary
options = mgr.get_create_options()
print(options)
"""
return self.guest.getCreateObjectOptions()
def cancel_instance(self, instance_id):
"""Cancel an instance immediately, deleting all its data.
:param integer instance_id: the instance ID to cancel
Example::
# Cancels instance 12345
mgr.cancel_instance(12345)
"""
return self.guest.deleteObject(id=instance_id)
def reload_instance(self, instance_id,
post_uri=None,
ssh_keys=None,
image_id=None):
"""Perform an OS reload of an instance.
:param integer instance_id: the instance ID to reload
:param string post_url: The URI of the post-install script to run
after reload
:param list ssh_keys: The SSH keys to add to the root user
:param int image_id: The GUID of the image to load onto the server
.. warning::
This will reformat the primary drive.
Post-provision script MUST be HTTPS for it to be executed.
Example::
# Reload instance ID 12345 then run a custom post-provision script.
# Post-provision script MUST be HTTPS for it to be executed.
post_uri = 'https://somehost.com/bootstrap.sh'
vsi = mgr.reload_instance(12345, post_uri=post_url)
"""
config = {}
if post_uri:
config['customProvisionScriptUri'] = post_uri
if ssh_keys:
config['sshKeyIds'] = [key_id for key_id in ssh_keys]
if image_id:
config['imageTemplateId'] = image_id
return self.client.call('Virtual_Guest', 'reloadOperatingSystem',
'FORCE', config, id=instance_id)
    def _generate_create_dict(
            self, cpus=None, memory=None, hourly=True,
            hostname=None, domain=None, local_disk=True,
            datacenter=None, os_code=None, image_id=None,
            dedicated=False, public_vlan=None, private_vlan=None,
            userdata=None, nic_speed=None, disks=None, post_uri=None,
            private=False, ssh_keys=None, public_security_groups=None,
            private_security_groups=None, boot_mode=None, **kwargs):
        """Returns a dict appropriate to pass into Virtual_Guest::createObject

        See :func:`create_instance` for a list of available options.
        """
        required = [hostname, domain]
        flavor = kwargs.get('flavor', None)
        host_id = kwargs.get('host_id', None)
        # Option pairs that cannot be ordered together.
        mutually_exclusive = [
            {'os_code': os_code, 'image_id': image_id},
            {'cpu': cpus, 'flavor': flavor},
            {'memory': memory, 'flavor': flavor},
            {'flavor': flavor, 'dedicated': dedicated},
            {'flavor': flavor, 'host_id': host_id}
        ]
        if not all(required):
            raise ValueError("hostname, and domain are required")
        for mu_ex in mutually_exclusive:
            if all(mu_ex.values()):
                raise ValueError(
                    'Can only specify one of: %s' % (','.join(mu_ex.keys())))
        # Base payload; optional sections are layered on below.
        data = {
            "startCpus": cpus,
            "maxMemory": memory,
            "hostname": hostname,
            "domain": domain,
            "localDiskFlag": local_disk,
            "hourlyBillingFlag": hourly,
            "supplementalCreateObjectOptions": {
                "bootMode": boot_mode
            }
        }
        if flavor:
            data["supplementalCreateObjectOptions"]["flavorKeyName"] = flavor
        # A specific dedicated host (host_id) supersedes the generic
        # dedicated-account flag, so the flag is only set without a host_id.
        if dedicated and not host_id:
            data["dedicatedAccountHostOnlyFlag"] = dedicated
        if host_id:
            data["dedicatedHost"] = {"id": host_id}
        if private:
            data['privateNetworkOnlyFlag'] = private
        # image_id wins over os_code (they are mutually exclusive anyway).
        if image_id:
            data["blockDeviceTemplateGroup"] = {"globalIdentifier": image_id}
        elif os_code:
            data["operatingSystemReferenceCode"] = os_code
        if datacenter:
            data["datacenter"] = {"name": datacenter}
        if public_vlan:
            data.update({
                'primaryNetworkComponent': {
                    "networkVlan": {"id": int(public_vlan)}}})
        if private_vlan:
            data.update({
                "primaryBackendNetworkComponent": {
                    "networkVlan": {"id": int(private_vlan)}}})
        if public_security_groups:
            secgroups = [{'securityGroup': {'id': int(sg)}}
                         for sg in public_security_groups]
            # Merge with any primaryNetworkComponent set by public_vlan.
            pnc = data.get('primaryNetworkComponent', {})
            pnc['securityGroupBindings'] = secgroups
            data.update({'primaryNetworkComponent': pnc})
        if private_security_groups:
            secgroups = [{'securityGroup': {'id': int(sg)}}
                         for sg in private_security_groups]
            pbnc = data.get('primaryBackendNetworkComponent', {})
            pbnc['securityGroupBindings'] = secgroups
            data.update({'primaryBackendNetworkComponent': pbnc})
        if userdata:
            data['userData'] = [{'value': userdata}]
        if nic_speed:
            data['networkComponents'] = [{'maxSpeed': nic_speed}]
        if disks:
            # First disk is device "0"; subsequent disks are numbered from
            # 2, skipping device 1 (reserved by SoftLayer — presumably for
            # swap; confirm against the Virtual_Guest docs before changing).
            data['blockDevices'] = [
                {"device": "0", "diskImage": {"capacity": disks[0]}}
            ]
            for dev_id, disk in enumerate(disks[1:], start=2):
                data['blockDevices'].append(
                    {
                        "device": str(dev_id),
                        "diskImage": {"capacity": disk}
                    }
                )
        if post_uri:
            data['postInstallScriptUri'] = post_uri
        if ssh_keys:
            data['sshKeys'] = [{'id': key_id} for key_id in ssh_keys]
        return data
@retry(logger=LOGGER)
def wait_for_transaction(self, instance_id, limit, delay=10):
"""Waits on a VS transaction for the specified amount of time.
This is really just a wrapper for wait_for_ready(pending=True).
Provided for backwards compatibility.
:param int instance_id: The instance ID with the pending transaction
:param int limit: The maximum amount of time to wait.
:param int delay: The number of seconds to sleep before checks. Defaults to 10.
"""
return self.wait_for_ready(instance_id, limit, delay=delay, pending=True)
    def wait_for_ready(self, instance_id, limit=3600, delay=10, pending=False):
        """Determine if a VS is ready and available.

        In some cases though, that can mean that no transactions are running.
        The default arguments imply a VS is operational and ready for use by
        having network connectivity and remote access is available. Setting
        ``pending=True`` will ensure future API calls against this instance
        will not error due to pending transactions such as OS Reloads and
        cancellations.

        :param int instance_id: The instance ID with the pending transaction
        :param int limit: The maximum amount of seconds to wait.
        :param int delay: The number of seconds to sleep before checks. Defaults to 10.
        :param bool pending: Wait for pending transactions not related to
                             provisioning or reloads such as monitoring.

        Example::
            # Will return once vsi 12345 is ready, or after 10 checks
            ready = mgr.wait_for_ready(12345, 10)
        """
        now = time.time()
        until = now + limit
        # Minimal mask: just the fields utils.is_ready() needs.
        mask = "mask[id, lastOperatingSystemReload[id], activeTransaction, provisionDate]"
        while now <= until:
            instance = self.get_instance(instance_id, mask=mask)
            if utils.is_ready(instance, pending):
                return True
            transaction = utils.lookup(instance, 'activeTransaction', 'transactionStatus', 'friendlyName')
            # Never sleep past the deadline.
            snooze = min(delay, until - now)
            LOGGER.info("%s - %d not ready. Auto retry in %ds", transaction, instance_id, snooze)
            time.sleep(snooze)
            now = time.time()
        LOGGER.info("Waiting for %d expired.", instance_id)
        return False
def verify_create_instance(self, **kwargs):
"""Verifies an instance creation command.
Without actually placing an order.
See :func:`create_instance` for a list of available options.
Example::
new_vsi = {
'domain': u'test01.labs.sftlyr.ws',
'hostname': u'minion05',
'datacenter': u'hkg02',
'dedicated': False,
'private': False,
'cpus': 1,
'os_code' : u'UBUNTU_LATEST',
'hourly': True,
'ssh_keys': [1234],
'disks': ('100','25'),
'local_disk': True,
'memory': 1024
}
vsi = mgr.verify_create_instance(**new_vsi)
# vsi will be a SoftLayer_Container_Product_Order_Virtual_Guest
# if your order is correct. Otherwise you will get an exception
print vsi
"""
kwargs.pop('tags', None)
create_options = self._generate_create_dict(**kwargs)
return self.guest.generateOrderTemplate(create_options)
def create_instance(self, **kwargs):
"""Creates a new virtual server instance.
.. warning::
This will add charges to your account
Example::
new_vsi = {
'domain': u'test01.labs.sftlyr.ws',
'hostname': u'minion05',
'datacenter': u'hkg02',
'dedicated': False,
'private': False,
'cpus': 1,
'os_code' : u'UBUNTU_LATEST',
'hourly': True,
'ssh_keys': [1234],
'disks': ('100','25'),
'local_disk': True,
'memory': 1024,
'tags': 'test, pleaseCancel',
'public_security_groups': [12, 15]
}
vsi = mgr.create_instance(**new_vsi)
# vsi will have the newly created vsi details if done properly.
print vsi
:param int cpus: The number of virtual CPUs to include in the instance.
:param int memory: The amount of RAM to order.
:param bool hourly: Flag to indicate if this server should be billed hourly (default) or monthly.
:param string hostname: The hostname to use for the new server.
:param string domain: The domain to use for the new server.
:param bool local_disk: Flag to indicate if this should be a local disk (default) or a SAN disk.
:param string datacenter: The short name of the data center in which the VS should reside.
:param string os_code: The operating system to use. Cannot be specified if image_id is specified.
:param int image_id: The GUID of the image to load onto the server. Cannot be specified if os_code is specified.
:param bool dedicated: Flag to indicate if this should be housed on adedicated or shared host (default).
This will incur a fee on your account.
:param int public_vlan: The ID of the public VLAN on which you want this VS placed.
:param list public_security_groups: The list of security group IDs to apply to the public interface
:param list private_security_groups: The list of security group IDs to apply to the private interface
:param int private_vlan: The ID of the private VLAN on which you want this VS placed.
:param list disks: A list of disk capacities for this server.
:param string post_uri: The URI of the post-install script to run after reload
:param bool private: If true, the VS will be provisioned only with access to the private network.
Defaults to false
:param list ssh_keys: The SSH keys to add to the root user
:param int nic_speed: The port speed to set
:param string tags: tags to set on the VS as a comma separated list
:param string flavor: The key name of the public virtual server flavor being ordered.
:param int host_id: The host id of a dedicated host to provision a dedicated host virtual server on.
"""
tags = kwargs.pop('tags', None)
inst = self.guest.createObject(self._generate_create_dict(**kwargs))
if tags is not None:
self.set_tags(tags, guest_id=inst['id'])
return inst
    @retry(logger=LOGGER)
    def set_tags(self, tags, guest_id):
        """Sets tags on a guest with a retry decorator

        Just calls guest.setTags, but if it fails from an APIError will retry

        :param string tags: comma-separated tag list to apply
        :param int guest_id: ID of the guest to tag
        """
        self.guest.setTags(tags, id=guest_id)
def create_instances(self, config_list):
"""Creates multiple virtual server instances.
This takes a list of dictionaries using the same arguments as
create_instance().
.. warning::
This will add charges to your account
Example::
# Define the instance we want to create.
new_vsi = {
'domain': u'test01.labs.sftlyr.ws',
'hostname': u'multi-test',
'datacenter': u'hkg02',
'dedicated': False,
'private': False,
'cpus': 1,
'os_code' : u'UBUNTU_LATEST',
'hourly': True,
'ssh_keys': [87634],
'disks': ('100','25'),
'local_disk': True,
'memory': 1024,
'tags': 'test, pleaseCancel',
'public_security_groups': [12, 15]
}
# using .copy() so we can make changes to individual nodes
instances = [new_vsi.copy(), new_vsi.copy(), new_vsi.copy()]
# give each its own hostname, not required.
instances[0]['hostname'] = "multi-test01"
instances[1]['hostname'] = "multi-test02"
instances[2]['hostname'] = "multi-test03"
vsi = mgr.create_instances(config_list=instances)
#vsi will be a dictionary of all the new virtual servers
print vsi
"""
tags = [conf.pop('tags', None) for conf in config_list]
resp = self.guest.createObjects([self._generate_create_dict(**kwargs)
for kwargs in config_list])
for instance, tag in zip(resp, tags):
if tag is not None:
self.set_tags(tag, guest_id=instance['id'])
return resp
def change_port_speed(self, instance_id, public, speed):
"""Allows you to change the port speed of a virtual server's NICs.
Example::
#change the Public interface to 10Mbps on instance 12345
result = mgr.change_port_speed(instance_id=12345,
public=True, speed=10)
# result will be True or an Exception
:param int instance_id: The ID of the VS
:param bool public: Flag to indicate which interface to change.
True (default) means the public interface.
False indicates the private interface.
:param int speed: The port speed to set.
.. warning::
A port speed of 0 will disable the interface.
"""
if public:
return self.client.call('Virtual_Guest',
'setPublicNetworkInterfaceSpeed',
speed, id=instance_id)
else:
return self.client.call('Virtual_Guest',
'setPrivateNetworkInterfaceSpeed',
speed, id=instance_id)
def _get_ids_from_hostname(self, hostname):
"""List VS ids which match the given hostname."""
results = self.list_instances(hostname=hostname, mask="id")
return [result['id'] for result in results]
    def _get_ids_from_ip(self, ip_address):  # pylint: disable=inconsistent-return-statements
        """List VS ids which match the given ip address.

        Returns [] for strings that are not IPv4 addresses.
        NOTE(review): implicitly returns None (not []) when no guest
        matches, as the pylint pragma acknowledges — confirm the resolver
        chain tolerates None before normalizing this to [].
        """
        try:
            # Does it look like an ip address?
            socket.inet_aton(ip_address)
        except socket.error:
            return []
        # Find the VS via ip address. First try public ip, then private
        results = self.list_instances(public_ip=ip_address, mask="id")
        if results:
            return [result['id'] for result in results]
        results = self.list_instances(private_ip=ip_address, mask="id")
        if results:
            return [result['id'] for result in results]
def edit(self, instance_id, userdata=None, hostname=None, domain=None,
notes=None, tags=None):
"""Edit hostname, domain name, notes, and/or the user data of a VS.
Parameters set to None will be ignored and not attempted to be updated.
:param integer instance_id: the instance ID to edit
:param string userdata: user data on VS to edit.
If none exist it will be created
:param string hostname: valid hostname
:param string domain: valid domain namem
:param string notes: notes about this particular VS
:param string tags: tags to set on the VS as a comma separated list.
Use the empty string to remove all tags.
:returns: bool -- True or an Exception
Example::
# Change the hostname on instance 12345 to 'something'
result = mgr.edit(instance_id=12345 , hostname="something")
#result will be True or an Exception
"""
obj = {}
if userdata:
self.guest.setUserMetadata([userdata], id=instance_id)
if tags is not None:
self.set_tags(tags, guest_id=instance_id)
if hostname:
obj['hostname'] = hostname
if domain:
obj['domain'] = domain
if notes:
obj['notes'] = notes
if not obj:
return True
return self.guest.editObject(obj, id=instance_id)
    def rescue(self, instance_id):
        """Reboot a VSI into the Xen rescue kernel.

        :param integer instance_id: the instance ID to rescue
        :returns: bool -- True or an Exception

        Example::
            # Puts instance 12345 into rescue mode
            result = mgr.rescue(instance_id=12345)
        """
        return self.guest.executeRescueLayer(id=instance_id)
    def capture(self, instance_id, name, additional_disks=False, notes=None):
        """Capture one or all disks from a VS to a SoftLayer image.

        Parameters set to None will be ignored and not attempted to be updated.

        :param integer instance_id: the instance ID to edit
        :param string name: name assigned to the image
        :param bool additional_disks: set to true to include all additional
                                      attached storage devices
        :param string notes: notes about this particular image
        :returns: dictionary -- information about the capture transaction.

        Example::
            name = "Testing Images"
            notes = "Some notes about this image"
            result = mgr.capture(instance_id=12345, name=name, notes=notes)
        """
        # Fetch only the block-device details needed to decide which disks
        # are eligible for the image.
        vsi = self.client.call(
            'Virtual_Guest',
            'getObject',
            id=instance_id,
            mask="""id,
                    blockDevices[id,device,mountType,
                                 diskImage[id,metadataFlag,type[keyName]]]""")
        disks_to_capture = []
        for block_device in vsi['blockDevices']:
            # We never want metadata disks
            if utils.lookup(block_device, 'diskImage', 'metadataFlag'):
                continue
            # We never want swap devices
            type_name = utils.lookup(block_device,
                                     'diskImage',
                                     'type',
                                     'keyName')
            if type_name == 'SWAP':
                continue
            # We never want CD images
            if block_device['mountType'] == 'CD':
                continue
            # Only use the first block device if we don't want additional disks
            # (device '0' is the primary/boot disk).
            if not additional_disks and str(block_device['device']) != '0':
                continue
            disks_to_capture.append(block_device)
        # Kicks off the (asynchronous) image-capture transaction on the VS.
        return self.guest.createArchiveTransaction(
            name, disks_to_capture, notes, id=instance_id)
    def upgrade(self, instance_id, cpus=None, memory=None,
                nic_speed=None, public=True):
        """Upgrades a VS instance by placing an upgrade order.

        Options left as None are not changed. Raises SoftLayerError when a
        requested option/value has no matching upgrade price.

        Example::
           # Upgrade instance 12345 to 4 CPUs and 4 GB of memory
           import SoftLayer
           client = SoftLayer.create_client_from_env()
           mgr = SoftLayer.VSManager(client)
           mgr.upgrade(12345, cpus=4, memory=4)

        :param int instance_id: Instance id of the VS to be upgraded
        :param int cpus: The number of virtual CPUs to upgrade to
                         of a VS instance.
        :param int memory: RAM of the VS to be upgraded to.
        :param int nic_speed: The port speed to set
        :param bool public: CPU will be in Private/Public Node.
        :returns: bool -- True when an order was placed, False when no
            upgrade options were requested.
        """
        upgrade_prices = self._get_upgrade_prices(instance_id)
        prices = []
        # Resolve each requested option to its upgrade price id.
        for option, value in {'cpus': cpus,
                              'memory': memory,
                              'nic_speed': nic_speed}.items():
            if not value:
                continue
            price_id = self._get_price_id_for_upgrade_option(upgrade_prices,
                                                             option,
                                                             value,
                                                             public)
            if not price_id:
                # Every option provided is expected to have a price
                raise exceptions.SoftLayerError(
                    "Unable to find %s option with value %s" % (option, value))

            prices.append({'id': price_id})

        # A maintenance window of "now" requests the upgrade immediately.
        maintenance_window = datetime.datetime.now(utils.UTC())
        order = {
            'complexType': 'SoftLayer_Container_Product_Order_Virtual_Guest_'
                           'Upgrade',
            'prices': prices,
            'properties': [{
                'name': 'MAINTENANCE_WINDOW',
                'value': maintenance_window.strftime("%Y-%m-%d %H:%M:%S%z")
            }],
            'virtualGuests': [{'id': int(instance_id)}],
        }
        if prices:
            self.client['Product_Order'].placeOrder(order)
            return True
        return False
def _get_package_items(self):
"""Following Method gets all the item ids related to VS.
Deprecated in favor of _get_upgrade_prices()
"""
warnings.warn("use _get_upgrade_prices() instead",
DeprecationWarning)
mask = [
'description',
'capacity',
'units',
'prices[id,locationGroupId,categories[name,id,categoryCode]]'
]
mask = "mask[%s]" % ','.join(mask)
package_keyname = "CLOUD_SERVER"
package = self.ordering_manager.get_package_by_key(package_keyname)
package_service = self.client['Product_Package']
return package_service.getItems(id=package['id'], mask=mask)
def _get_upgrade_prices(self, instance_id, include_downgrade_options=True):
"""Following Method gets all the price ids related to upgrading a VS.
:param int instance_id: Instance id of the VS to be upgraded
:returns: list
"""
mask = [
'id',
'locationGroupId',
'categories[name,id,categoryCode]',
'item[description,capacity,units]'
]
mask = "mask[%s]" % ','.join(mask)
return self.guest.getUpgradeItemPrices(include_downgrade_options, id=instance_id, mask=mask)
# pylint: disable=inconsistent-return-statements
def _get_price_id_for_upgrade_option(self, upgrade_prices, option, value, public=True):
"""Find the price id for the option and value to upgrade. This
:param list upgrade_prices: Contains all the prices related to a VS upgrade
:param string option: Describes type of parameter to be upgraded
:param int value: The value of the parameter to be upgraded
:param bool public: CPU will be in Private/Public Node.
"""
option_category = {
'memory': 'ram',
'cpus': 'guest_core',
'nic_speed': 'port_speed'
}
category_code = option_category.get(option)
for price in upgrade_prices:
if price.get('categories') is None or price.get('item') is None:
continue
product = price.get('item')
is_private = (product.get('units') == 'PRIVATE_CORE'
or product.get('units') == 'DEDICATED_CORE')
for category in price.get('categories'):
if not (category.get('categoryCode') == category_code
and str(product.get('capacity')) == str(value)):
continue
if option == 'cpus':
# Public upgrade and public guest_core price
if public and not is_private:
return price.get('id')
# Private upgrade and private guest_core price
elif not public and is_private:
return price.get('id')
elif option == 'nic_speed':
if 'Public' in product.get('description'):
return price.get('id')
else:
return price.get('id')
# pylint: disable=inconsistent-return-statements
def _get_price_id_for_upgrade(self, package_items, option, value, public=True):
"""Find the price id for the option and value to upgrade.
Deprecated in favor of _get_price_id_for_upgrade_option()
:param list package_items: Contains all the items related to an VS
:param string option: Describes type of parameter to be upgraded
:param int value: The value of the parameter to be upgraded
:param bool public: CPU will be in Private/Public Node.
"""
warnings.warn("use _get_price_id_for_upgrade_option() instead",
DeprecationWarning)
option_category = {
'memory': 'ram',
'cpus': 'guest_core',
'nic_speed': 'port_speed'
}
category_code = option_category[option]
for item in package_items:
is_private = (item.get('units') == 'PRIVATE_CORE')
for price in item['prices']:
if 'locationGroupId' in price and price['locationGroupId']:
# Skip location based prices
continue
if 'categories' not in price:
continue
categories = price['categories']
for category in categories:
if not (category['categoryCode'] == category_code
and str(item['capacity']) == str(value)):
continue
if option == 'cpus':
if public and not is_private:
return price['id']
elif not public and is_private:
return price['id']
elif option == 'nic_speed':
if 'Public' in item['description']:
return price['id']
else:
return price['id']
| |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
import collections
import copy
import os
import unicodedata
from typing import Optional
from ...utils import logging
from ..bert.tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer, load_vocab
logger = logging.get_logger(__name__)

# Local filename under which the vocabulary is stored inside a checkpoint.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

# Canonical download URLs for the vocabularies of the published
# cl-tohoku Japanese BERT checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/vocab.txt",
        "cl-tohoku/bert-base-japanese-whole-word-masking": "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/vocab.txt",
        "cl-tohoku/bert-base-japanese-char": "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/vocab.txt",
        "cl-tohoku/bert-base-japanese-char-whole-word-masking": "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/vocab.txt",
    }
}

# Maximum input length (positional-embedding size) of each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "cl-tohoku/bert-base-japanese": 512,
    "cl-tohoku/bert-base-japanese-whole-word-masking": 512,
    "cl-tohoku/bert-base-japanese-char": 512,
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": 512,
}

# Default tokenizer configuration per checkpoint: the "-char" variants use
# character-level subwords, the others use wordpiece; all use MeCab word
# segmentation and keep the original casing.
PRETRAINED_INIT_CONFIGURATION = {
    "cl-tohoku/bert-base-japanese": {
        "do_lower_case": False,
        "word_tokenizer_type": "mecab",
        "subword_tokenizer_type": "wordpiece",
    },
    "cl-tohoku/bert-base-japanese-whole-word-masking": {
        "do_lower_case": False,
        "word_tokenizer_type": "mecab",
        "subword_tokenizer_type": "wordpiece",
    },
    "cl-tohoku/bert-base-japanese-char": {
        "do_lower_case": False,
        "word_tokenizer_type": "mecab",
        "subword_tokenizer_type": "character",
    },
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": {
        "do_lower_case": False,
        "word_tokenizer_type": "mecab",
        "subword_tokenizer_type": "character",
    },
}
class BertJapaneseTokenizer(BertTokenizer):
    r"""
    Construct a BERT tokenizer for Japanese text, based on a MecabTokenizer.

    Args:
        vocab_file (:obj:`str`):
            Path to a one-wordpiece-per-line vocabulary file.
        do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to lower case the input. Only has an effect when do_basic_tokenize=True.
        do_word_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to do word tokenization.
        do_subword_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to do subword tokenization.
        word_tokenizer_type (:obj:`str`, `optional`, defaults to :obj:`"basic"`):
            Type of word tokenizer: either "basic" or "mecab".
        subword_tokenizer_type (:obj:`str`, `optional`, defaults to :obj:`"wordpiece"`):
            Type of subword tokenizer: either "wordpiece" or "character".
        mecab_kwargs (:obj:`str`, `optional`):
            Dictionary passed to the :obj:`MecabTokenizer` constructor.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        do_word_tokenize=True,
        do_subword_tokenize=True,
        word_tokenizer_type="basic",
        subword_tokenizer_type="wordpiece",
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        mecab_kwargs=None,
        **kwargs
    ):
        # Skip BertTokenizer.__init__ on purpose: it would require a
        # BasicTokenizer/WordpieceTokenizer pair, while this class builds
        # its own (possibly MeCab-based) tokenizers below.
        super(BertTokenizer, self).__init__(
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            do_lower_case=do_lower_case,
            do_word_tokenize=do_word_tokenize,
            do_subword_tokenize=do_subword_tokenize,
            word_tokenizer_type=word_tokenizer_type,
            subword_tokenizer_type=subword_tokenizer_type,
            never_split=never_split,
            mecab_kwargs=mecab_kwargs,
            **kwargs,
        )
        # ^^ We call the grandparent's init, not the parent's.

        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained "
                "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        # token -> id and the reverse id -> token mapping.
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])

        self.do_word_tokenize = do_word_tokenize
        self.word_tokenizer_type = word_tokenizer_type
        # Stored under a private name; exposed through the read-only
        # `do_lower_case` property below.
        self.lower_case = do_lower_case
        self.never_split = never_split
        # Deep-copied so later caller mutations cannot affect pickling
        # round-trips (see __setstate__).
        self.mecab_kwargs = copy.deepcopy(mecab_kwargs)
        if do_word_tokenize:
            if word_tokenizer_type == "basic":
                self.word_tokenizer = BasicTokenizer(
                    do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=False
                )
            elif word_tokenizer_type == "mecab":
                self.word_tokenizer = MecabTokenizer(
                    do_lower_case=do_lower_case, never_split=never_split, **(mecab_kwargs or {})
                )
            else:
                raise ValueError(f"Invalid word_tokenizer_type '{word_tokenizer_type}' is specified.")

        self.do_subword_tokenize = do_subword_tokenize
        self.subword_tokenizer_type = subword_tokenizer_type
        if do_subword_tokenize:
            if subword_tokenizer_type == "wordpiece":
                self.subword_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
            elif subword_tokenizer_type == "character":
                self.subword_tokenizer = CharacterTokenizer(vocab=self.vocab, unk_token=self.unk_token)
            else:
                raise ValueError(f"Invalid subword_tokenizer_type '{subword_tokenizer_type}' is specified.")

    @property
    def do_lower_case(self):
        # Read-only view of the casing flag captured at construction time.
        return self.lower_case

    def __getstate__(self):
        # The MeCab tagger is dropped before pickling -- presumably it is
        # not picklable (wraps a native object); it is rebuilt in
        # __setstate__ from never_split/mecab_kwargs.
        state = dict(self.__dict__)
        if self.word_tokenizer_type == "mecab":
            del state["word_tokenizer"]
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        # Recreate the MeCab word tokenizer removed by __getstate__.
        if self.word_tokenizer_type == "mecab":
            self.word_tokenizer = MecabTokenizer(
                do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.mecab_kwargs or {})
            )

    def _tokenize(self, text):
        # Stage 1: word segmentation (special tokens are never split).
        if self.do_word_tokenize:
            tokens = self.word_tokenizer.tokenize(text, never_split=self.all_special_tokens)
        else:
            tokens = [text]
        # Stage 2: subword segmentation of each word.
        if self.do_subword_tokenize:
            split_tokens = [sub_token for token in tokens for sub_token in self.subword_tokenizer.tokenize(token)]
        else:
            split_tokens = tokens
        return split_tokens
class MecabTokenizer:
    """Runs basic tokenization with MeCab morphological parser."""

    def __init__(
        self,
        do_lower_case=False,
        never_split=None,
        normalize_text=True,
        mecab_dic: Optional[str] = "ipadic",
        mecab_option: Optional[str] = None,
    ):
        """
        Constructs a MecabTokenizer.

        Args:
            **do_lower_case**: (`optional`) boolean (default True)
                Whether to lowercase the input.
            **never_split**: (`optional`) list of str
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                :func:`PreTrainedTokenizer.tokenize`) List of tokens not to split.
            **normalize_text**: (`optional`) boolean (default True)
                Whether to apply unicode normalization to text before tokenization.
            **mecab_dic**: (`optional`) string (default "ipadic")
                Name of dictionary to be used for MeCab initialization. If you are using a system-installed dictionary,
                set this option to `None` and modify `mecab_option`.
            **mecab_option**: (`optional`) string
                String passed to MeCab constructor.

        Raises:
            ModuleNotFoundError: if fugashi (or the requested dictionary
                package) is not installed.
            ValueError: if `mecab_dic` is not one of the supported names.
        """
        self.do_lower_case = do_lower_case
        self.never_split = never_split if never_split is not None else []
        self.normalize_text = normalize_text

        # fugashi is an optional dependency; fail with an actionable message.
        try:
            import fugashi
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install fugashi to use MecabTokenizer. "
                "See https://pypi.org/project/fugashi/ for installation."
            )

        mecab_option = mecab_option or ""

        # Resolve the requested pip-packaged dictionary to its install dir;
        # with mecab_dic=None the caller must configure MeCab entirely via
        # mecab_option.
        if mecab_dic is not None:
            if mecab_dic == "ipadic":
                try:
                    import ipadic
                except ModuleNotFoundError as error:
                    raise error.__class__(
                        "The ipadic dictionary is not installed. "
                        "See https://github.com/polm/ipadic-py for installation."
                    )

                dic_dir = ipadic.DICDIR

            elif mecab_dic == "unidic_lite":
                try:
                    import unidic_lite
                except ModuleNotFoundError as error:
                    raise error.__class__(
                        "The unidic_lite dictionary is not installed. "
                        "See https://github.com/polm/unidic-lite for installation."
                    )

                dic_dir = unidic_lite.DICDIR

            elif mecab_dic == "unidic":
                try:
                    import unidic
                except ModuleNotFoundError as error:
                    raise error.__class__(
                        "The unidic dictionary is not installed. "
                        "See https://github.com/polm/unidic-py for installation."
                    )

                dic_dir = unidic.DICDIR
                # unidic requires a separate data download, so the package
                # can be importable while the dictionary itself is missing.
                if not os.path.isdir(dic_dir):
                    raise RuntimeError(
                        "The unidic dictionary itself is not found."
                        "See https://github.com/polm/unidic-py for installation."
                    )

            else:
                raise ValueError("Invalid mecab_dic is specified.")

            # Point MeCab at the packaged dictionary and its mecabrc,
            # prepending to any user-supplied options.
            mecabrc = os.path.join(dic_dir, "mecabrc")
            mecab_option = f'-d "{dic_dir}" -r "{mecabrc}" ' + mecab_option

        self.mecab = fugashi.GenericTagger(mecab_option)

    def tokenize(self, text, never_split=None, **kwargs):
        """Tokenizes a piece of text.

        Returns the MeCab surface forms, lower-cased when requested except
        for tokens in the combined never_split list.
        """
        if self.normalize_text:
            text = unicodedata.normalize("NFKC", text)

        never_split = self.never_split + (never_split if never_split is not None else [])
        tokens = []

        for word in self.mecab(text):
            token = word.surface

            if self.do_lower_case and token not in never_split:
                token = token.lower()

            tokens.append(token)

        return tokens
class CharacterTokenizer:
    """Runs Character tokenization."""

    def __init__(self, vocab, unk_token, normalize_text=True):
        """
        Constructs a CharacterTokenizer.

        Args:
            **vocab**:
                Vocabulary object.
            **unk_token**: str
                A special symbol for out-of-vocabulary token.
            **normalize_text**: (`optional`) boolean (default True)
                Whether to apply unicode normalization to text before tokenization.
        """
        self.vocab = vocab
        self.unk_token = unk_token
        self.normalize_text = normalize_text

    def tokenize(self, text):
        """
        Tokenizes a piece of text into characters.

        For example, :obj:`input = "apple"` will return as output :obj:`["a", "p", "p", "l", "e"]`.

        Args:
            text: A single token or whitespace separated tokens.
                This should have already been passed through `BasicTokenizer`.

        Returns:
            A list of characters.
        """
        if self.normalize_text:
            text = unicodedata.normalize("NFKC", text)

        # Characters present in the vocabulary map to themselves; anything
        # else becomes the unknown token.
        return [char if char in self.vocab else self.unk_token for char in text]
| |
import logging
import asyncio
import pickle
import ssl
from message_types import measurement_msg
_logger = logging.getLogger(__name__)
def create_ssl_context(ssl_dict):
    """
    loads the ssl certificate for secure communication

    :param ssl_dict: dictionary consisting of certification file, key
                     keys: certFile, keyFile
    :returns ssl.SSLContext
    :raises ssl.SSLError: when the certificate/key pair cannot be loaded
    """
    _logger.debug("#debug:loading-ssl-certificates")
    try:
        # choosing the version of the SSL Protocol; SSLv2/SSLv3 are
        # explicitly disabled as insecure.
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.options |= ssl.OP_NO_SSLv2
        context.options |= ssl.OP_NO_SSLv3
        context.load_cert_chain(certfile=ssl_dict["certFile"],
                                keyfile=ssl_dict["keyFile"])
        # no peer certificate validation is performed by this context
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
        _logger.info("#info:ssl-certificates-loaded!")
        return context
    except ssl.SSLError as e:
        _logger.exception(e)
        _logger.error("#error:could-not-load-the-ssl-certificates")
        raise e
class CommunicationModule():
    """
    This class is responsible for handling communications with Server

    _to_be_acknowledged: is a dictionary, where keys are message_id and values
    are the message itself, which is a list of dictionaries, each dictionary represents
    a measurement

    _to_be_sent: which is a list of dictionaries, each dictionary represents
    a measurement
    """

    def __init__(self, server_host, server_port, ssl_context):
        self._server_host = server_host
        self._server_port = server_port
        self._ssl_context = ssl_context
        self._reader = None
        self._writer = None
        # dictionary of Messages which
        # have not been acknowledged
        self._to_be_acknowledged = {}
        # the length of each message, number of
        # measurements to be send together
        self._WINDOW_SIZE = 3
        # the message that will be sent to the server
        # a list of measurements, size : _WINDOW_SIZE
        # each time this list is reach to its limit, a
        # message will be sent and list will be emptied
        self._to_be_sent = []
        # giving ids to the messages
        self._MSG_COUNTER = 0

    @asyncio.coroutine
    def connect(self):
        """Open the (optionally TLS-wrapped) connection to the server."""
        try:
            self._reader, self._writer = yield from \
                asyncio.open_connection(
                    self._server_host, self._server_port,
                    ssl=self._ssl_context)
        except Exception as e:
            _logger.exception(e)
            # the exception will be handled on
            # the next level (report method of
            # the reporter class)
            raise e

    @asyncio.coroutine
    def send(self, msg):
        """
        called by Reporter
        sends the measurement alongside the previously buffered messages if the limit (_WINDOW_SIZE)
        has been reached(by calling send_message), otherwise appends it to the buffered list (to_be_sent)
        :param msg: an instance of an object representing a measurement
        """
        try:
            msg = msg.__dict__
            self._to_be_sent.append(msg)
            # check if the _message can be send
            if len(self._to_be_sent) >= self._WINDOW_SIZE:
                # getting the first _WINDOW_SIZE items
                message = self._to_be_sent[0:self._WINDOW_SIZE]
                # creating the message, giving a new id
                # to the message, and send it to the server
                self._MSG_COUNTER += 1
                yield from self.send_measurement(self._MSG_COUNTER, message)
                response = yield from self.receive_message(self._MSG_COUNTER)
                # adding the message to the to_be_acknowledged dictionary
                self._to_be_acknowledged[self._MSG_COUNTER] = message
                # removing message from _to_be_sent list
                self._to_be_sent = self._to_be_sent[self._WINDOW_SIZE:]
                if response:
                    yield from self.handle_response(response)
                else:
                    _logger.warn("#warn:no-ack-received-from-server-for-msg-%s" % self._MSG_COUNTER)
            else:
                _logger.debug("#debug:msg-will-be-send-later-len(to_be_sent):%s" % len(self._to_be_sent))
        except Exception as e:
            _logger.error("#error:error-occurred-while-sending-the-message:%s" % msg)
            _logger.exception(e)
            # to be handled by the upper class Reporter
            raise e

    @asyncio.coroutine
    def send_measurement(self, msg_id, msg):
        """
        sends a list of measurements
        :param msg_id: int
        :param msg: list of dictionaries (or None to tell the server to stop
                    requesting this id)
        :return:
        """
        message = measurement_msg.MeasurementMessage(id=msg_id, data=msg)
        if msg:
            # when we are sending a measurement and msg is not None
            _logger.debug('#debug:sending-message-with-id-:%s-and-size:%s' % (msg_id, len(msg)))
        yield from self.send_message(message)

    @asyncio.coroutine
    def receive_message(self, msg_id):
        """
        waits to receive response by the other side
        :param msg_id: int , the msg we are waiting for its response
        :return: (bytes) response sent by server, or None on timeout
        """
        try:
            data = yield from asyncio.wait_for(self._reader.read(1000), timeout=3)
            return data
        except asyncio.TimeoutError:
            _logger.warn("#warn:timeout-reached-while-waiting-for-ack-msg:%s" % msg_id)

    @asyncio.coroutine
    def send_message(self, message):
        """
        Sends a message to the server
        :param message: an inherited instance of GeneralMessage
        (@see general_message.GeneralMessage)
        """
        # packing the message into bytes
        byte_message = pickle.dumps(message)
        # sending the message to the server
        self._writer.write(byte_message)
        yield from self._writer.drain()

    @asyncio.coroutine
    def handle_response(self, message):
        """
        handles a message sent by the server
        :param message: (bytes)
        :return:
        """
        try:
            # SECURITY NOTE: pickle.loads on data from the network is unsafe
            # unless the peer is fully trusted (the TLS context here does not
            # verify the peer certificate).
            message = pickle.loads(message)
            # message must be a subclass of GeneralMessage
            _logger.debug("received-msg-of-type-%s: " % message.get_type())
            if message.get_type() == 'ack':
                yield from self.handle_ack(message)
            elif message.get_type() == 'request':
                yield from self.handle_request(message)
            else:
                _logger.warn("#warn:unknown-message-type-received:%s" % message.get_type())
        except pickle.PickleError:
            _logger.error("#error:Pickling-error-while-analyzing-message:%s" % message)
        except KeyError:
            _logger.warn("#debug:-corrupted-message-received-%s" % message)
        except AttributeError:
            _logger.error("#error:message-is-corrupted-%s" % message)

    @asyncio.coroutine
    def handle_ack(self, ack):
        """
        analyzes the acknowledgment sent by the server
        :param ack: an instance of type AckknowledgmentMessage
        :return:
        """
        try:
            _logger.debug("#debug:ack:%s" % ack)
            # checking and removing the delivered message from
            # our waiting list
            if ack.get_success() in self._to_be_acknowledged:
                self._to_be_acknowledged.pop(ack.get_success())
            else:
                _logger.warn("#debug:acknowledgment-received-for-non-existing-message-id:%s" % ack.get_success())
                _logger.debug("#debug:to_be_acknowledged-list:%s" % self._to_be_acknowledged)
            # if the server asked for a specific
            # msg id send the wanted message
            if ack.get_wanted():
                # send the msg if we have it in buffer
                if ack.get_wanted() in self._to_be_acknowledged:
                    # sending the message to the server
                    _logger.debug("#debug:sending-wanted-message-id: %s" % ack.get_wanted())
                    yield from self.send_measurement(ack.get_wanted(), self._to_be_acknowledged[ack.get_wanted()])
                    # BUG FIX: previously passed the bound method itself
                    # (ack.get_wanted) instead of calling it, so the log in
                    # receive_message showed a method repr, not the id.
                    response = yield from self.receive_message(ack.get_wanted())
                    yield from self.handle_response(response)
                # the msg asked by server does not exists
                # in buffer
                else:
                    _logger.warn("#debug:acknowledgment-received-for-non-existing-message-id:%s" % ack.get_wanted())
                    _logger.debug("#debug:to_be_acknowledged-list:%s" % self._to_be_acknowledged)
                    # sending None for this message_id
                    # server will stop requesting for this id
                    yield from self.send_measurement(ack.get_wanted(), None)
        except pickle.PickleError:
            _logger.error("#error:Pickleing-error-while-analyzing-ack:%s" % ack)
        except KeyError:
            _logger.warn("#debug:-corrupted-ack-received-%s" % ack)

    @asyncio.coroutine
    def handle_request(self, msg):
        """
        handles a request for getting message counter of the client by
        the server, sends the server the news value for the _MSG_COUNTER
        :param msg: an instance of type requests.Request message type
        :return:
        """
        if msg.get_request() == 'GET_MSG_COUNTER':
            msg.set_response(self._MSG_COUNTER)
            yield from self.send_message(msg)

    def disconnect(self):
        """Close the underlying connection to the server."""
        _logger.info("#info:disconnecting-the-communication-module...")
        self._writer.close()
| |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions."""
import base64
import datetime
import hashlib
import imghdr
import json
import os
import random
import re
import string
import StringIO
import time
import unicodedata
import urllib
import urlparse
import zipfile
import yaml
import feconf # pylint: disable=relative-import
class InvalidInputException(Exception):
    """Error class for invalid input."""
class ValidationError(Exception):
    """Error class for when a domain object fails validation."""
class ExplorationConversionError(Exception):
    """Error class for when an exploration fails to convert from a certain
    version to a certain version.
    """
def create_enum(*sequential, **names):
    """Creates an enum-like class.

    Positional arguments become members whose value equals their own name;
    keyword arguments supply explicit name/value pairs.
    """
    members = {member: member for member in sequential}
    members.update(names)
    return type('Enum', (), members)
def get_file_contents(filepath, raw_bytes=False, mode='r'):
    """Gets the contents of a file, given a relative filepath from oppia/.

    When raw_bytes is False, the raw contents are decoded as UTF-8.
    """
    with open(filepath, mode) as f:
        contents = f.read()
    return contents if raw_bytes else contents.decode('utf-8')
def get_exploration_components_from_dir(dir_path):
    """Gets the (yaml, assets) from the contents of an exploration data dir.

    Args:
        dir_path: a full path to the exploration root directory.

    Returns:
        a 2-tuple, the first element of which is a yaml string, and the second
        element of which is a list of (filepath, content) 2-tuples. The filepath
        does not include the assets/ prefix.

    Raises:
        Exception: if the following condition doesn't hold: "There is exactly one
        file not in assets/, and this file has a .yaml suffix".
    """
    yaml_content = None
    assets_list = []

    # Normalize away trailing slashes so the path-depth arithmetic below
    # (used to strip the 'assets/' prefix) is correct.
    dir_path_array = dir_path.split('/')
    while dir_path_array[-1] == '':
        dir_path_array = dir_path_array[:-1]
    dir_path_length = len(dir_path_array)

    for root, dirs, files in os.walk(dir_path):
        for directory in dirs:
            # Only 'assets' may live directly under the exploration root.
            if root == dir_path and directory != 'assets':
                raise Exception(
                    'The only directory in %s should be assets/' % dir_path)

        for filename in files:
            filepath = os.path.join(root, filename)
            if root == dir_path:
                if filepath.endswith('.DS_Store'):
                    # These files are added automatically by Mac OS Xsystems.
                    # We ignore them.
                    continue
                # Exactly one root-level file is allowed: the .yaml spec.
                if yaml_content is not None:
                    raise Exception('More than one non-asset file specified '
                                    'for %s' % dir_path)
                elif not filepath.endswith('.yaml'):
                    raise Exception('Found invalid non-asset file %s. There '
                                    'should only be a single non-asset file, '
                                    'and it should have a .yaml suffix.' %
                                    filepath)
                else:
                    yaml_content = get_file_contents(filepath)
            else:
                filepath_array = filepath.split('/')
                # The additional offset is to remove the 'assets/' prefix.
                filename = '/'.join(filepath_array[dir_path_length + 1:])
                assets_list.append((filename, get_file_contents(
                    filepath, raw_bytes=True)))

    if yaml_content is None:
        raise Exception('No yaml file specifed for %s' % dir_path)

    return yaml_content, assets_list
def get_exploration_components_from_zip(zip_file_contents):
    """Gets the (yaml, assets) from the contents of an exploration zip file.

    Args:
        zip_file_contents: a string of raw bytes representing the contents of
            a zip file that comprises the exploration.

    Returns:
        a 2-tuple, the first element of which is a yaml string, and the second
        element of which is a list of (filepath, content) 2-tuples. The filepath
        does not include the assets/ prefix.

    Raises:
        Exception: if the following condition doesn't hold: "There is exactly one
        file not in assets/, and this file has a .yaml suffix".
    """
    memfile = StringIO.StringIO()
    memfile.write(zip_file_contents)

    zf = zipfile.ZipFile(memfile, 'r')
    yaml_content = None
    assets_list = []
    for filepath in zf.namelist():
        if filepath.startswith('assets/'):
            # BUG FIX: append() takes a single argument; previously two
            # arguments were passed, raising TypeError. Append the
            # (stripped filepath, content) pair as one tuple, matching
            # get_exploration_components_from_dir().
            assets_list.append(('/'.join(filepath.split('/')[1:]),
                                zf.read(filepath)))
        else:
            if yaml_content is not None:
                raise Exception(
                    'More than one non-asset file specified for zip file')
            elif not filepath.endswith('.yaml'):
                raise Exception('Found invalid non-asset file %s. There '
                                'should only be a single file not in assets/, '
                                'and it should have a .yaml suffix.' %
                                filepath)
            else:
                yaml_content = zf.read(filepath)
    if yaml_content is None:
        raise Exception('No yaml file specified in zip file contents')

    return yaml_content, assets_list
def get_comma_sep_string_from_list(items):
    """Turns a list of items into a comma-separated string.

    The final pair is joined with 'and'; an empty list yields ''.
    """
    if len(items) <= 1:
        return items[0] if items else ''
    return '%s and %s' % (', '.join(items[:-1]), items[-1])
def to_ascii(input_string):
    """Change unicode characters in a string to ascii if possible.

    Decomposes the input (NFKD) and drops code points that cannot be encoded
    as ASCII. NOTE: relies on the Python 2 `unicode` builtin.
    """
    return unicodedata.normalize(
        'NFKD', unicode(input_string)).encode('ascii', 'ignore')
def yaml_from_dict(dictionary, width=80):
    """Gets the YAML representation of a dict.

    Args:
        dictionary: the dict to serialize.
        width: maximum line width of the emitted YAML.

    Returns:
        str. Block-style (non-flow) YAML for the dictionary.
    """
    return yaml.safe_dump(dictionary, default_flow_style=False, width=width)
def dict_from_yaml(yaml_str):
    """Gets the dict representation of a YAML string.

    Raises:
        InvalidInputException: if the string is not parseable YAML.
        AssertionError: if the YAML parses to a non-dict value.
          NOTE(review): callers likely expect InvalidInputException in this
          case too -- confirm before changing.
    """
    try:
        retrieved_dict = yaml.safe_load(yaml_str)
        assert isinstance(retrieved_dict, dict)
        return retrieved_dict
    except yaml.YAMLError as e:
        raise InvalidInputException(e)
def recursively_remove_key(obj, key_to_remove):
    """Recursively removes keys from a list or dict.

    Mutates `obj` in place; non-container values are left untouched.
    """
    if isinstance(obj, dict):
        # Drop the key at this level first, then descend into the
        # remaining values.
        obj.pop(key_to_remove, None)
        for value in obj.values():
            recursively_remove_key(value, key_to_remove)
    elif isinstance(obj, list):
        for element in obj:
            recursively_remove_key(element, key_to_remove)
def get_random_int(upper_bound):
    """Returns a cryptographically random integer in [0, upper_bound)."""
    assert upper_bound >= 0 and isinstance(upper_bound, int)
    # SystemRandom draws from the OS entropy source rather than the
    # deterministic Mersenne Twister.
    return random.SystemRandom().randrange(0, upper_bound)
def get_random_choice(alist):
    """Gets a random element from a non-empty list."""
    assert isinstance(alist, list) and len(alist) > 0
    return alist[get_random_int(len(alist))]
def convert_png_binary_to_data_url(content):
    """Converts a png image string (represented by 'content') to a data URL.

    Raises:
        Exception: if the content is not recognized as PNG data.
    """
    if imghdr.what(None, content) != 'png':
        raise Exception('The given string does not represent a PNG image.')
    return 'data:image/png;base64,%s' % urllib.quote(
        content.encode('base64'))
def convert_png_to_data_url(filepath):
    """Reads the png file at filepath and returns it as a data URL."""
    return convert_png_binary_to_data_url(
        get_file_contents(filepath, raw_bytes=True, mode='rb'))
def camelcase_to_hyphenated(camelcase_str):
    """Convert a CamelCase string to lowercase-hyphenated form."""
    # First split before any Capital-followed-by-lowercase run, then split
    # remaining lower/digit->Upper boundaries, and lowercase the result.
    partially_split = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', camelcase_str)
    return re.sub('([a-z0-9])([A-Z])', r'\1-\2', partially_split).lower()
def set_url_query_parameter(url, param_name, param_value):
    """Set or replace a query parameter, and return the modified URL."""
    if not isinstance(param_name, basestring):
        raise Exception(
            'URL query parameter name must be a string, received %s'
            % param_name)
    url_parts = urlparse.urlsplit(url)
    # parse_qs maps each name to a list of values; overwrite (or add) the
    # requested parameter with a single value.
    query_params = urlparse.parse_qs(url_parts.query)
    query_params[param_name] = [param_value]
    return urlparse.urlunsplit((
        url_parts.scheme, url_parts.netloc, url_parts.path,
        urllib.urlencode(query_params, doseq=True), url_parts.fragment))
class JSONEncoderForHTML(json.JSONEncoder):
    """Encodes JSON that is safe to embed in HTML.

    '&', '<' and '>' are emitted as \\uXXXX escapes so the output can
    never close a <script> tag or open an HTML element.
    """

    def encode(self, o):
        pieces = list(self.iterencode(o, True))
        joiner = '' if self.ensure_ascii else u''
        return joiner.join(pieces)

    def iterencode(self, o, _one_shot=False):
        for chunk in super(JSONEncoderForHTML, self).iterencode(
                o, _one_shot):
            yield chunk.replace('&', '\\u0026').replace(
                '<', '\\u003c').replace('>', '\\u003e')
def convert_to_hash(input_string, max_length):
    """Convert a string to a URL-safe SHA1-based hash.

    Returns:
        The urlsafe-base64 encoding of the SHA1 digest of the UTF-8
        encoded input, truncated to at most max_length characters.
    """
    if not isinstance(input_string, basestring):
        raise Exception(
            'Expected string, received %s of type %s' %
            (input_string, type(input_string)))
    digest = hashlib.sha1(input_string.encode('utf-8')).digest()
    return base64.urlsafe_b64encode(digest)[:max_length]
def base64_from_int(value):
    """Returns the base64 encoding of the single byte `value`.

    Args:
        value: an int in [0, 255].

    Returns:
        The base64-encoded representation of that one byte.
    """
    import struct
    # Bug fix: the previous `bytes([value])` is correct on Python 3 but,
    # under Python 2 (where bytes is an alias of str), it encoded the
    # literal text '[value]' instead of the byte itself. struct.pack
    # yields the intended single byte on both interpreters.
    return base64.b64encode(struct.pack('B', value))
def get_time_in_millisecs(datetime_obj):
    """Returns time in milliseconds since the Epoch.

    Args:
        datetime_obj: An object of type datetime.datetime.

    NOTE(review): time.mktime interprets the timetuple in *local* time;
    callers pass datetime.datetime.utcnow(), so the result is a true
    epoch offset only when the local timezone is UTC -- confirm upstream.
    """
    whole_msecs = time.mktime(datetime_obj.timetuple()) * 1000
    fractional_msecs = datetime_obj.microsecond / 1000.0
    return whole_msecs + fractional_msecs
def get_current_time_in_millisecs():
    """Returns the current UTC time in milliseconds since the Epoch."""
    now = datetime.datetime.utcnow()
    return get_time_in_millisecs(now)
def get_human_readable_time_string(time_msec):
    """Given a time in milliseconds since the epoch, get a human-readable
    time string for the admin dashboard (e.g. 'January 01 00:00:00', UTC).
    """
    time_struct = time.gmtime(time_msec / 1000.0)
    return time.strftime('%B %d %H:%M:%S', time_struct)
def generate_random_string(length):
    """Returns the URL-safe base64 encoding of `length` random bytes."""
    random_bytes = os.urandom(length)
    return base64.urlsafe_b64encode(random_bytes)
def generate_new_session_id():
    """Generates a fresh session id from 24 random bytes."""
    session_id_byte_length = 24
    return generate_random_string(session_id_byte_length)
def vfs_construct_path(base_path, *path_components):
    """Mimics behavior of os.path.join on Posix machines.

    Components are appended with '/' separators; a component that starts
    with '/' is treated as absolute and discards everything before it.
    """
    result = base_path
    for piece in path_components:
        if piece.startswith('/'):
            # Absolute component: restart the path from here.
            result = piece
        elif result == '' or result.endswith('/'):
            result = result + piece
        else:
            result = '%s/%s' % (result, piece)
    return result
def vfs_normpath(path):
    """Normalize path from posixpath.py, eliminating double slashes, etc.

    Mirrors posixpath.normpath: collapses '//' and '/./', resolves '..'
    where an earlier component can absorb it, keeps unresolvable '..'
    components, and preserves a leading '//' (which POSIX treats as
    special) while collapsing three or more leading slashes to one.
    Returns '.' for an empty result.
    """
    # Preserve unicode (if path is unicode)
    slash, dot = (u'/', u'.') if isinstance(path, unicode) else ('/', '.')
    if path == '':
        return dot
    initial_slashes = path.startswith('/')
    # POSIX allows one or two initial slashes, but treats three or more
    # as single slash.
    if (initial_slashes and
        path.startswith('//') and not path.startswith('///')):
        initial_slashes = 2
    comps = path.split('/')
    new_comps = []
    for comp in comps:
        if comp in ('', '.'):
            continue
        # Keep '..' when there is nothing to pop: it follows another '..',
        # or it is at the start of a relative path.
        if (comp != '..' or
                (not initial_slashes and not new_comps) or
                (new_comps and new_comps[-1] == '..')):
            new_comps.append(comp)
        elif new_comps:
            new_comps.pop()
    comps = new_comps
    path = slash.join(comps)
    if initial_slashes:
        path = slash * initial_slashes + path
    return path or dot
def require_valid_name(name, name_type, allow_empty=False):
    """Generic name validation.

    Args:
        name: the name to validate.
        name_type: a human-readable string, like 'the exploration title' or
            'a state name'. This will be shown in error messages.
        allow_empty: if True, empty strings are allowed.

    Raises:
        ValidationError: if any check fails (checks run in a fixed order,
        so the first failure determines the message).
    """
    if not isinstance(name, basestring):
        raise ValidationError('%s must be a string.' % name_type)

    if allow_empty and name == '':
        return

    # This check is needed because state names are used in URLs and as ids
    # for statistics, so the name length should be bounded above.
    if not 1 <= len(name) <= 50:
        raise ValidationError(
            'The length of %s should be between 1 and 50 '
            'characters; received %s' % (name_type, name))

    if name[0] in string.whitespace or name[-1] in string.whitespace:
        raise ValidationError(
            'Names should not start or end with whitespace.')

    if re.search(r'\s\s+', name):
        raise ValidationError(
            'Adjacent whitespace in %s should be collapsed.' % name_type)

    for invalid_char in feconf.INVALID_NAME_CHARS:
        if invalid_char in name:
            raise ValidationError(
                'Invalid character %s in %s: %s' %
                (invalid_char, name_type, name))
def capitalize_string(input_string):
    """Converts the first character of a string to its uppercase equivalent
    (if it's a letter), and returns the result.
    """
    if not input_string:
        # Empty (falsy) strings are returned unchanged.
        return input_string
    return input_string[0].upper() + input_string[1:]
def get_hex_color_for_category(category):
    """Returns the configured color for the category, or the default."""
    if category in feconf.CATEGORIES_TO_COLORS:
        return feconf.CATEGORIES_TO_COLORS[category]
    return feconf.DEFAULT_COLOR
def get_thumbnail_icon_url_for_category(category):
    """Returns the thumbnail icon URL for a category, falling back to the
    default icon for unknown categories.
    """
    if category in feconf.ALL_CATEGORIES:
        icon_name = category
    else:
        icon_name = feconf.DEFAULT_THUMBNAIL_ICON
    # Remove all spaces from the string.
    return '/images/subjects/%s.svg' % icon_name.replace(' ', '')
def _get_short_language_description(full_language_description):
"""Given one of the descriptions in feconf.ALL_LANGUAGE_CODES, generates
the corresponding short description.
"""
if ' (' not in full_language_description:
return full_language_description
else:
ind = full_language_description.find(' (')
return full_language_description[:ind]
def get_all_language_codes_and_names():
    """Returns a list of {'code', 'name'} dicts for all known languages."""
    codes_and_names = []
    for language in feconf.ALL_LANGUAGE_CODES:
        codes_and_names.append({
            'code': language['code'],
            'name': _get_short_language_description(language['description']),
        })
    return codes_and_names
def unescape_encoded_uri_component(escaped_string):
    """Unescape a string that is encoded with encodeURIComponent."""
    unquoted = urllib.unquote(escaped_string)
    return unquoted.decode('utf-8')
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
# NOTE: AutoRest-generated client class -- manual edits are lost when the
# code is regenerated.
class ListManagementImageListsOperations(object):
    """ListManagementImageListsOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = models
    def __init__(self, client, config, serializer, deserializer):
        # The serializer/deserializer are shared msrest objects owned by
        # the service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config
    def get_details(
            self, list_id, custom_headers=None, raw=False, **operation_config):
        """Returns the details of the image list with list Id equal to list Id
        passed.

        :param list_id: List Id of the image list.
        :type list_id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ImageList or ClientRawResponse if raw=true
        :rtype:
         ~azure.cognitiveservices.vision.contentmoderator.models.ImageList or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
        """
        # Construct URL
        url = self.get_details.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
            'listId': self._serialize.url("list_id", list_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        # Any non-200 response surfaces as an APIErrorException.
        if response.status_code not in [200]:
            raise models.APIErrorException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ImageList', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get_details.metadata = {'url': '/contentmoderator/lists/v1.0/imagelists/{listId}'}
    def delete(
            self, list_id, custom_headers=None, raw=False, **operation_config):
        """Deletes image list with the list Id equal to list Id passed.

        :param list_id: List Id of the image list.
        :type list_id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: str or ClientRawResponse if raw=true
        :rtype: str or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
            'listId': self._serialize.url("list_id", list_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.APIErrorException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('str', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    delete.metadata = {'url': '/contentmoderator/lists/v1.0/imagelists/{listId}'}
    def update(
            self, list_id, content_type, body, custom_headers=None, raw=False, **operation_config):
        """Updates an image list with list Id equal to list Id passed.

        :param list_id: List Id of the image list.
        :type list_id: str
        :param content_type: The content type.
        :type content_type: str
        :param body: Schema of the body.
        :type body:
         ~azure.cognitiveservices.vision.contentmoderator.models.Body
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ImageList or ClientRawResponse if raw=true
        :rtype:
         ~azure.cognitiveservices.vision.contentmoderator.models.ImageList or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
        """
        # Construct URL
        url = self.update.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
            'listId': self._serialize.url("list_id", list_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # The default JSON Content-Type above is overridden by the
        # caller-supplied content_type (generated-code pattern).
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        # Construct body
        body_content = self._serialize.body(body, 'Body')
        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.APIErrorException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ImageList', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    update.metadata = {'url': '/contentmoderator/lists/v1.0/imagelists/{listId}'}
    def create(
            self, content_type, body, custom_headers=None, raw=False, **operation_config):
        """Creates an image list.

        :param content_type: The content type.
        :type content_type: str
        :param body: Schema of the body.
        :type body:
         ~azure.cognitiveservices.vision.contentmoderator.models.Body
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ImageList or ClientRawResponse if raw=true
        :rtype:
         ~azure.cognitiveservices.vision.contentmoderator.models.ImageList or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
        """
        # Construct URL
        url = self.create.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Caller-supplied content_type replaces the default set above.
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        # Construct body
        body_content = self._serialize.body(body, 'Body')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.APIErrorException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ImageList', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    create.metadata = {'url': '/contentmoderator/lists/v1.0/imagelists'}
    def get_all_image_lists(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets all the Image Lists.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: list or ClientRawResponse if raw=true
        :rtype:
         list[~azure.cognitiveservices.vision.contentmoderator.models.ImageList]
         or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
        """
        # Construct URL
        url = self.get_all_image_lists.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.APIErrorException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[ImageList]', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get_all_image_lists.metadata = {'url': '/contentmoderator/lists/v1.0/imagelists'}
    def refresh_index_method(
            self, list_id, custom_headers=None, raw=False, **operation_config):
        """Refreshes the index of the list with list Id equal to list Id
        passed.

        :param list_id: List Id of the image list.
        :type list_id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: RefreshIndex or ClientRawResponse if raw=true
        :rtype:
         ~azure.cognitiveservices.vision.contentmoderator.models.RefreshIndex
         or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
        """
        # Construct URL
        url = self.refresh_index_method.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
            'listId': self._serialize.url("list_id", list_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.APIErrorException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('RefreshIndex', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    refresh_index_method.metadata = {'url': '/contentmoderator/lists/v1.0/imagelists/{listId}/RefreshIndex'}
| |
# Convert piecewise cubic into piecewise clothoid representation.
from math import *
import clothoid
import pcorn
import tocubic
import offset
def read_bz(f):
    """Parse a simple bezier path file into a list of subpaths.

    Each input line is "<coords...> <cmd>", where cmd is:
      m - moveto (starts a new subpath)
      l - lineto (2-point segment)
      c - curveto (4-point cubic segment)

    Returns a list of subpaths; each subpath is a list of segment tuples
    holding 2 (line) or 4 (cubic) [x, y] points.
    """
    result = []
    for line in f.xreadlines():
        fields = line.split()
        if not fields:
            continue
        cmd = fields[-1]
        if cmd == 'm':
            sp = []
            result.append(sp)
            curpt = [float(v) for v in fields[0:2]]
            # Remember the subpath start (kept for parity with original).
            startpt = curpt
        elif cmd == 'l':
            newpt = [float(v) for v in fields[0:2]]
            sp.append((curpt, newpt))
            curpt = newpt
        elif cmd == 'c':
            c1 = [float(v) for v in fields[0:2]]
            c2 = [float(v) for v in fields[2:4]]
            newpt = [float(v) for v in fields[4:6]]
            sp.append((curpt, c1, c2, newpt))
            curpt = newpt
    return result
def plot_bzs(bzs, z0, scale, fancy = False):
    """Emit PostScript that strokes the given bezier subpaths.

    Args:
        bzs: list of subpaths (lists of 2- or 4-point bezier tuples).
        z0: (x, y) translation applied after scaling.
        scale: uniform scale factor applied to every point.
        fancy: if True, also draw control points and control arms.
    """
    for sp in bzs:
        for i in range(len(sp)):
            bz = sp[i]
            tocubic.plot_bz(bz, z0, scale, i == 0)
        print 'stroke'
        if fancy:
            for i in range(len(sp)):
                bz = sp[i]
                x0, y0 = z0[0] + scale * bz[0][0], z0[1] + scale * bz[0][1]
                print 'gsave', x0, y0, 'translate circle fill grestore'
                if len(bz) == 4:
                    # Cubic segment: draw both control arms, then the three
                    # remaining control points as smaller circles.
                    x1, y1 = z0[0] + scale * bz[1][0], z0[1] + scale * bz[1][1]
                    x2, y2 = z0[0] + scale * bz[2][0], z0[1] + scale * bz[2][1]
                    x3, y3 = z0[0] + scale * bz[3][0], z0[1] + scale * bz[3][1]
                    print 'gsave 0.5 setlinewidth', x0, y0, 'moveto'
                    print x1, y1, 'lineto stroke'
                    print x2, y2, 'moveto'
                    print x3, y3, 'lineto stroke grestore'
                    print 'gsave', x1, y1, 'translate 0.75 dup scale circle fill grestore'
                    print 'gsave', x2, y2, 'translate 0.75 dup scale circle fill grestore'
                    print 'gsave', x3, y3, 'translate 0.75 dup scale circle fill grestore'
def measure_bz_cloth(seg, bz, n = 100):
    """Measure the angular mismatch between a bezier and a clothoid segment.

    Integrates ds * (tangent-angle error)^2 along the bezier using an
    n-step RK4, with bezier arclength rescaled to the segment arclength.
    """
    bz_arclen = tocubic.bz_arclength_rk4(bz)
    arclen_ratio = seg.arclen / bz_arclen
    dbz = tocubic.bz_deriv(bz)

    def measure_derivs(x, ys):
        dx, dy = tocubic.bz_eval(dbz, x)
        ds = hypot(dx, dy)
        s = ys[0] * arclen_ratio
        dscore = ds * (tocubic.mod_2pi(atan2(dy, dx) - seg.th(s)) ** 2)
        return [ds, dscore]

    step = 1. / n
    t = 0
    # ys[0] accumulates arclength, ys[1] the error score.
    ys = [0, 0]
    for _ in range(n):
        dydx = measure_derivs(t, ys)
        tocubic.rk4(ys, dydx, t, step, measure_derivs)
        t += step
    return ys[1]
def cubic_bz_to_pcorn(bz, thresh):
    """Convert one cubic bezier into a list of clothoid (pcorn) segments.

    A single segment is fitted whose end tangent angles match the
    bezier's control arms; if the measured error is at or above thresh,
    the bezier is subdivided at t=0.5 (de Casteljau) and each half is
    converted recursively with half the threshold.
    """
    chord_dx = bz[3][0] - bz[0][0]
    chord_dy = bz[3][1] - bz[0][1]
    arm0_dx = bz[1][0] - bz[0][0]
    arm0_dy = bz[1][1] - bz[0][1]
    arm1_dx = bz[3][0] - bz[2][0]
    arm1_dy = bz[3][1] - bz[2][1]
    chth = atan2(chord_dy, chord_dx)
    th0 = tocubic.mod_2pi(chth - atan2(arm0_dy, arm0_dx))
    th1 = tocubic.mod_2pi(atan2(arm1_dy, arm1_dx) - chth)
    seg = pcorn.Segment(bz[0], bz[3], th0, th1)
    if measure_bz_cloth(seg, bz) < thresh:
        return [seg]

    # de Casteljau subdivision at t = 0.5.
    def midpoint(p, q):
        return (0.5 * (p[0] + q[0]), 0.5 * (p[1] + q[1]))
    p01 = midpoint(bz[0], bz[1])
    p12 = midpoint(bz[1], bz[2])
    p23 = midpoint(bz[2], bz[3])
    pl2 = midpoint(p01, p12)
    pr1 = midpoint(p12, p23)
    pm = midpoint(pl2, pr1)
    segs = cubic_bz_to_pcorn([bz[0], p01, pl2, pm], 0.5 * thresh)
    segs.extend(cubic_bz_to_pcorn([pm, pr1, p23, bz[3]], 0.5 * thresh))
    return segs
def bzs_to_pcorn(bzs, thresh = 1e-9):
    """Convert bezier subpaths into pcorn segment subpaths.

    Args:
        bzs: list of subpaths, each a list of 2-point (line) or 4-point
            (cubic) bezier tuples, as produced by read_bz.
        thresh: error threshold forwarded to cubic_bz_to_pcorn.

    Returns:
        A list of subpaths, each a list of pcorn.Segment objects.
    """
    result = []
    for sp in bzs:
        rsp = []
        for bz in sp:
            if len(bz) == 2:
                # A straight line becomes a segment with zero end angles.
                # (Removed dead locals dx/dy/th that were computed here
                # but never used.)
                rsp.append(pcorn.Segment(bz[0], bz[1], 0, 0))
            else:
                rsp.extend(cubic_bz_to_pcorn(bz, thresh))
        result.append(rsp)
    return result
def plot_segs(segs):
    """Emit PostScript stroking a polyline through the segment endpoints,
    then mark every endpoint with a filled circle.
    """
    for i in range(len(segs)):
        seg = segs[i]
        if i == 0:
            print seg.z0[0], seg.z0[1], 'moveto'
        print seg.z1[0], seg.z1[1], 'lineto'
    print 'stroke'
    for i in range(len(segs)):
        seg = segs[i]
        if i == 0:
            print 'gsave', seg.z0[0], seg.z0[1], 'translate circle fill grestore'
        print 'gsave', seg.z1[0], seg.z1[1], 'translate circle fill grestore'
import sys
def test_to_pcorn():
    """Convert a quarter-circle bezier at successively tighter thresholds,
    plotting each result and reporting segment counts to stderr.
    """
    C1 = 0.55228  # standard cubic-bezier circle approximation constant
    bz = [(100, 100), (100 + 400 * C1, 100), (500, 500 - 400 * C1), (500, 500)]
    for i in range(0, 13):
        thresh = .1 ** i
        segs = cubic_bz_to_pcorn(bz, thresh)
        plot_segs(segs)
        print >> sys.stderr, thresh, len(segs)
        print '0 20 translate'
if __name__ == '__main__':
    # Usage: <script> <bezier-file>; writes annotated PostScript to stdout.
    f = file(sys.argv[1])
    bzs = read_bz(f)
    rsps = bzs_to_pcorn(bzs, 1)
    #print rsps
    tocubic.plot_prolog()
    print 'grestore'
    print '1 -1 scale 0 -720 translate'
    print '/ss 1.5 def'
    print '/circle { ss 0 moveto currentpoint exch ss sub exch ss 0 360 arc } bind def'
    tot = 0
    for segs in rsps:
        curve = pcorn.Curve(segs)
        #curve = offset.offset(curve, 10)
        print '%', curve.arclen
        print '%', curve.sstarts
        if 0:
            # Debug path (disabled): stroke the raw curve in red.
            print 'gsave 1 0 0 setrgbcolor'
            cmd = 'moveto'
            for i in range(100):
                s = i * .01 * curve.arclen
                x, y = curve.xy(s)
                th = curve.th(s)
                sth = 5 * sin(th)
                cth = 5 * cos(th)
                print x, y, cmd
                cmd = 'lineto'
            print 'closepath stroke grestore'
            for i in range(100):
                s = i * .01 * curve.arclen
                x, y = curve.xy(s)
                th = curve.th(s)
                sth = 5 * sin(th)
                cth = 5 * cos(th)
                if 0:
                    print x - cth, y - sth, 'moveto'
                    print x + cth, y + sth, 'lineto stroke'
        if 1:
            # Mark curvature breaks with green circles.
            for s in curve.find_breaks():
                print 'gsave 0 1 0 setrgbcolor'
                x, y = curve.xy(s)
                print x, y, 'translate 2 dup scale circle fill'
                print 'grestore'
        #plot_segs(segs)
        print 'gsave 0 0 0 setrgbcolor'
        optim = 3
        thresh = 1e-2
        new_bzs = tocubic.pcorn_curve_to_bzs(curve, optim, thresh)
        tot += len(new_bzs)
        plot_bzs([new_bzs], (0, 0), 1, True)
        print 'grestore'
    print 'grestore'
    print '/Helvetica 12 selectfont'
    print '36 720 moveto (thresh=%g optim=%d) show' % (thresh, optim)
    print '( tot segs=%d) show' % tot
    print 'showpage'
    #plot_bzs(bzs, (100, 100), 1)
| |
# encoding: utf-8
"""
operational/__init__.py
Created by Thomas Mangin on 2013-09-01.
Copyright (c) 2009-2013 Exa Networks. All rights reserved.
"""
from struct import pack
from struct import unpack
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
from exabgp.bgp.message.open.routerid import RouterID
from exabgp.bgp.message import Message
# ========================================================================= Type
#
MAX_ADVISORY = 2048 # 2K
class Type (int):
    """A 16-bit operational message type code."""

    def pack (self):
        # Encode as a network-order unsigned short.
        return pack('!H',self)

    def extract (self):
        return [self.pack()]

    def __len__ (self):
        # The wire encoding is always two bytes.
        return 2

    def __str__ (self):
        # Intentionally left as a placeholder returning None.
        pass
# ================================================================== Operational
#
class Operational (Message):
    """Base for BGP OPERATIONAL messages: 2-byte type, 2-byte length, payload.

    Concrete subclasses call register_operational() so unpack_message()
    can dispatch on the received type code.
    """
    ID = Message.ID.OPERATIONAL
    TYPE = chr(Message.ID.OPERATIONAL)
    # Maps type code -> (category, class); filled by register_operational.
    registered_operational = dict()
    # Capability flags, overridden by subclasses.
    has_family = False
    has_routerid = False
    is_fault = False
    class ID:
        __slots__ = []
        # ADVISE
        ADM  = 0x01  # 01: Advisory Demand Message
        ASM  = 0x02  # 02: Advisory Static Message
        # STATE
        RPCQ = 0x03  # 03: Reachable Prefix Count Request
        RPCP = 0x04  # 04: Reachable Prefix Count Reply
        APCQ = 0x05  # 05: Adj-Rib-Out Prefix Count Request
        APCP = 0x06  # 06: Adj-Rib-Out Prefix Count Reply
        LPCQ = 0x07  # 07: BGP Loc-Rib Prefix Count Request
        LPCP = 0x08  # 08: BGP Loc-Rib Prefix Count Reply
        SSQ  = 0x09  # 09: Simple State Request
        # DUMP
        DUP  = 0x0A  # 10: Dropped Update Prefixes
        MUP  = 0x0B  # 11: Malformed Update Prefixes
        MUD  = 0x0C  # 12: Malformed Update Dump
        SSP  = 0x0D  # 13: Simple State Response
        # CONTROL
        MP   = 0xFFFE  # 65534: Max Permitted
        NS   = 0xFFFF  # 65535: Not Satisfied
    def __init__ (self,what):
        Message.__init__(self)
        self.what = Type(what)
    def _message (self,data):
        # Prepend the 2-byte type and 2-byte payload length.
        return Message._message(self,"%s%s%s" % (
            self.what.pack(),
            pack('!H',len(data)),
            data
        ))
    def __str__ (self):
        return self.extensive()
    def extensive (self):
        return 'operational %s' % self.name
    @classmethod
    def register_operational (cls):
        # Record the subclass under its type code for unpack dispatch.
        cls.registered_operational[cls.code] = (cls.category,cls)
    @classmethod
    def unpack_message (cls,data,negotiated):
        what = Type(unpack('!H',data[0:2])[0])
        length = unpack('!H',data[2:4])[0]
        decode,klass = cls.registered_operational.get(what,('unknown',None))
        if decode == 'advisory':
            afi = unpack('!H',data[4:6])[0]
            safi = ord(data[6])
            data = data[7:length+4]
            return klass(afi,safi,data)
        elif decode == 'query':
            afi = unpack('!H',data[4:6])[0]
            safi = ord(data[6])
            routerid = RouterID.unpack(data[7:11])
            sequence = unpack('!L',data[11:15])[0]
            return klass(afi,safi,routerid,sequence)
        elif decode == 'counter':
            afi = unpack('!H',data[4:6])[0]
            safi = ord(data[6])
            routerid = RouterID.unpack(data[7:11])
            sequence = unpack('!L',data[11:15])[0]
            counter = unpack('!L',data[15:19])[0]
            return klass(afi,safi,routerid,sequence,counter)
        else:
            # NOTE(review): unregistered types are dropped (returns None)
            # after this debug print.
            print 'ignoring ATM this kind of message'
Operational.register_message()
# ============================================================ OperationalFamily
#
class OperationalFamily (Operational):
    """An operational message scoped to an (AFI, SAFI) address family."""

    has_family = True

    def __init__ (self,what,afi,safi,data=''):
        Operational.__init__(self,what)
        self.afi = AFI(afi)
        # Bug fix: the SAFI was previously constructed from the afi
        # argument (SAFI(afi)); it must be built from safi.
        self.safi = SAFI(safi)
        self.data = data

    def family (self):
        return (self.afi,self.safi)

    def _message (self,data):
        # Family payload: AFI (2 bytes), SAFI (1 byte), then the
        # type-specific data.
        return Operational._message(self,"%s%s%s" % (
            self.afi.pack(),
            self.safi.pack(),
            data
        ))

    def message (self,negotiated):
        return self._message(self.data)
# =================================================== SequencedOperationalFamily
#
class SequencedOperationalFamily (OperationalFamily):
    """A family-scoped operational message carrying a router-id and an
    auto-incrementing per-router sequence number."""

    __sequence_number = {}  # one counter per (sent) router-id

    has_routerid = True

    def __init__ (self,what,afi,safi,routerid,sequence,data=''):
        OperationalFamily.__init__(self,what,afi,safi,data)
        self.routerid = routerid if routerid else None
        self.sequence = sequence if sequence else None
        self._sequence = self.sequence
        self._routerid = self.routerid

    def message (self,negotiated):
        self.sent_routerid = self.routerid if self.routerid else negotiated.sent_open.router_id
        if self.sequence is None:
            # Bug fix: the counter must be read with the router-id that is
            # actually sent. It was previously read via self.routerid
            # (often None) but stored under self.sent_routerid, so the
            # sequence never advanced past 1 for auto-routerid messages.
            self.sent_sequence = (self.__sequence_number.setdefault(self.sent_routerid,0) + 1) % 0xFFFFFFFF
            self.__sequence_number[self.sent_routerid] = self.sent_sequence
        else:
            self.sent_sequence = self.sequence
        return self._message("%s%s%s" % (
            self.sent_routerid.pack(),pack('!L',self.sent_sequence),
            self.data
        ))
# =========================================================================== NS
#
class NS:
    """Error subcodes carried in Not-Satisfied (NS) operational replies."""
    MALFORMED = 0x01    # Request TLV Malformed
    UNSUPPORTED = 0x02  # TLV Unsupported for this neighbor
    MAXIMUM = 0x03      # Max query frequency exceeded
    PROHIBITED = 0x04   # Administratively prohibited
    BUSY = 0x05         # Busy
    NOTFOUND = 0x06     # Not Found
class _NS (OperationalFamily):
    """Base for Not-Satisfied replies: a family message whose payload is
    the request sequence followed by a two-byte error subcode."""

    is_fault = True

    def __init__ (self,afi,safi,sequence):
        payload = '%s%s' % (sequence,self.ERROR_SUBCODE)
        OperationalFamily.__init__(
            self,
            Operational.ID.NS,
            afi,safi,
            payload
        )

    def extensive (self):
        return 'operational NS %s %s/%s' % (self.name,self.afi,self.safi)
class Malformed (_NS):
    """NS reply: the request TLV was malformed."""
    name = 'NS malformed'
    ERROR_SUBCODE = '\x00\x01'  # pack('!H',MALFORMED)
class Unsupported (_NS):
    """NS reply: the TLV is unsupported for this neighbor."""
    name = 'NS unsupported'
    ERROR_SUBCODE = '\x00\x02'  # pack('!H',UNSUPPORTED)
class Maximum (_NS):
    """NS reply: the maximum query frequency was exceeded."""
    name = 'NS maximum'
    ERROR_SUBCODE = '\x00\x03'  # pack('!H',MAXIMUM)
class Prohibited (_NS):
    """NS reply: the request is administratively prohibited."""
    name = 'NS prohibited'
    ERROR_SUBCODE = '\x00\x04'  # pack('!H',PROHIBITED)
class Busy (_NS):
    """NS reply: the peer is busy."""
    name = 'NS busy'
    ERROR_SUBCODE = '\x00\x05'  # pack('!H',BUSY)
class NotFound (_NS):
    """NS reply: the requested data was not found."""
    name = 'NS notfound'
    ERROR_SUBCODE = '\x00\x06'  # pack('!H',NOTFOUND)
# ===================================================================== Advisory
#
class Advisory:
    """Namespace for the advisory operational messages (ADM/ASM)."""

    class _Advisory (OperationalFamily):
        category = 'advisory'

        def extensive (self):
            return 'operational %s afi %s safi %s "%s"' % (self.name,self.afi,self.safi,self.data)

    class ADM (_Advisory):
        name = 'ADM'
        code = Operational.ID.ADM

        def __init__ (self,afi,safi,advisory,routerid=None):
            utf8 = advisory.encode('utf-8')
            if len(utf8) > MAX_ADVISORY:
                # Truncate oversized advisories, leaving room for '...'.
                utf8 = utf8[:MAX_ADVISORY-3] + '...'.encode('utf-8')
            OperationalFamily.__init__(
                self,self.code,
                afi,safi,
                utf8
            )

    class ASM (_Advisory):
        name = 'ASM'
        code = Operational.ID.ASM

        def __init__ (self,afi,safi,advisory,routerid=None):
            utf8 = advisory.encode('utf-8')
            if len(utf8) > MAX_ADVISORY:
                # Truncate oversized advisories, leaving room for '...'.
                utf8 = utf8[:MAX_ADVISORY-3] + '...'.encode('utf-8')
            OperationalFamily.__init__(
                self,self.code,
                afi,safi,
                utf8
            )
# Make the advisory classes resolvable by Operational.unpack_message.
Advisory.ADM.register_operational()
Advisory.ASM.register_operational()
# a = Advisory.ADM(1,1,'string 1')
# print a.extensive()
# b = Advisory.ASM(1,1,'string 2')
# print b.extensive()
# ======================================================================== Query
#
class Query:
    """Namespace for the prefix-count query messages (RPCQ/APCQ/LPCQ)."""

    class _Query (SequencedOperationalFamily):
        category = 'query'

        def __init__ (self,afi,safi,routerid,sequence):
            SequencedOperationalFamily.__init__(
                self,self.code,
                afi,safi,
                routerid,sequence
            )

        def extensive (self):
            if not (self._routerid and self._sequence):
                return 'operational %s afi %s safi %s' % (self.name,self.afi,self.safi)
            return 'operational %s afi %s safi %s router-id %s sequence %d' % (
                self.name,
                self.afi,self.safi,
                self._routerid,self._sequence,
            )

    class RPCQ (_Query):
        name = 'RPCQ'
        code = Operational.ID.RPCQ

    class APCQ (_Query):
        name = 'APCQ'
        code = Operational.ID.APCQ

    class LPCQ (_Query):
        name = 'LPCQ'
        code = Operational.ID.LPCQ
# Make the query classes resolvable by Operational.unpack_message.
Query.RPCQ.register_operational()
Query.APCQ.register_operational()
Query.LPCQ.register_operational()
# ===================================================================== Response
#
class Response:
    """Namespace for operational counter responses (RPCP/APCP/LPCP)."""
    class _Counter (SequencedOperationalFamily):
        # Common base for the counter replies; subclasses supply `name`/`code`.
        category = 'counter'
        def __init__ (self,afi,safi,routerid,sequence,counter):
            self.counter = counter
            SequencedOperationalFamily.__init__(
                self,self.code,
                afi,safi,
                routerid,sequence,
                pack('!L',counter)
            )
        def extensive (self):
            # NOTE(review): a sequence value of 0 is falsy and selects the
            # short form even when a router-id is present — confirm intended.
            if self._routerid and self._sequence:
                return 'operational %s afi %s safi %s router-id %s sequence %d counter %d' % (
                    self.name,
                    self.afi,self.safi,
                    self._routerid,self._sequence,
                    self.counter
                )
            return 'operational %s afi %s safi %s counter %d' % (self.name,self.afi,self.safi,self.counter)
    class RPCP (_Counter):
        name = 'RPCP'
        code = Operational.ID.RPCP
    class APCP (_Counter):
        name = 'APCP'
        code = Operational.ID.APCP
    class LPCP (_Counter):
        name = 'LPCP'
        code = Operational.ID.LPCP
# Register the counter replies with the Operational message factory.
Response.RPCP.register_operational()
Response.APCP.register_operational()
Response.LPCP.register_operational()
# c = Query.RPCQ(1,1,'82.219.0.1',10)
# print c.extensive()
# d = Response.RPCP(1,1,'82.219.0.1',10,10000)
# print d.extensive()
# ========================================================================= Dump
#
class Dump:
    # Placeholder: dump-category operational messages are not implemented yet.
    pass
| |
from ..Base import Display
from ..Base.Display import DisplayBase
from tornado.web import Application
import tornado
from IPython.core.display import Javascript as JS
from IPython.core.display import display
from time import sleep
import os
import threading
import tempfile
import tornado.testing
from ..Base import Line,Circle,Rectangle,Polygon,Ellipse,Bezier,Text
def getPCode(img):
    """Build a Processing.js drawing-code string for *img*.

    Emits a ``size(...)`` call followed by stroke/weight/smoothing commands
    for every shape on every layer. Only ``Line`` shapes currently produce a
    drawing primitive; other shape types contribute their stroke state only.
    """
    parts = ['size(%d,%d);' % (img.width, img.height)]
    for layer in img.layers():
        for shape in layer.shapes():
            parts.append('stroke(%d,%d,%d,%d);' % (shape.color + (shape.alpha,)))
            parts.append('strokeWeight(%d);' % shape.width)
            parts.append('smooth();' if shape.antialias else 'noSmooth();')
            if type(shape) is Line:
                parts.append('line(%d,%d,%d,%d);' % (shape.start + shape.stop))
    return ''.join(parts)
class NBDisplay(Display.DisplayBase):
    """IPython-notebook display backend.

    Serves saved images from a temp directory through a per-process tornado
    server and drives a popup browser window via injected Javascript.
    """
    _templateFile = "template.html"  # HTML template injected into the popup window
    """
    """
    # Class-level (shared) state for the single tornado server:
    init = False          # True once the shared tornado server has been started
    app = None            # shared tornado Application serving /display and /static
    displayDir = None     # temp dir where images are written for serving
    staticDir = None      # package 'static' dir with JS/CSS assets
    __uidCounter__ = 0    # source of unique per-display ids
    port = None           # port the shared tornado server listens on
    __cache__ = 20        # number of on-disk image slots (newer images overwrite old)

    def name(self):
        # NOTE(review): this local assignment has no effect; presumably it was
        # meant to copy the base-class docstring onto this method — confirm.
        __doc__ = DisplayBase.name.__doc__
        return "NotebookDisplay"

    def __init__(self,size = (640,480),type_ = Display.DEFAULT,title = "SimpleCV",fit = Display.SCROLL,delay = .500):
        """
        **SUMMARY**

        Opens up a display in a window.

        d = Display()

        **PARAMETERS**

        * *size* - the size of the display in pixels.
        * *type_* - unused right now
        * *title* - the title bar on the display, if there exists one.
        * *fit* - unused right now
        * *delay* - delay in seconds for which to wait to let the browser load the window

        **EXAMPLE**

        >>> display = Display(type_ = FULLSCREEN,fit = SCROLL)
        >>> img = Image('lenna')
        >>> img.save(display)
        """
        if( not NBDisplay.init):
            #check if a tornado server exists, if not , create one
            NBDisplay.init = True
            #dir in which images are stored
            NBDisplay.displayDir = tempfile.mkdtemp()
            NBDisplay.staticDir = os.path.dirname(__file__) + os.sep + 'static'
            handlers = [
                (r"/display/(.*)", tornado.web.StaticFileHandler, {"path": NBDisplay.displayDir}) ,
                (r"/static/(.*)", tornado.web.StaticFileHandler, {"path": NBDisplay.staticDir})
            ]
            NBDisplay.app = Application(handlers)
            NBDisplay.port = tornado.testing.get_unused_port()
            NBDisplay.app.listen(NBDisplay.port)
            #print NBDisplay.port
            #start a thread for tornado
            threading.Thread(target=tornado.ioloop.IOLoop.instance().start).start()
        #the unique id for each Display
        self.__uid__ = NBDisplay.__uidCounter__
        NBDisplay.__uidCounter__ += 1
        # load the template HTML and replace params in it
        fn = os.path.dirname(__file__) + os.sep + NBDisplay._templateFile
        tmp = open(fn)
        raw_lines = tmp.readlines()
        tmp.close()
        # flatten the template to a single line so it survives document.write()
        lines = [line.replace('\n','') for line in raw_lines]
        template = ''.join(lines)
        template = template.replace('__port__',str(NBDisplay.port))
        template = template.replace('__cache__',str(NBDisplay.__cache__))
        options = {}
        options['width'],options['height'] = size
        options['code'] = template
        options['title'] = title
        options['id'] = self.getUID()
        #this pops up a window
        self.startStr = r"""
        window.disp%(id)s = window.open('','%(title)s','width=%(width)s,height=%(height)s')
        window.disp%(id)s.document.write("%(code)s")
        """ % options
        display(JS(self.startStr))
        #otherwise the browser complains if showImage is called right after this
        sleep(delay)

    def close(self):
        # Close the browser popup associated with this display.
        Display.DisplayBase.close(self)
        command = 'window.disp%d.close()' % (self.getUID() )
        display(JS(command))

    def showImage(self,img):
        """
        **SUMMARY**

        Show the image.

        **PARAMETERS**

        * *img* - a SimpleCV Image object to be displayed

        **Example**

        >>> img = Image('lenna')
        >>> d = Display()
        >>> d.showImage(img)
        """
        # so that only the last few images are saved, newer ones over-write the old ones
        img.save(NBDisplay.displayDir + os.sep + str(img.getUID() % NBDisplay.__cache__) + '.png', draw = False )
        #print uid%10
        options = {}
        options['imageID'] = img.getUID()
        options['width'] = img.width
        options['height'] = img.height
        options['displayID'] = self.getUID()
        command = "window.disp%(displayID)s.show(%(imageID)s,%(width)s,%(height)s)" % options
        #print command
        #pass the id to javascript and do the rest there
        drawCode = getPCode(img)
        drawCommand = "window.disp%d.CODE = '%s'" % (self.getUID(),drawCode)
        display(JS(drawCommand))
        display(JS(command))
        #sleep(1)

    def mousePosition(self):
        """
        **SUMMARY**

        Returns the mouse pointer position as a tuple of (x,y), with respect to
        the image coordinates

        **RETURNS**

        An (x,y) mouse position tuple. (Not implemented for the notebook backend.)
        """
        pass

    def mousePositionRaw(self):
        """
        **SUMMARY**

        Returns the mouse pointer position as a tuple of (x,y), with respect to
        the display coordinates

        **RETURNS**

        An (x,y) mouse position tuple. (Not implemented for the notebook backend.)
        """
        pass

    def leftDown(self):
        """
        **SUMMARY**

        Returns the position where the left mouse button last went down, None
        if it didn't since the last time this function was called

        **RETURNS**

        An (x,y) mouse position tuple where the left mouse button went down.
        (Not implemented for the notebook backend.)
        """

    def leftUp(self):
        """
        **SUMMARY**

        Returns the position where the left mouse button last went up, None
        if it didn't since the last time this function was called

        **RETURNS**

        An (x,y) mouse position tuple where the left mouse button went up.
        (Not implemented for the notebook backend.)
        """

    def rightDown(self):
        """
        **SUMMARY**

        Returns the position where the right mouse button last went down, None
        if it didn't since the last time this function was called

        **RETURNS**

        An (x,y) mouse position tuple where the right mouse button went down.
        (Not implemented for the notebook backend.)
        """

    def rightUp(self):
        """
        **SUMMARY**

        Returns the position where the right mouse button last went up, None
        if it didn't since the last time this function was called

        **RETURNS**

        An (x,y) mouse position tuple where the right mouse button went up.
        (Not implemented for the notebook backend.)
        """

    def middleDown(self):
        """
        **SUMMARY**

        Returns the position where the middle mouse button last went down, None
        if it didn't since the last time this function was called

        **RETURNS**

        An (x,y) mouse position tuple where the middle mouse button went down.
        (Not implemented for the notebook backend.)
        """

    def middleUp(self):
        """
        **SUMMARY**

        Returns the position where the middle mouse button last went up, None
        if it didn't since the last time this function was called

        **RETURNS**

        An (x,y) mouse position tuple where the middle mouse button went up.
        (Not implemented for the notebook backend.)
        """

    def getUID(self):
        # Unique id assigned at construction; names the JS window handle.
        return self.__uid__
| |
# -*- coding: utf-8 -*-
from __future__ import division

import copy
import functools
import logging
import math
import re
import unicodedata

import six
from elasticsearch import (
    Elasticsearch,
    RequestError,
    NotFoundError,
    ConnectionError,
    helpers,
)

from framework import sentry
from website import settings
from website.filters import gravatar
from website.models import User, Node
from website.search import exceptions
from website.search.util import build_query
from website.util import sanitize
from website.views import validate_page_num
logger = logging.getLogger(__name__)

# These are the doc_types that exist in the search database
ALIASES = {
    'project': 'Projects',
    'component': 'Components',
    'registration': 'Registrations',
    'user': 'Users',
    'total': 'Total'
}

# Prevent tokenizing and stop word removal.
NOT_ANALYZED_PROPERTY = {'type': 'string', 'index': 'not_analyzed'}

# Perform stemming on the field it's applied to.
ENGLISH_ANALYZER_PROPERTY = {'type': 'string', 'analyzer': 'english'}

# Name of the elasticsearch index all documents live in.
INDEX = settings.ELASTIC_INDEX

# Build the module-level client eagerly. On failure `es` is left as None and
# every @requires_search-decorated function raises SearchUnavailableError.
try:
    es = Elasticsearch(
        settings.ELASTIC_URI,
        request_timeout=settings.ELASTIC_TIMEOUT
    )
    # Quiet the chatty transport/client loggers.
    logging.getLogger('elasticsearch').setLevel(logging.WARN)
    logging.getLogger('elasticsearch.trace').setLevel(logging.WARN)
    logging.getLogger('urllib3').setLevel(logging.WARN)
    logging.getLogger('requests').setLevel(logging.WARN)
    # Fail fast if the cluster is unreachable or unhealthy.
    es.cluster.health(wait_for_status='yellow')
except ConnectionError as e:
    sentry.log_exception()
    sentry.log_message("The SEARCH_ENGINE setting is set to 'elastic', but there "
                       "was a problem starting the elasticsearch interface. Is "
                       "elasticsearch running?")
    es = None
def requires_search(func):
    """Decorator translating low-level elasticsearch errors into search exceptions.

    If the module-level client ``es`` failed to initialize, the wrapped call
    raises ``SearchUnavailableError`` immediately. ``functools.wraps`` is now
    applied so the wrapped function keeps its name and docstring (the original
    decorator lost them).
    """
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        # Guard clause: no client means search was never available.
        if es is None:
            sentry.log_message('Elastic search action failed. Is elasticsearch running?')
            raise exceptions.SearchUnavailableError("Failed to connect to elasticsearch")
        try:
            return func(*args, **kwargs)
        except ConnectionError:
            raise exceptions.SearchUnavailableError('Could not connect to elasticsearch')
        except NotFoundError as e:
            raise exceptions.IndexNotFoundError(e.error)
        except RequestError as e:
            if 'ParseException' in e.error:
                raise exceptions.MalformedQueryError(e.error)
            raise exceptions.SearchException(e.error)
    return wrapped
@requires_search
def get_counts(count_query, clean=True):
    """Return per-doc_type hit counts for *count_query*.

    :param count_query: elasticsearch query body; mutated in place (a terms
        aggregation on ``_type`` is added)
    :param clean: unused; kept for backward compatibility with callers
    :return: dict mapping each known doc_type (see ALIASES) to its count,
        plus a 'total' key summing them
    """
    count_query['aggregations'] = {
        'counts': {
            'terms': {
                'field': '_type',
            }
        }
    }

    # search_type='count' returns only aggregations, no hits.
    res = es.search(index=INDEX, doc_type=None, search_type='count', body=count_query)
    counts = {x['key']: x['doc_count'] for x in res['aggregations']['counts']['buckets'] if x['key'] in ALIASES.keys()}

    # sum() takes the iterable directly; no intermediate list needed.
    counts['total'] = sum(counts.values())
    return counts
@requires_search
def get_tags(query, index):
    """Return the tag-cloud buckets for *query* against *index*.

    The query body is mutated in place: a terms aggregation on the ``tags``
    field is added before the search is executed.
    """
    query['aggregations'] = {
        'tag_cloud': {
            'terms': {'field': 'tags'}
        }
    }
    res = es.search(index=index, doc_type=None, body=query)
    return res['aggregations']['tag_cloud']['buckets']
@requires_search
def search(query, index=None, doc_type='_all'):
    """Search for a query
    :param query: The substring of the username/project name/tag to search for
    :param index: elasticsearch index to query (defaults to the module-level INDEX)
    :param doc_type: doc_type to restrict the query to ('_all' searches every type)
    :return: List of dictionaries, each containing the results, counts, tags and typeAliases
        results: All results returned by the query, that are within the index and search type
        counts: A dictionary in which keys are types and values are counts for that type, e.g, count['total'] is the sum of the other counts
        tags: A list of tags that are returned by the search query
        typeAliases: the doc_types that exist in the search database
    """
    index = index or INDEX
    # Deep-copy before stripping pagination keys so the caller's query (still
    # needed, with pagination, for the real search below) is untouched.
    tag_query = copy.deepcopy(query)
    count_query = copy.deepcopy(query)

    # Tag and count sub-queries must see the whole result set, not one page.
    for key in ['from', 'size', 'sort']:
        try:
            del tag_query[key]
            del count_query[key]
        except KeyError:
            pass

    tags = get_tags(tag_query, index)
    counts = get_counts(count_query, index)

    # Run the real query and get the results
    raw_results = es.search(index=index, doc_type=doc_type, body=query)
    results = [hit['_source'] for hit in raw_results['hits']['hits']]
    return_value = {
        'results': format_results(results),
        'counts': counts,
        'tags': tags,
        'typeAliases': ALIASES
    }
    return return_value
def format_results(results):
    """Post-process raw search hits into view-ready dicts.

    User documents get a profile URL; project-like documents are expanded via
    ``format_result``. Anything else passes through unchanged.
    """
    formatted = []
    for hit in results:
        category = hit.get('category')
        if category == 'user':
            hit['url'] = '/profile/' + hit['id']
        elif category in {'project', 'component', 'registration'}:
            hit = format_result(hit, hit.get('parent_id'))
        formatted.append(hit)
    return formatted
def format_result(result, parent_id=None):
    """Format a project-like search document for display.

    :param result: raw elasticsearch ``_source`` dict for a
        project/component/registration
    :param parent_id: id of the parent node, if any
    :return: dict of display fields; registration status and parent fields
        are inherited from the parent when one can be loaded
    """
    parent_info = load_parent(parent_id)
    formatted_result = {
        'contributors': result['contributors'],
        'wiki_link': result['url'] + 'wiki/',
        # TODO: Remove safe_unescape_html when mako html safe comes in
        'title': sanitize.safe_unescape_html(result['title']),
        'url': result['url'],
        # A node with a resolvable parent is a component
        # (was `False if parent_info is None else True`).
        'is_component': parent_info is not None,
        'parent_title': sanitize.safe_unescape_html(parent_info.get('title')) if parent_info else None,
        'parent_url': parent_info.get('url') if parent_info is not None else None,
        'tags': result['tags'],
        'is_registration': (result['is_registration'] if parent_info is None
                            else parent_info.get('is_registration')),
        'is_retracted': result['is_retracted'],
        'pending_retraction': result['pending_retraction'],
        'embargo_end_date': result['embargo_end_date'],
        'pending_embargo': result['pending_embargo'],
        # Components show their parent's context, so drop their own description.
        'description': result['description'] if parent_info is None else None,
        'category': result.get('category'),
        'date_created': result.get('date_created'),
        'date_registered': result.get('registered_date'),
        'n_wikis': len(result['wikis'])
    }
    return formatted_result
def load_parent(parent_id):
    """Load display info for a parent node.

    :param parent_id: id of the parent Node, or None
    :return: dict with title/url/is_registration/id (masked for private
        parents), or None when the parent cannot be loaded

    The original re-tested ``parent is not None`` right after the early
    return made it impossible to be None; that dead check is removed.
    """
    parent = Node.load(parent_id)
    if parent is None:
        return None
    if parent.is_public:
        return {
            'title': parent.title,
            'url': parent.url,
            'is_registration': parent.is_registration,
            'id': parent._id,
        }
    # Hide details of private parents.
    return {
        'title': '-- private project --',
        'url': '',
        'is_registration': None,
        'id': None,
    }
# Every node category except 'project' is treated as a component
# (was a list-comprehension-inside-set with `not k == 'project'`).
COMPONENT_CATEGORIES = set(Node.CATEGORY_MAP) - {'project'}
def get_doctype_from_node(node):
    """Map *node* to its elasticsearch doc_type.

    Component categories win over registration status; everything else is
    indexed under its own category name.
    """
    if node.category in COMPONENT_CATEGORIES:
        return 'component'
    if node.is_registration:
        return 'registration'
    return node.category
@requires_search
def update_node(node, index=None):
    """Create/update (or delete) the search document for *node*.

    Deleted, private, or archiving nodes are removed from the index; all
    other nodes are (re-)indexed under their doc_type.
    """
    index = index or INDEX
    # local import — presumably to avoid a circular import at module load; confirm
    from website.addons.wiki.model import NodeWikiPage

    category = get_doctype_from_node(node)

    if category == 'project':
        elastic_document_id = node._id
        parent_id = None
    else:
        try:
            elastic_document_id = node._id
            parent_id = node.parent_id
        except IndexError:
            # Skip orphaned components
            return
    if node.is_deleted or not node.is_public or node.archiving:
        delete_doc(elastic_document_id, node)
    else:
        try:
            normalized_title = six.u(node.title)
        except TypeError:
            normalized_title = node.title  # already unicode
        # ASCII-fold the title so diacritics don't break matching.
        normalized_title = unicodedata.normalize('NFKD', normalized_title).encode('ascii', 'ignore')
        elastic_document = {
            'id': elastic_document_id,
            'contributors': [
                {
                    'fullname': x.fullname,
                    'url': x.profile_url if x.is_active else None
                }
                for x in node.visible_contributors
                if x is not None
            ],
            'title': node.title,
            'normalized_title': normalized_title,
            'category': category,
            'public': node.is_public,
            'tags': [tag._id for tag in node.tags if tag],
            'description': node.description,
            'url': node.url,
            'is_registration': node.is_registration,
            'is_retracted': node.is_retracted,
            'pending_retraction': node.pending_retraction,
            'embargo_end_date': node.embargo_end_date.strftime("%A, %b. %d, %Y") if node.embargo_end_date else False,
            'pending_embargo': node.pending_embargo,
            'registered_date': node.registered_date,
            'wikis': {},
            'parent_id': parent_id,
            'date_created': node.date_created,
            'boost': int(not node.is_registration) + 1,  # This is for making registered projects less relevant
        }
        if not node.is_retracted:
            # Retracted registrations must not expose wiki content.
            for wiki in [
                NodeWikiPage.load(x)
                for x in node.wiki_pages_current.values()
            ]:
                elastic_document['wikis'][wiki.page_name] = wiki.raw_text(node)
        es.index(index=index, doc_type=category, id=elastic_document_id, body=elastic_document, refresh=True)
def bulk_update_contributors(nodes, index=None):
    """Updates only the list of contributors of input projects

    :param nodes: Projects, components or registrations
    :param index: Index of the nodes (defaults to the module-level INDEX)
    :return: result of the elasticsearch bulk call

    The index default is now resolved at call time (``index or INDEX``) for
    consistency with the other functions in this module; the old
    ``index=INDEX`` default was bound once at import time.
    """
    index = index or INDEX
    actions = []
    for node in nodes:
        actions.append({
            '_op_type': 'update',
            '_index': index,
            '_id': node._id,
            '_type': get_doctype_from_node(node),
            'doc': {
                'contributors': [
                    {
                        'fullname': user.fullname,
                        'url': user.profile_url if user.is_active else None
                    } for user in node.visible_contributors
                    if user is not None
                    and user.is_active
                ]
            }
        })
    return helpers.bulk(es, actions)
@requires_search
def update_user(user, index=None):
    """Create/update (or remove) the search document for *user*.

    Inactive users are deleted from the index instead of being indexed.
    """
    index = index or INDEX
    if not user.is_active:
        try:
            es.delete(index=index, doc_type='user', id=user._id, refresh=True, ignore=[404])
        except NotFoundError:
            pass  # already absent — nothing to do
        return

    names = dict(
        fullname=user.fullname,
        given_name=user.given_name,
        family_name=user.family_name,
        middle_names=user.middle_names,
        suffix=user.suffix
    )

    # ASCII-fold each name variant so diacritics don't break matching.
    normalized_names = {}
    for key, val in names.items():
        if val is not None:
            try:
                val = six.u(val)
            except TypeError:
                pass  # This is fine, will only happen in 2.x if val is already unicode
            normalized_names[key] = unicodedata.normalize('NFKD', val).encode('ascii', 'ignore')

    user_doc = {
        'id': user._id,
        'user': user.fullname,
        'normalized_user': normalized_names['fullname'],
        'normalized_names': normalized_names,
        'names': names,
        'job': user.jobs[0]['institution'] if user.jobs else '',
        'job_title': user.jobs[0]['title'] if user.jobs else '',
        'all_jobs': [job['institution'] for job in user.jobs[1:]],
        'school': user.schools[0]['institution'] if user.schools else '',
        'all_schools': [school['institution'] for school in user.schools],
        'category': 'user',
        'degree': user.schools[0]['degree'] if user.schools else '',
        'social': user.social_links,
        'boost': 2,  # TODO(fabianvf): Probably should make this a constant or something
    }

    es.index(index=index, doc_type='user', body=user_doc, id=user._id, refresh=True)
@requires_search
def delete_all():
    """Delete the entire search index (all doc_types)."""
    delete_index(INDEX)
@requires_search
def delete_index(index):
    """Drop *index*; a missing index (404) is silently ignored."""
    es.indices.delete(index, ignore=[404])
@requires_search
def create_index(index=None):
    '''Creates index with some specified mappings to begin with,
    all of which are applied to all projects, components, and registrations.
    '''
    index = index or INDEX
    es.indices.create(index, ignore=[400])  # 400: index already exists

    # Boosted per-field mappings applied only to the 'user' doc_type.
    user_fields = {
        'job': {
            'type': 'string',
            'boost': '1',
        },
        'all_jobs': {
            'type': 'string',
            'boost': '0.01',
        },
        'school': {
            'type': 'string',
            'boost': '1',
        },
        'all_schools': {
            'type': 'string',
            'boost': '0.01'
        },
    }

    for doc_type in ('project', 'component', 'registration', 'user'):
        properties = {'tags': NOT_ANALYZED_PROPERTY}
        if doc_type == 'user':
            properties.update(user_fields)
        else:
            # Project-like types get English stemming on the text fields.
            for field in ('title', 'description'):
                properties[field] = ENGLISH_ANALYZER_PROPERTY
        es.indices.put_mapping(index=index, doc_type=doc_type, body={'properties': properties}, ignore=[400, 404])
@requires_search
def delete_doc(elastic_document_id, node, index=None, category=None):
    """Remove a single document from the index.

    :param elastic_document_id: id of the document to delete
    :param node: node the document belongs to (used to derive the doc_type)
    :param index: index to delete from (defaults to the module-level INDEX)
    :param category: explicit doc_type; derived from the node when omitted

    Bug fix: ``category or 'registration' if node.is_registration else ...``
    parsed as ``(category or 'registration') if node.is_registration else ...``,
    which ignored an explicitly passed *category* for non-registration nodes.
    """
    index = index or INDEX
    if category is None:
        category = 'registration' if node.is_registration else node.project_or_component
    es.delete(index=index, doc_type=category, id=elastic_document_id, refresh=True, ignore=[404])
@requires_search
def search_contributor(query, page=0, size=10, exclude=None, current_user=None):
    """Search for contributors to add to a project using elastic search. Request must
    include JSON data with a "query" field.

    :param query: The substring of the username to search for
    :param page: For pagination, the page number to use for results
    :param size: For pagination, the number of results per page
    :param exclude: A list of User objects to exclude from the search
    :param current_user: A User object of the current user
    :return: List of dictionaries, each containing the ID, full name,
        most recent employment and education, gravatar URL of an OSF user

    Bug fix: ``n_projects_in_common`` was computed before checking whether the
    user document could be loaded, so a stale search document (``User.load``
    returning None) crashed the request instead of being skipped.
    """
    start = (page * size)
    items = re.split(r'[\s-]+', query)
    exclude = exclude or []

    # ASCII-fold every search term so diacritics don't break matching.
    normalized_items = []
    for item in items:
        try:
            normalized_item = six.u(item)
        except TypeError:
            normalized_item = item  # already unicode
        normalized_item = unicodedata.normalize('NFKD', normalized_item).encode('ascii', 'ignore')
        normalized_items.append(normalized_item)
    items = normalized_items

    # Fuzzy-prefix-match every term; explicitly exclude the given user ids.
    query = " AND ".join('{}*~'.format(re.escape(item)) for item in items) + \
            "".join(' NOT id:"{}"'.format(excluded._id) for excluded in exclude)

    results = search(build_query(query, start=start, size=size), index=INDEX, doc_type='user')
    docs = results['results']
    pages = math.ceil(results['counts'].get('user', 0) / size)
    validate_page_num(page, pages)

    users = []
    for doc in docs:
        # TODO: use utils.serialize_user
        user = User.load(doc['id'])

        # Skip stale search documents before touching the user object.
        if user is None:
            logger.error('Could not load user {0}'.format(doc['id']))
            continue

        if current_user:
            n_projects_in_common = current_user.n_projects_in_common(user)
        else:
            n_projects_in_common = 0

        if user.is_active:  # exclude merged, unregistered, etc.
            current_employment = None
            education = None

            if user.jobs:
                current_employment = user.jobs[0]['institution']

            if user.schools:
                education = user.schools[0]['institution']

            users.append({
                'fullname': doc['user'],
                'id': doc['id'],
                'employment': current_employment,
                'education': education,
                'n_projects_in_common': n_projects_in_common,
                'gravatar_url': gravatar(
                    user,
                    use_ssl=True,
                    size=settings.GRAVATAR_SIZE_ADD_CONTRIBUTOR,
                ),
                'profile_url': user.profile_url,
                'registered': user.is_registered,
                'active': user.is_active
            })

    return {
        'users': users,
        'total': results['counts']['total'],
        'pages': pages,
        'page': page,
    }
| |
#!/usr/bin/env python
#
# Use the raw transactions API to spend surcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a surcoind or Surcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(value)))
    satoshis = int(round_tripped * 1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the surcoin data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Surcoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Surcoin")
    # Anything else (Linux, BSD, ...) uses the dotted home directory.
    return os.path.expanduser("~/.surcoin")
def read_bitcoin_config(dbdir):
    """Read the surcoin.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser  # Python 2 module name (configparser in 3.x)

    class FakeSecHead(object):
        # ConfigParser requires section headers but surcoin.conf has none;
        # this wrapper injects a fake [all] section ahead of the real stream.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                try: return self.sechead
                finally: self.sechead = None  # emit the fake header exactly once
            else:
                s = self.fp.readline()
                # Strip trailing '#' comments, which ConfigParser chokes on.
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s

    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "surcoin.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a surcoin JSON-RPC server

    :param config: dict of surcoin.conf settings (rpcuser, rpcpassword, ...)
    :return: connected ServiceProxy; exits the process on failure
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 17904 if testnet else 17903
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the surcoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except Exception:
        # Was a bare `except:`, which also caught the SystemExit raised just
        # above on a testnet mismatch and misreported it as a connection error.
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(surcoind):
    """Ensure the wallet is unlocked, prompting for the passphrase if needed.

    :param surcoind: JSON-RPC proxy to the running surcoind
    :return: True when the wallet is unencrypted or successfully unlocked
    """
    info = surcoind.getinfo()
    if 'unlocked_until' not in info:
        return True  # wallet is not encrypted
    t = int(info['unlocked_until'])

    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            # Unlock for 5 seconds — long enough to sign one transaction.
            surcoind.walletpassphrase(passphrase, 5)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed and reported as a wrong passphrase.
            sys.stderr.write("Wrong passphrase\n")

    # Re-check: did the unlock actually succeed?
    info = surcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(surcoind):
    """Map each receiving address to its spendable total, outputs and account.

    :param surcoind: JSON-RPC proxy to the running surcoind
    :return: dict of address -> {"total", "outputs", "account"}
    """
    account_by_address = {}
    for entry in surcoind.listreceivedbyaddress(0):
        account_by_address[entry["address"]] = entry["account"]

    summary = {}
    for output in surcoind.listunspent(0):
        # listunspent doesn't give addresses, so:
        rawtx = surcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]

        # This code only deals with ordinary pay-to-surcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] not in ("pubkeyhash", "scripthash"):
            continue

        address = pk["addresses"][0]
        entry = summary.get(address)
        if entry is None:
            summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : account_by_address.get(address, "")
            }
        else:
            entry["total"] += vout["value"]
            entry["outputs"].append(output)

    return summary
def select_coins(needed, inputs):
    """Greedily pick inputs until *needed* is covered.

    # Feel free to improve this, this is good enough for my simple needs:
    :return: (list of {"txid","vout"} dicts, change amount — negative if short)
    """
    chosen = []
    gathered = Decimal("0.0")
    for txin in inputs:
        if gathered >= needed:
            break
        chosen.append({ "txid":txin["txid"], "vout":txin["vout"]})
        gathered += txin["amount"]
    return (chosen, gathered-needed)
def create_tx(surcoind, fromaddresses, toaddress, amount, fee):
    """Create and sign a raw transaction spending from the given addresses.

    :param surcoind: JSON-RPC proxy to the running surcoind
    :param fromaddresses: list of source addresses
    :param toaddress: destination address
    :param amount: Decimal amount to send
    :param fee: Decimal fee to include
    :return: hex-encoded signed raw transaction; exits the process on failure
    """
    all_coins = list_available(surcoind)

    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]

    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
        sys.exit(1)

    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to surcoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE:  # don't bother with zero or tiny change
        # Send change back to the last from-address.
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)

    rawtx = surcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = surcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]

    return txdata
def compute_amount_in(surcoind, txinfo):
    """Sum the values of all inputs referenced by txinfo['vin'].

    Each input is resolved by fetching the previous transaction it spends.
    """
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        prev_tx = surcoind.getrawtransaction(vin['txid'], 1)
        total = total + prev_tx['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the output values of a decoded transaction."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(surcoind, txdata_hex, max_fee):
    """Abort the process if the transaction's implied fee looks unreasonable.

    :param surcoind: JSON-RPC proxy to the running surcoind
    :param txdata_hex: hex-encoded signed raw transaction
    :param max_fee: maximum acceptable fee

    Bug fix: the original referenced an undefined global ``fee`` (a local of
    main()), raising NameError on the no-fee checks; the fee is the difference
    between inputs and outputs and is now computed here.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = surcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(surcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))

        tx_size = len(txdata_hex)/2  # two hex chars per byte
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction

    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point: list spendable funds, or build and (optionally)
    broadcast a transaction, depending on whether --amount was given."""
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get surcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send surcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of surcoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")

    (options, args) = parser.parse_args()

    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    surcoind = connect_JSON(config)

    if options.amount is None:
        # No amount given: just report what is spendable per address.
        address_summary = list_available(surcoind)
        for address,info in address_summary.iteritems():  # NOTE: iteritems is Python 2 only
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(surcoind) == False:
            pass  # Keep asking for passphrase until they get it right
        txdata = create_tx(surcoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Cap the sanity check at 1% of the amount being sent.
        sanity_test_fee(surcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = surcoind.sendrawtransaction(txdata)
            print(txid)

if __name__ == '__main__':
    main()
| |
"""
@package mi.instrument.kml.cam.camds.driver
@file marine-integrations/mi/instrument/kml/cam/camds/test/test_driver.py
@author Sung Ahn
@brief Test Driver for CAMDS
Release notes:
"""
__author__ = 'Sung Ahn'
__license__ = 'Apache 2.0'
import copy
import time
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.instrument.chunker import StringChunker
from mi.core.log import get_logger
log = get_logger()
from mi.idk.unit_test import InstrumentDriverUnitTestCase
from mi.idk.unit_test import InstrumentDriverIntegrationTestCase
from mi.instrument.kml.cam.camds.driver import DataParticleType, CamdsDiskStatusKey, CamdsHealthStatusKey
from mi.idk.unit_test import DriverTestMixin
from mi.idk.unit_test import DriverStartupConfigKey
from mi.instrument.kml.cam.camds.driver import Parameter, ParameterIndex
from mi.instrument.kml.cam.camds.driver import CAMDSPrompt, InstrumentDriver, CAMDSProtocol
from mi.instrument.kml.cam.camds.driver import ScheduledJob
from mi.instrument.kml.cam.camds.driver import InstrumentCmds, ProtocolState, ProtocolEvent, Capability
from mi.idk.unit_test import InstrumentDriverTestCase, ParameterTestConfigKey
from mi.core.common import BaseEnum
NEWLINE = '\r\n'
###
# Driver parameters for tests
###
# Register the CAMDS driver with the IDK test framework: every startup
# parameter defaults to the value declared on the Parameter enum, and the
# scheduled jobs start with empty (framework-populated) configurations.
InstrumentDriverTestCase.initialize(
    driver_module='mi.instrument.kml.cam.camds.driver',
    driver_class="InstrumentDriver",
    instrument_agent_resource_id='HTWZMW',
    instrument_agent_preload_id='IA7',
    instrument_agent_name='kml cam',
    instrument_agent_packet_config=DataParticleType(),

    driver_startup_config={
        DriverStartupConfigKey.PARAMETERS: {
            Parameter.ACQUIRE_STATUS_INTERVAL[ParameterIndex.KEY]: Parameter.ACQUIRE_STATUS_INTERVAL[
                ParameterIndex.DEFAULT_DATA],
            Parameter.AUTO_CAPTURE_DURATION[ParameterIndex.KEY]: Parameter.AUTO_CAPTURE_DURATION[
                ParameterIndex.DEFAULT_DATA],
            Parameter.CAMERA_GAIN[ParameterIndex.KEY]: Parameter.CAMERA_GAIN[ParameterIndex.DEFAULT_DATA],
            Parameter.CAMERA_MODE[ParameterIndex.KEY]: Parameter.CAMERA_MODE[ParameterIndex.DEFAULT_DATA],
            Parameter.COMPRESSION_RATIO[ParameterIndex.KEY]: Parameter.COMPRESSION_RATIO[ParameterIndex.DEFAULT_DATA],
            Parameter.FOCUS_POSITION[ParameterIndex.KEY]: Parameter.FOCUS_POSITION[ParameterIndex.DEFAULT_DATA],
            Parameter.FOCUS_SPEED[ParameterIndex.KEY]: Parameter.FOCUS_SPEED[ParameterIndex.DEFAULT_DATA],
            Parameter.FRAME_RATE[ParameterIndex.KEY]: Parameter.FRAME_RATE[ParameterIndex.DEFAULT_DATA],
            Parameter.IMAGE_RESOLUTION[ParameterIndex.KEY]: Parameter.IMAGE_RESOLUTION[ParameterIndex.DEFAULT_DATA],
            Parameter.IRIS_POSITION[ParameterIndex.KEY]: Parameter.IRIS_POSITION[ParameterIndex.DEFAULT_DATA],
            Parameter.LAMP_BRIGHTNESS[ParameterIndex.KEY]: Parameter.LAMP_BRIGHTNESS[ParameterIndex.DEFAULT_DATA],
            Parameter.NETWORK_DRIVE_LOCATION[ParameterIndex.KEY]: Parameter.NETWORK_DRIVE_LOCATION[
                ParameterIndex.DEFAULT_DATA],
            Parameter.NTP_SETTING[ParameterIndex.KEY]: Parameter.NTP_SETTING[ParameterIndex.DEFAULT_DATA],
            Parameter.PAN_POSITION[ParameterIndex.KEY]: Parameter.PAN_POSITION[ParameterIndex.DEFAULT_DATA],
            Parameter.PAN_SPEED[ParameterIndex.KEY]: Parameter.PAN_SPEED[ParameterIndex.DEFAULT_DATA],
            Parameter.PRESET_NUMBER[ParameterIndex.KEY]: Parameter.PRESET_NUMBER[ParameterIndex.DEFAULT_DATA],
            Parameter.SAMPLE_INTERVAL[ParameterIndex.KEY]: Parameter.SAMPLE_INTERVAL[ParameterIndex.DEFAULT_DATA],
            Parameter.SHUTTER_SPEED[ParameterIndex.KEY]: Parameter.SHUTTER_SPEED[ParameterIndex.DEFAULT_DATA],
            Parameter.SOFT_END_STOPS[ParameterIndex.KEY]: Parameter.SOFT_END_STOPS[ParameterIndex.DEFAULT_DATA],
            Parameter.TILT_POSITION[ParameterIndex.KEY]: Parameter.TILT_POSITION[ParameterIndex.DEFAULT_DATA],
            Parameter.TILT_SPEED[ParameterIndex.KEY]: Parameter.TILT_SPEED[ParameterIndex.DEFAULT_DATA],
            Parameter.VIDEO_FORWARDING[ParameterIndex.KEY]: Parameter.VIDEO_FORWARDING[ParameterIndex.DEFAULT_DATA],
            Parameter.VIDEO_FORWARDING_TIMEOUT[ParameterIndex.KEY]: Parameter.VIDEO_FORWARDING_TIMEOUT[
                ParameterIndex.DEFAULT_DATA],
            Parameter.WHEN_DISK_IS_FULL[ParameterIndex.KEY]: Parameter.WHEN_DISK_IS_FULL[ParameterIndex.DEFAULT_DATA],
            Parameter.ZOOM_POSITION[ParameterIndex.KEY]: Parameter.ZOOM_POSITION[ParameterIndex.DEFAULT_DATA],
            Parameter.ZOOM_SPEED[ParameterIndex.KEY]: Parameter.ZOOM_SPEED[ParameterIndex.DEFAULT_DATA]
        },
        DriverStartupConfigKey.SCHEDULER: {
            ScheduledJob.VIDEO_FORWARDING: {},
            ScheduledJob.SAMPLE: {},
            ScheduledJob.STATUS: {},
            ScheduledJob.STOP_CAPTURE: {}
        }
    }
)
class TeledynePrompt(BaseEnum):
    """
    Prompt strings emitted by the device on its command channel.

    The protocol layer matches these in the raw byte stream to delimit
    command responses and to detect instrument-reported errors.
    """
    # Prompt sequence that terminates a normal command response.
    COMMAND = '\r\n>\r\n>'
    # Prefix printed by the instrument when a command fails.
    ERR = 'ERR:'
###################################################################
###
# Driver constant definitions
###
###############################################################################
# DATA PARTICLE TEST MIXIN #
# Defines a set of assert methods used for data particle verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles.
###############################################################################
class CAMDSMixin(DriverTestMixin):
    """
    Mixin class used for storing data particle constants
    and common data assertion methods.

    Inherited by the unit/integration/qualification test cases below so they
    all share one definition of the expected driver parameters, capabilities
    and sample status payloads.
    """
    # Create some short names for the parameter test config
    TYPE = ParameterTestConfigKey.TYPE
    READONLY = ParameterTestConfigKey.READONLY
    STARTUP = ParameterTestConfigKey.STARTUP
    DA = ParameterTestConfigKey.DIRECT_ACCESS
    VALUE = ParameterTestConfigKey.VALUE
    REQUIRED = ParameterTestConfigKey.REQUIRED
    DEFAULT = ParameterTestConfigKey.DEFAULT
    STATES = ParameterTestConfigKey.STATES

    ###
    # Parameter and Type Definitions
    #
    # Maps each driver parameter key to the metadata the DriverTestMixin
    # assertion helpers check against (type, read-only/startup/direct-access
    # flags, and expected default/current values).
    ###
    _driver_parameters = {
        Parameter.CAMERA_GAIN[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.CAMERA_GAIN[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.CAMERA_GAIN[ParameterIndex.D_DEFAULT]},
        Parameter.CAMERA_MODE[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: True, STARTUP: True,
             DEFAULT: Parameter.CAMERA_MODE[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.CAMERA_MODE[ParameterIndex.D_DEFAULT]},
        Parameter.COMPRESSION_RATIO[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: True, STARTUP: True,
             DEFAULT: Parameter.COMPRESSION_RATIO[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.COMPRESSION_RATIO[ParameterIndex.D_DEFAULT]},
        Parameter.FOCUS_POSITION[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.FOCUS_POSITION[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.FOCUS_POSITION[ParameterIndex.D_DEFAULT]},
        Parameter.FOCUS_SPEED[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.FOCUS_SPEED[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.FOCUS_SPEED[ParameterIndex.D_DEFAULT]},
        Parameter.FRAME_RATE[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: True, STARTUP: True,
             DEFAULT: Parameter.FRAME_RATE[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.FRAME_RATE[ParameterIndex.D_DEFAULT]},
        Parameter.IMAGE_RESOLUTION[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: True, STARTUP: True,
             DEFAULT: Parameter.IMAGE_RESOLUTION[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.IMAGE_RESOLUTION[ParameterIndex.D_DEFAULT]},
        Parameter.IRIS_POSITION[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.IRIS_POSITION[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.IRIS_POSITION[ParameterIndex.D_DEFAULT]},
        Parameter.LAMP_BRIGHTNESS[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.LAMP_BRIGHTNESS[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.LAMP_BRIGHTNESS[ParameterIndex.D_DEFAULT]},
        Parameter.NETWORK_DRIVE_LOCATION[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: True,
             DEFAULT: Parameter.NETWORK_DRIVE_LOCATION[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.NETWORK_DRIVE_LOCATION[ParameterIndex.D_DEFAULT]},
        # NTP_SETTING and WHEN_DISK_IS_FULL are instrument-reported,
        # read-only values; no default is asserted for them.
        Parameter.NTP_SETTING[ParameterIndex.KEY]:
            {TYPE: str, READONLY: True, DA: True, STARTUP: False,
             DEFAULT: None, VALUE: None},
        Parameter.PAN_POSITION[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.PAN_POSITION[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.PAN_POSITION[ParameterIndex.D_DEFAULT]},
        Parameter.PAN_SPEED[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.PAN_SPEED[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.PAN_SPEED[ParameterIndex.D_DEFAULT]},
        Parameter.SHUTTER_SPEED[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.SHUTTER_SPEED[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.SHUTTER_SPEED[ParameterIndex.D_DEFAULT]},
        Parameter.SOFT_END_STOPS[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.SOFT_END_STOPS[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.SOFT_END_STOPS[ParameterIndex.D_DEFAULT]},
        Parameter.TILT_POSITION[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.TILT_POSITION[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.TILT_POSITION[ParameterIndex.D_DEFAULT]},
        Parameter.TILT_SPEED[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.TILT_SPEED[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.TILT_SPEED[ParameterIndex.D_DEFAULT]},
        Parameter.WHEN_DISK_IS_FULL[ParameterIndex.KEY]:
            {TYPE: str, READONLY: True, DA: True, STARTUP: False,
             DEFAULT: None, VALUE: None},
        Parameter.ZOOM_POSITION[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.ZOOM_POSITION[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.ZOOM_POSITION[ParameterIndex.D_DEFAULT]},
        Parameter.ZOOM_SPEED[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.ZOOM_SPEED[ParameterIndex.D_DEFAULT],
             VALUE: Parameter.ZOOM_SPEED[ParameterIndex.D_DEFAULT]},
        # Engineering parameters
        # NOTE(review): these deliberately(?) use DEFAULT_DATA for DEFAULT
        # but D_DEFAULT for VALUE, unlike the instrument parameters above —
        # confirm against the driver's parameter table.
        Parameter.PRESET_NUMBER[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.PRESET_NUMBER[ParameterIndex.DEFAULT_DATA],
             VALUE: Parameter.PRESET_NUMBER[ParameterIndex.D_DEFAULT]},
        Parameter.ACQUIRE_STATUS_INTERVAL[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.ACQUIRE_STATUS_INTERVAL[ParameterIndex.DEFAULT_DATA],
             VALUE: Parameter.ACQUIRE_STATUS_INTERVAL[ParameterIndex.D_DEFAULT]},
        Parameter.VIDEO_FORWARDING[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.VIDEO_FORWARDING[ParameterIndex.DEFAULT_DATA],
             VALUE: Parameter.VIDEO_FORWARDING[ParameterIndex.D_DEFAULT]},
        Parameter.VIDEO_FORWARDING_TIMEOUT[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.VIDEO_FORWARDING_TIMEOUT[ParameterIndex.DEFAULT_DATA],
             VALUE: Parameter.VIDEO_FORWARDING_TIMEOUT[ParameterIndex.D_DEFAULT]},
        Parameter.SAMPLE_INTERVAL[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.SAMPLE_INTERVAL[ParameterIndex.DEFAULT_DATA],
             VALUE: Parameter.SAMPLE_INTERVAL[ParameterIndex.D_DEFAULT]},
        Parameter.AUTO_CAPTURE_DURATION[ParameterIndex.KEY]:
            {TYPE: str, READONLY: False, DA: False, STARTUP: False,
             DEFAULT: Parameter.AUTO_CAPTURE_DURATION[ParameterIndex.DEFAULT_DATA],
             VALUE: Parameter.AUTO_CAPTURE_DURATION[ParameterIndex.D_DEFAULT]}
    }

    # Maps each capability to the protocol states in which it is legal.
    _driver_capabilities = {
        # capabilities defined in the IOS
        Capability.DISCOVER: {STATES: [ProtocolState.UNKNOWN]},
        Capability.START_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.STOP_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.STOP_CAPTURE: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.ACQUIRE_STATUS: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.GOTO_PRESET: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.SET_PRESET: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.LAMP_OFF: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.LAMP_ON: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.LASER_1_OFF: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.LASER_1_ON: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.LASER_2_OFF: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.LASER_2_ON: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.LASER_BOTH_OFF: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.LASER_BOTH_ON: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.ACQUIRE_SAMPLE: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
        Capability.EXECUTE_AUTO_CAPTURE: {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
    }

    # Single raw bytes 0x01..0x0C used to build the binary sample payloads
    # below (the first byte after '<' is the message's data-size field).
    size_1 = chr(0x01)
    size_2 = chr(0x02)
    size_3 = chr(0x03)
    size_5 = chr(0x05)
    size_6 = chr(0x06)
    size_9 = chr(0x09)
    size_A = chr(0x0A)
    size_C = chr(0x0C)
    size_4 = chr(0x04)
    size_7 = chr(0x07)
    size_8 = chr(0x08)
    size_B = chr(0x0B)
    _ACK = chr(0x06)

    # Canned 'HS' (health status) instrument message and the particle
    # values the parser is expected to extract from it.
    _health_data = '<' + size_7 + ':' + size_6 + ':' + 'HS' + size_1 + size_2 + size_3 + '>'
    _health_dict = {
        CamdsHealthStatusKey.humidity: {'type': int, 'value': 2},
        CamdsHealthStatusKey.temp: {'type': int, 'value': 1},
        CamdsHealthStatusKey.error: {'type': int, 'value': 3}
    }

    # Canned 'GC' (disk status) instrument message and its expected
    # particle values.
    _disk_data = '<' + size_B + ':' + size_6 + ':' + 'GC' + size_1 + size_2 + \
                 size_3 + size_4 + size_5 + size_6 + size_7 + '>'
    _disk_status_dict = {
        CamdsDiskStatusKey.disk_remaining: {'type': int, 'value': 100},
        CamdsDiskStatusKey.image_on_disk: {'type': int, 'value': 3},
        CamdsDiskStatusKey.image_remaining: {'type': int, 'value': 1029},
        CamdsDiskStatusKey.size: {'type': int, 'value': 1543},
    }

    ###
    # Driver Parameter Methods
    ###
    def assert_driver_parameters(self, current_parameters, verify_values=False):
        """
        Verify that all driver parameters are correct and potentially verify values.
        @param current_parameters: driver parameters read from the driver instance
        @param verify_values: should we verify values against definition?
        """
        log.debug("assert_driver_parameters current_parameters = " + str(current_parameters))
        temp_parameters = copy.deepcopy(self._driver_parameters)
        # NOTE(review): updating the copy with the same source dict is a
        # no-op; probably meant to merge engineering parameters from a
        # second dict — confirm before changing.
        temp_parameters.update(self._driver_parameters)
        self.assert_parameters(current_parameters, temp_parameters, verify_values)

    def assert_health_data(self, data_particle, verify_values=True):
        """
        Verify CAMDS health status data particle
        @param data_particle: CAMDS health status DataParticle
        @param verify_values: bool, should we verify parameter values
        """
        self.assert_data_particle_header(data_particle, DataParticleType.CAMDS_HEALTH_STATUS)
        # verify_values is intentionally not forwarded here.
        self.assert_data_particle_parameters(data_particle, self._health_dict)  # , verify_values

    def assert_disk_data(self, data_particle, verify_values=True):
        """
        Verify CAMDS disk status data particle
        @param data_particle: CAMDS disk status data particle
        @param verify_values: bool, should we verify parameter values
        """
        self.assert_data_particle_header(data_particle, DataParticleType.CAMDS_DISK_STATUS)
        # verify_values is intentionally not forwarded here.
        self.assert_data_particle_parameters(data_particle, self._disk_status_dict)  # , verify_values
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
###############################################################################
@attr('UNIT', group='mi')
class DriverUnitTest(InstrumentDriverUnitTestCase, CAMDSMixin):
    """Unit tests for the CAMDS instrument driver (no hardware required)."""

    def setUp(self):
        InstrumentDriverUnitTestCase.setUp(self)

    def test_driver_schema(self):
        """
        get the driver schema and verify it is configured properly
        """
        temp_parameters = copy.deepcopy(self._driver_parameters)
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_driver_schema(driver, temp_parameters, self._driver_capabilities)

    def test_got_data(self):
        """
        Verify sample data passed through the got data method produces the correct data particles
        """
        # Create and initialize the instrument driver with a mock port agent
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_initialize_driver(driver)
        self.assert_raw_particle_published(driver, True)
        # Start validating data particles
        self.assert_particle_published(driver, self._health_data, self.assert_health_data, True)
        self.assert_particle_published(driver, self._disk_data, self.assert_disk_data, True)

    def test_driver_parameters(self):
        """
        Verify the set of parameters known by the driver
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_initialize_driver(driver, ProtocolState.COMMAND)
        expected_parameters = sorted(self._driver_parameters.keys())
        expected_parameters = sorted(expected_parameters)
        reported_parameters = sorted(driver.get_resource(Parameter.ALL))
        self.assertEqual(reported_parameters, expected_parameters)
        # Verify the parameter definitions
        self.assert_driver_parameter_definition(driver, self._driver_parameters)

    def test_driver_enums(self):
        """
        Verify that all driver enumeration has no duplicate values that might cause confusion. Also
        do a little extra validation for the Capabilities
        """
        self.assert_enum_has_no_duplicates(InstrumentCmds())
        self.assert_enum_has_no_duplicates(ProtocolState())
        self.assert_enum_has_no_duplicates(ProtocolEvent())
        self.assert_enum_has_no_duplicates(Parameter())
        self.assert_enum_has_no_duplicates(DataParticleType())
        self.assert_enum_has_no_duplicates(ScheduledJob())
        # Test capabilities for duplicates, them verify that capabilities is a subset of proto events
        self.assert_enum_has_no_duplicates(Capability())
        self.assert_enum_complete(Capability(), ProtocolEvent())

    def test_chunker(self):
        """
        Test the chunker and verify the particles created.
        """
        chunker = StringChunker(CAMDSProtocol.sieve_function)
        self.assert_chunker_sample(chunker, self._health_data)
        self.assert_chunker_sample_with_noise(chunker, self._health_data)
        self.assert_chunker_fragmented_sample(chunker, self._health_data, 5)
        self.assert_chunker_combined_sample(chunker, self._health_data)
        self.assert_chunker_sample(chunker, self._disk_data)
        self.assert_chunker_sample_with_noise(chunker, self._disk_data)
        self.assert_chunker_fragmented_sample(chunker, self._disk_data, 6)
        self.assert_chunker_combined_sample(chunker, self._disk_data)

    def test_protocol_filter_capabilities(self):
        """
        This tests driver filter_capabilities.
        Iterate through available capabilities, and verify that they can pass successfully through the filter.
        Test silly made up capabilities to verify they are blocked by filter.
        """
        my_event_callback = Mock(spec="UNKNOWN WHAT SHOULD GO HERE FOR evt_callback")
        protocol = CAMDSProtocol(CAMDSPrompt, NEWLINE, my_event_callback)
        driver_capabilities = Capability().list()
        test_capabilities = Capability().list()
        # Add a bogus capability that will be filtered out.
        test_capabilities.append("BOGUS_CAPABILITY")
        # Verify "BOGUS_CAPABILITY was filtered out
        self.assertEquals(driver_capabilities, protocol._filter_capabilities(test_capabilities))

    def test_set(self):
        """
        Verify _build_set_command produces the exact wire format for each
        parameter: (parameter tuple, python value, expected command bytes).
        """
        params = [
            (Parameter.CAMERA_GAIN, 1, '<\x04:GS:\x01>'),
            (Parameter.CAMERA_GAIN, 2, '<\x04:GS:\x02>'),
            (Parameter.CAMERA_GAIN, 3, '<\x04:GS:\x03>'),
            (Parameter.CAMERA_GAIN, 4, '<\x04:GS:\x04>'),
            (Parameter.CAMERA_GAIN, 5, '<\x04:GS:\x05>'),
            (Parameter.CAMERA_GAIN, 32, '<\x04:GS:\x20>'),
            (Parameter.CAMERA_GAIN, 255, '<\x04:GS:\xff>'),
            (Parameter.CAMERA_MODE, 0, '<\x04:SV:\x00>'),
            (Parameter.CAMERA_MODE, 9, '<\x04:SV:\x09>'),
            (Parameter.CAMERA_MODE, 10, '<\x04:SV:\x0a>'),
            (Parameter.CAMERA_MODE, 11, '<\x04:SV:\x0b>'),
            (Parameter.FRAME_RATE, 1, '<\x04:FR:\x01>'),
            (Parameter.FRAME_RATE, 5, '<\x04:FR:\x05>'),
            (Parameter.FRAME_RATE, 10, '<\x04:FR:\x0a>'),
            (Parameter.FRAME_RATE, 20, '<\x04:FR:\x14>'),
            (Parameter.FRAME_RATE, 30, '<\x04:FR:\x1e>'),
            (Parameter.IMAGE_RESOLUTION, 1, '<\x04:SD:\x01>'),
            (Parameter.IMAGE_RESOLUTION, 2, '<\x04:SD:\x02>'),
            (Parameter.IMAGE_RESOLUTION, 4, '<\x04:SD:\x04>'),
            (Parameter.IMAGE_RESOLUTION, 8, '<\x04:SD:\x08>'),
            (Parameter.IMAGE_RESOLUTION, 16, '<\x04:SD:\x10>'),
            (Parameter.IMAGE_RESOLUTION, 32, '<\x04:SD:\x20>'),
            (Parameter.IMAGE_RESOLUTION, 64, '<\x04:SD:\x40>'),
            (Parameter.IMAGE_RESOLUTION, 100, '<\x04:SD:\x64>'),
            (Parameter.PAN_SPEED, 1, '<\x04:DS:\x01>'),
            (Parameter.PAN_SPEED, 2, '<\x04:DS:\x02>'),
            (Parameter.PAN_SPEED, 4, '<\x04:DS:\x04>'),
            (Parameter.PAN_SPEED, 8, '<\x04:DS:\x08>'),
            (Parameter.PAN_SPEED, 16, '<\x04:DS:\x10>'),
            (Parameter.PAN_SPEED, 32, '<\x04:DS:\x20>'),
            (Parameter.PAN_SPEED, 50, '<\x04:DS:\x32>'),
            (Parameter.PAN_SPEED, 64, '<\x04:DS:\x40>'),
            (Parameter.PAN_SPEED, 100, '<\x04:DS:\x64>'),
            (Parameter.COMPRESSION_RATIO, 1, '<\x04:CD:\x01>'),
            (Parameter.COMPRESSION_RATIO, 2, '<\x04:CD:\x02>'),
            (Parameter.COMPRESSION_RATIO, 4, '<\x04:CD:\x04>'),
            (Parameter.COMPRESSION_RATIO, 8, '<\x04:CD:\x08>'),
            (Parameter.COMPRESSION_RATIO, 16, '<\x04:CD:\x10>'),
            (Parameter.COMPRESSION_RATIO, 32, '<\x04:CD:\x20>'),
            (Parameter.COMPRESSION_RATIO, 64, '<\x04:CD:\x40>'),
            (Parameter.COMPRESSION_RATIO, 100, '<\x04:CD:\x64>'),
            (Parameter.FOCUS_POSITION, 0, '<\x04:FG:\x00>'),
            (Parameter.FOCUS_POSITION, 100, '<\x04:FG:\x64>'),
            (Parameter.FOCUS_POSITION, 200, '<\x04:FG:\xc8>'),
            (Parameter.PAN_POSITION, 0, '<\x06:PP:000>'),
            (Parameter.PAN_POSITION, 45, '<\x06:PP:045>'),
            (Parameter.PAN_POSITION, 90, '<\x06:PP:090>'),
            (Parameter.SHUTTER_SPEED, '25:3', '<\x05:ET:\x19\x03>'),
            (Parameter.SHUTTER_SPEED, '6:7', '<\x05:ET:\x06\x07>'),
            (Parameter.SHUTTER_SPEED, '255:255', '<\x05:ET:\xff\xff>'),
            (Parameter.TILT_POSITION, 0, '<\x06:TP:000>'),
            (Parameter.TILT_POSITION, 45, '<\x06:TP:045>'),
            (Parameter.TILT_POSITION, 90, '<\x06:TP:090>'),
            (Parameter.TILT_SPEED, 0, '<\x04:TA:\x00>'),
            (Parameter.TILT_SPEED, 50, '<\x04:TA:\x32>'),
            (Parameter.TILT_SPEED, 100, '<\x04:TA:\x64>'),
            (Parameter.ZOOM_SPEED, 0, '<\x04:ZX:\x00>'),
            (Parameter.FOCUS_SPEED, 0, '<\x04:FX:\x00>'),
            (Parameter.ZOOM_POSITION, 100, '<\x04:ZG:d>'),
            (Parameter.PAN_SPEED, 50, '<\x04:DS:2>'),
            (Parameter.PAN_POSITION, 90, '<\x06:PP:090>'),
            (Parameter.CAMERA_MODE, 9, '<\x04:SV:\t>'),
            (Parameter.TILT_SPEED, 50, '<\x04:TA:2>'),
            (Parameter.IRIS_POSITION, 8, '<\x04:IG:\x08>'),
            (Parameter.SOFT_END_STOPS, 1, '<\x04:ES:\x01>'),
            (Parameter.FOCUS_POSITION, 100, '<\x04:FG:d>'),
            (Parameter.COMPRESSION_RATIO, 100, '<\x04:CD:d>'),
            (Parameter.NETWORK_DRIVE_LOCATION, 0, '<\x04:FL:\x00>'),
            (Parameter.LAMP_BRIGHTNESS, '3:50', '<\x05:BF:\x032>'),
        ]
        for param, input_value, output_value in params:
            key = param[ParameterIndex.KEY]
            self.assertEqual(output_value, self._build_set_command(key, input_value))

    def _build_set_command(self, param, val):
        """
        Build handler for set commands. param=val followed by newline.
        String val constructed by param dict formatting function.
        @param param the parameter key to set.
        @param val the parameter value to set.
        @return The set command to be sent to the device.
        @throws InstrumentParameterException if the parameter is not valid or
        if the formatting function could not accept the value passed.
        """
        try:
            # Pan/tilt positions are sent as three ASCII digits; everything
            # else is sent as raw bytes (one chr() per ':'-separated field).
            if param in [Parameter.PAN_POSITION[ParameterIndex.KEY],
                         Parameter.TILT_POSITION[ParameterIndex.KEY]]:
                if not isinstance(val, int) or val > 999:
                    raise Exception('The desired value for %s must be an integer less than 999: %s'
                                    % (param, val))
                val = '%03d' % val
            # NOTE(review): `unicode` is Python 2 only.
            elif isinstance(val, str) or isinstance(val, unicode):
                val = ''.join(chr(int(x)) for x in val.split(':'))
            else:
                val = chr(val)
            if param == Parameter.NTP_SETTING[ParameterIndex.KEY]:
                val = val + Parameter.NTP_SETTING[ParameterIndex.DEFAULT_DATA]
            # Size byte covers the value plus the set-code and delimiters.
            data_size = len(val) + 3
            param_tuple = getattr(Parameter, param)
            set_cmd = '<%s:%s:%s>' % (chr(data_size), param_tuple[ParameterIndex.SET], val)
        except KeyError:
            raise Exception('Unknown driver parameter. %s' % param)
        return set_cmd
###############################################################################
# INTEGRATION TESTS #
# Integration test test the direct driver / instrument interaction #
# but making direct calls via zeromq. #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class DriverIntegrationTest(InstrumentDriverIntegrationTestCase, CAMDSMixin):
    """Integration tests driving the CAMDS instrument via zeromq/port agent."""

    _tested = {}

    def setUp(self):
        self.port_agents = {}
        InstrumentDriverIntegrationTestCase.setUp(self)

    def assert_disk_status(self, data_particle, verify_values=True):
        """
        Verify a disk status particle
        @param data_particle: CAMDS disk status particle
        @param verify_values: bool, should we verify parameter values
        """
        self.assert_data_particle_header(data_particle, DataParticleType.CAMDS_DISK_STATUS)
        # verify_values is intentionally not forwarded here.
        self.assert_data_particle_parameters(data_particle, self._disk_status_dict)  # , verify_values

    def assert_health_status(self, data_particle, verify_values=True):
        """
        Verify a health status particle
        @param data_particle: CAMDS health status particle
        @param verify_values: bool, should we verify parameter values
        """
        self.assert_data_particle_header(data_particle, DataParticleType.CAMDS_HEALTH_STATUS)
        # verify_values is intentionally not forwarded here.
        self.assert_data_particle_parameters(data_particle, self._health_dict)  # , verify_values

    def assert_sample_meta(self, data_particle, verify_values=True):
        """
        Verify an image meta particle
        @param data_particle: CAMDS image meta data particle
        @param verify_values: bool, should we verify parameter values
        """
        # Only the header is checked; image metadata values vary per capture.
        self.assert_data_particle_header(data_particle, DataParticleType.CAMDS_IMAGE_METADATA)

    def assert_acquire_status(self):
        """
        Check data stream types for acquire_status()
        """
        self.assert_async_particle_generation(DataParticleType.CAMDS_DISK_STATUS, self.assert_disk_status,
                                              timeout=60)
        self.assert_async_particle_generation(DataParticleType.CAMDS_HEALTH_STATUS,
                                              self.assert_health_status, timeout=60)

    def assert_acquire_sample(self):
        """
        Check that an image metadata particle is generated asynchronously.
        """
        self.assert_async_particle_generation(DataParticleType.CAMDS_IMAGE_METADATA, self.assert_sample_meta,
                                              timeout=60)

    def test_connection(self):
        log.debug("######## Starting test_connection ##########")
        self.assert_initialize_driver()

    # Overwritten method
    def test_driver_process(self):
        """
        Test for correct launch of driver process and communications, including asynchronous driver events.
        Overridden to support multiple port agents.
        """
        log.info("Ensuring driver process was started properly ...")
        # Verify processes exist.
        self.assertNotEqual(self.driver_process, None)
        drv_pid = self.driver_process.getpid()
        self.assertTrue(isinstance(drv_pid, int))
        self.assertNotEqual(self.port_agents, None)
        for port_agent in self.port_agents.values():
            pagent_pid = port_agent.get_pid()
            self.assertTrue(isinstance(pagent_pid, int))
        # Send a test message to the process interface, confirm result.
        reply = self.driver_client.cmd_dvr('process_echo')
        self.assert_(reply.startswith('ping from resource ppid:'))
        reply = self.driver_client.cmd_dvr('driver_ping', 'foo')
        self.assert_(reply.startswith('driver_ping: foo'))
        # Test the event thread publishes and client side picks up events.
        events = [
            'I am important event #1!',
            'And I am important event #2!'
        ]
        self.driver_client.cmd_dvr('test_events', events=events)
        time.sleep(1)
        # Confirm the events received are as expected.
        self.assertEqual(self.events, events)
        # Test the exception mechanism.
        # with self.assertRaises(ResourceError):
        #     exception_str = 'Oh no, something bad happened!'
        #     self.driver_client.cmd_dvr('test_exceptions', exception_str)

    # Set bulk params and test auto sampling
    def test_autosample_particle_generation(self):
        """
        Test that we can generate particles when in autosample
        """
        self.assert_initialize_driver()
        params = {
            Parameter.CAMERA_GAIN: 255,
            Parameter.CAMERA_MODE: 9,
            Parameter.FRAME_RATE: 30,
            Parameter.IMAGE_RESOLUTION: 1,
            Parameter.PAN_SPEED: 50,
            Parameter.COMPRESSION_RATIO: 100,
            Parameter.FOCUS_POSITION: 100,
            Parameter.PAN_POSITION: 90,
            Parameter.SHUTTER_SPEED: '255:255',
            Parameter.TILT_POSITION: 90,
            Parameter.TILT_SPEED: 50,
        }
        self.assert_set_bulk(params)
        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=10)

    # test commands in different modes
    def test_commands(self):
        """
        Run instrument commands from both command and streaming mode.
        """
        self.assert_initialize_driver()
        ####
        # First test in command mode
        ####
        self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE,
                                   delay=20)
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)
        self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, delay=2)
        self.assert_acquire_status()
        self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE, delay=2)
        self.assert_acquire_sample()
        self.assert_driver_command(ProtocolEvent.GOTO_PRESET)
        self.assert_driver_command(ProtocolEvent.SET_PRESET)
        self.assert_driver_command(ProtocolEvent.STOP_FORWARD)
        self.assert_driver_command(ProtocolEvent.LAMP_ON)
        self.assert_driver_command(ProtocolEvent.LAMP_OFF)
        self.assert_driver_command(ProtocolEvent.LASER_1_ON)
        self.assert_driver_command(ProtocolEvent.LASER_2_ON)
        self.assert_driver_command(ProtocolEvent.LASER_1_OFF)
        self.assert_driver_command(ProtocolEvent.LASER_2_OFF)
        self.assert_driver_command(ProtocolEvent.LASER_BOTH_ON)
        self.assert_driver_command(ProtocolEvent.LASER_BOTH_OFF)
        # ####
        # # Test in streaming mode
        # ####
        # # Put us in streaming
        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE,
                                   delay=1)
        self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, delay=2)
        self.assert_acquire_status()
        self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE, delay=2)
        self.assert_acquire_sample()
        self.assert_driver_command(ProtocolEvent.GOTO_PRESET)
        self.assert_driver_command(ProtocolEvent.SET_PRESET)
        self.assert_driver_command(ProtocolEvent.STOP_FORWARD)
        self.assert_driver_command(ProtocolEvent.LAMP_ON)
        self.assert_driver_command(ProtocolEvent.LAMP_OFF)
        self.assert_driver_command(ProtocolEvent.LASER_1_ON)
        self.assert_driver_command(ProtocolEvent.LASER_2_ON)
        self.assert_driver_command(ProtocolEvent.LASER_1_OFF)
        self.assert_driver_command(ProtocolEvent.LASER_2_OFF)
        self.assert_driver_command(ProtocolEvent.LASER_BOTH_ON)
        self.assert_driver_command(ProtocolEvent.LASER_BOTH_OFF)
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)

    def test_scheduled_acquire_status_command(self):
        """
        Verify the scheduled acquire status is triggered and functions as expected
        """
        self.assert_initialize_driver()
        # Schedule status every 7 seconds, wait long enough for one to fire.
        self.assert_set(Parameter.ACQUIRE_STATUS_INTERVAL[ParameterIndex.KEY], '00:00:07')
        time.sleep(15)
        self.assert_acquire_status()
        # Setting the interval back to zero disables the schedule.
        self.assert_set(Parameter.ACQUIRE_STATUS_INTERVAL[ParameterIndex.KEY], '00:00:00')
        self.assert_current_state(ProtocolState.COMMAND)

    def test_scheduled_acquire_status_autosample(self):
        """
        Verify the scheduled acquire status is triggered and functions as expected
        """
        self.assert_initialize_driver()
        self.assert_current_state(ProtocolState.COMMAND)
        self.assert_set(Parameter.SAMPLE_INTERVAL, '00:00:04')
        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE)
        self.assert_current_state(ProtocolState.AUTOSAMPLE)
        time.sleep(10)
        self.assert_acquire_sample()
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE)
        self.assert_current_state(ProtocolState.COMMAND)
        self.assert_set(Parameter.SAMPLE_INTERVAL, '00:00:00')
        self.assert_current_state(ProtocolState.COMMAND)

    def test_scheduled_capture(self):
        """
        Verify a timed auto-capture runs for the configured duration and
        returns to command state afterwards.
        """
        self.assert_initialize_driver()
        self.assert_current_state(ProtocolState.COMMAND)
        self.assert_set(Parameter.AUTO_CAPTURE_DURATION, '00:00:02')
        self.assert_driver_command(InstrumentCmds.START_CAPTURE)
        time.sleep(1)
        self.assert_acquire_sample()
        time.sleep(2)
        self.assert_current_state(ProtocolState.COMMAND)

    def test_acquire_status(self):
        """
        Verify the acquire_status command is functional
        """
        self.assert_initialize_driver()
        self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
        self.assert_acquire_status()
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
| |
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for ``admin.packaging``.
"""
from glob import glob
from subprocess import check_output
from textwrap import dedent
from unittest import skipIf
from StringIO import StringIO
from twisted.python.filepath import FilePath
from twisted.python.procutils import which
from twisted.python.usage import UsageError
from twisted.trial.unittest import TestCase
from virtualenv import REQUIRED_MODULES as VIRTUALENV_REQUIRED_MODULES
from flocker.testtools import FakeSysModule
from .. import packaging
from ..packaging import (
omnibus_package_builder, InstallVirtualEnv, InstallApplication,
BuildPackage, BuildSequence, BuildOptions, BuildScript, DockerBuildOptions,
DockerBuildScript, GetPackageVersion, DelayedRpmVersion, CreateLinks,
PythonPackage, create_virtualenv, VirtualEnv, PackageTypes, Distribution,
Dependency, build_in_docker, DockerBuild, DockerRun,
PACKAGE, PACKAGE_PYTHON, PACKAGE_CLI, PACKAGE_NODE, PACKAGE_DOCKER_PLUGIN,
make_dependencies, available_distributions,
LintPackage,
)
from flocker.common.version import RPMVersion
# Root of the flocker source tree: three directory levels up from this module.
FLOCKER_PATH = FilePath(__file__).parent().parent().parent()

# Skip decorators for tests that shell out to packaging tools which may not
# be installed on the machine running the suite.
require_fpm = skipIf(not which('fpm'), "Tests require the ``fpm`` command.")
require_rpm = skipIf(not which('rpm'), "Tests require the ``rpm`` command.")
require_rpmlint = skipIf(not which('rpmlint'),
                         "Tests require the ``rpmlint`` command.")
require_dpkg = skipIf(not which('dpkg'), "Tests require the ``dpkg`` command.")
require_lintian = skipIf(not which('lintian'),
                         "Tests require the ``lintian`` command.")

# Path to the Docker daemon's Unix socket.
DOCKER_SOCK = '/var/run/docker.sock'
def assert_equal_steps(test_case, expected, actual):
    """
    Assert that the list of provided steps are the same.

    If they are not, display the differences intelligently.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param expected: The expected build step instance.
    :param actual: The actual build step instance.

    :raises: ``TestFailure`` if the build steps are not equal, showing the
        unequal or missing steps.
    """
    expected_steps = getattr(expected, 'steps')
    actual_steps = getattr(actual, 'steps')
    if None in (expected_steps, actual_steps):
        # At least one side is not a step sequence; fall back to plain
        # equality so the standard failure message is produced.
        test_case.assertEqual(expected, actual)
    else:
        # Compare the overlapping prefix pairwise, then classify whatever is
        # left over on either side as missing (expected-only) or extra
        # (actual-only).
        common = min(len(expected_steps), len(actual_steps))
        mismatch_steps = []
        for position in range(common):
            if expected_steps[position] != actual_steps[position]:
                mismatch_steps.append(
                    '* expected: {} !=\n'
                    ' actual: {}'.format(
                        expected_steps[position], actual_steps[position]))
        missing_steps = expected_steps[common:]
        # BUG FIX: the original sliced ``actual_steps[index + 1:]`` with
        # ``index`` initialised to 0, so when ``expected_steps`` was empty
        # the first extra actual step was silently dropped (and a
        # one-extra-step difference went completely unreported).
        extra_steps = actual_steps[common:]
        if mismatch_steps or missing_steps or extra_steps:
            test_case.fail(
                'Step Mismatch\n'
                'Mismatch:\n{}\n'
                'Missing:\n{}\n'
                'Extra:\n{}'.format(
                    '\n'.join(mismatch_steps), missing_steps, extra_steps)
            )
def assert_dict_contains(test_case, expected, actual, message=''):
    """
    Fail unless the supplied ``actual`` ``dict`` contains all the items in
    ``expected``.

    Keys present in ``actual`` but not in ``expected`` are ignored.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param expected: Mapping of key/value pairs that must all be present.
    :param actual: Mapping being checked.
    :param message: Optional prefix for the failure message.
    """
    absent = []
    wrong = []
    for wanted_key, wanted_value in expected.items():
        if wanted_key not in actual:
            absent.append(wanted_key)
            continue
        found_value = actual[wanted_key]
        if found_value != wanted_value:
            wrong.append(
                '{}: {} != {}'.format(wanted_key, wanted_value, found_value)
            )
    if absent or wrong:
        test_case.fail(
            '{}\n'
            'Missing items: {}\n'
            'Mismatch items: {}\n'
            'Actual items: {}'.format(message, absent, wrong, actual)
        )
def parse_colon_dict(data):
    """
    Parse colon separated values into a dictionary, treating lines
    lacking a colon as continuation lines.

    Any leading lines without a colon will be associated with the key
    ``None``.

    This is the format output by ``rpm --query`` and ``dpkg --info``.

    :param bytes data: Data to parse
    :return: A ``dict`` containing the parsed data.
    """
    result = {}
    current_key = None
    for line in data.splitlines():
        pieces = line.split(':', 1)
        if len(pieces) == 2:
            # New "key: value" entry; remember the key for continuations.
            current_key = pieces[0].strip()
            result[current_key] = pieces[1].strip()
        else:
            # Continuation line: append (stripped) text to the value of the
            # most recently seen key, creating an empty entry if needed.
            result[current_key] = (
                result.get(current_key, '') + pieces[0].strip())
    return result
def assert_rpm_headers(test_case, expected_headers, rpm_path):
    """
    Fail unless the ``RPM`` file at ``rpm_path`` contains all the
    ``expected_headers``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param dict expected_headers: A dictionary of header key / value pairs.
    :param FilePath rpm_path: The path to the RPM file under test.
    """
    # Query the package metadata and parse rpm's "Key: value" output.
    rpm_query = ['rpm', '--query', '--info', '--package', rpm_path.path]
    actual_headers = parse_colon_dict(check_output(rpm_query))
    assert_dict_contains(
        test_case, expected_headers, actual_headers, 'Missing RPM Headers: '
    )
def assert_rpm_content(test_case, expected_paths, package_path):
    """
    Fail unless the ``RPM`` file at ``rpm_path`` contains all the
    ``expected_paths``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param set expected_paths: A set of ``FilePath`` s
    :param FilePath package_path: The path to the package under test.
    """
    # One installed path per line of `rpm --query --list` output.
    listing = check_output(
        ['rpm', '--query', '--list', '--package', package_path.path]
    )
    actual_paths = set(FilePath(line) for line in listing.splitlines())
    test_case.assertEqual(expected_paths, actual_paths)
def assert_deb_content(test_case, expected_paths, package_path):
    """
    Fail unless the ``deb`` file at ``package_path`` contains all the
    ``expected_paths``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param set expected_paths: A set of ``FilePath`` s
    :param FilePath package_path: The path to the package under test.
    """
    # Unpack the package into a scratch directory, then collect every
    # extracted file (skipping directories) as an absolute FilePath.
    unpack_dir = FilePath(test_case.mktemp())
    unpack_dir.makedirs()
    check_output(['dpkg', '--extract', package_path.path, unpack_dir.path])
    actual_paths = set(
        FilePath('/').descendant(child.segmentsFrom(unpack_dir))
        for child in unpack_dir.walk()
        if not child.isdir()
    )
    test_case.assertEqual(expected_paths, actual_paths)
def assert_deb_headers(test_case, expected_headers, package_path):
    """
    Fail unless the ``deb`` file at ``package_path`` contains all the
    ``expected_headers``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param dict expected_headers: A dictionary of header key / value pairs.
    :param FilePath package_path: The path to the deb file under test.
    """
    # dpkg --info emits "Key: value" lines which parse_colon_dict handles.
    info = check_output(['dpkg', '--info', package_path.path])
    assert_dict_contains(
        test_case, expected_headers, parse_colon_dict(info),
        'Missing dpkg Headers: '
    )
def assert_rpm_requires(test_case, expected_requirements, rpm_path):
    """
    Fail unless the ``RPM`` file at ``rpm_path`` has all the
    ``expected_requirements``.

    :param test_case: The ``TestCase`` whose assert methods will be called.
    :param list expected_requirements: A list of requirement strings.
    :param FilePath rpm_path: The path to the RPM file under test.
    """
    requires_output = check_output(
        ['rpm', '--query', '--requires', '--package', rpm_path.path]
    )
    actual_requirements = set(
        line.strip() for line in requires_output.splitlines())
    # Only *missing* requirements fail the test; extra ones are allowed.
    missing_requirements = set(expected_requirements) - actual_requirements
    if missing_requirements:
        test_case.fail('Missing requirements: {} in {}'.format(
            missing_requirements, rpm_path.path))
class SpyVirtualEnv(object):
    """
    A ``VirtualEnv`` like class which records the ``package_uri``s which are
    supplied to its ``install`` method.
    """
    def __init__(self):
        # URIs passed to ``install``, in call order.
        self._installed_packages = []

    def install(self, package_uri):
        """Record ``package_uri`` without installing anything."""
        self._installed_packages.append(package_uri)
class SpyStep(object):
    """
    A build step that only records whether its ``run`` method was called.

    :ivar bool ran: ``True`` once ``run`` has been called; ``False`` before.
    """
    # Class-level default; ``run`` shadows it with an instance attribute so
    # instances start out un-run without needing an ``__init__``.
    ran = False

    def run(self):
        """Mark this step as having been run."""
        self.ran = True
class BuildSequenceTests(TestCase):
    """
    Tests for ``BuildSequence``.
    """
    def test_run(self):
        """
        ``BuildSequence`` calls the ``run`` method of each of its ``steps``.
        """
        steps = (SpyStep(), SpyStep())
        BuildSequence(steps=steps).run()
        self.assertEqual(
            (True, True), tuple(step.ran for step in steps))
def assert_has_paths(test_case, expected_paths, parent_path):
    """
    Fail if any of the ``expected_paths`` are not existing relative paths of
    ``parent_path``.

    :param TestCase test_case: The ``TestCase`` with which to make assertions.
    :param list expected_paths: A ``list`` of ``bytes`` relative path names
        which are expected to exist beneath ``parent_path``.
    :param FilePath parent_path: The root ``FilePath`` in which to search for
        ``expected_paths``.
    """
    missing_paths = [
        path for path in expected_paths
        if not parent_path.preauthChild(path).exists()
    ]
    if missing_paths:
        test_case.fail('Missing paths: {}'.format(missing_paths))
class InstallVirtualEnvTests(TestCase):
    """
    Tests for ``InstallVirtualEnv``.
    """
    def test_run(self):
        """
        ``InstallVirtualEnv.run`` installs a virtual python environment using
        create_virtualenv passing ``target_path`` as ``root``.
        """
        virtualenv = VirtualEnv(root=FilePath(self.mktemp()))
        step = InstallVirtualEnv(virtualenv=virtualenv)
        recorded_calls = []
        # Replace the real creation helper so no virtualenv is built.
        self.patch(
            step, '_create_virtualenv',
            lambda **kwargs: recorded_calls.append(kwargs))
        step.run()
        self.assertEqual([dict(root=virtualenv.root)], recorded_calls)
class CreateVirtualenvTests(TestCase):
    """
    Tests for ``create_virtualenv``.
    """
    def test_bin(self):
        """
        ``create_virtualenv`` installs a virtual python environment in its
        ``target_path``.
        """
        virtualenv = VirtualEnv(root=FilePath(self.mktemp()))
        InstallVirtualEnv(virtualenv=virtualenv).run()
        expected_paths = ['bin/pip', 'bin/python']
        assert_has_paths(self, expected_paths, virtualenv.root)
    def test_pythonpath(self):
        """
        ``create_virtualenv`` installs a virtual python whose path does not
        include the system python libraries.
        """
        target_path = FilePath(self.mktemp())
        create_virtualenv(root=target_path)
        # Ask the freshly created interpreter to print its sys.path, one
        # entry per line, so we can inspect it from the outside.
        output = check_output([
            target_path.descendant(['bin', 'python']).path,
            '-c', r'import sys; sys.stdout.write("\n".join(sys.path))'
        ])
        # We should probably check for lib64 as well here.
        self.assertNotIn(
            '/usr/lib/python2.7/site-packages', output.splitlines())
    def test_bootstrap_pyc(self):
        """
        ``create_virtualenv`` creates links to the pyc files for all the
        modules required for the virtualenv bootstrap process.
        """
        target_path = FilePath(self.mktemp())
        create_virtualenv(root=target_path)
        py_files = []
        for module_name in VIRTUALENV_REQUIRED_MODULES:
            py_base = target_path.descendant(['lib', 'python2.7', module_name])
            py = py_base.siblingExtension('.py')
            pyc = py_base.siblingExtension('.pyc')
            # Only modules whose .py actually exists are checked; if either
            # the .py or the .pyc is not a symlink, record a failure entry.
            if py.exists() and False in (py.islink(), pyc.islink()):
                py_files.append('PY: {} > {}\nPYC: {} > {}\n'.format(
                    '/'.join(py.segmentsFrom(target_path)),
                    py.realpath().path,
                    '/'.join(pyc.segmentsFrom(target_path)),
                    pyc.islink() and pyc.realpath().path or 'NOT A SYMLINK'
                ))
        if py_files:
            self.fail(
                'Non-linked bootstrap pyc files in {}: \n{}'.format(
                    target_path, '\n'.join(py_files)
                )
            )
    def test_internal_symlinks_only(self):
        """
        The resulting ``virtualenv`` only contains symlinks to files inside the
        virtualenv and to /usr on the host OS.
        """
        target_path = FilePath(self.mktemp())
        create_virtualenv(root=target_path)
        allowed_targets = (target_path, FilePath('/usr'),)
        bad_links = []
        for path in target_path.walk():
            if path.islink():
                realpath = path.realpath()
                for allowed_target in allowed_targets:
                    try:
                        # segmentsFrom raises ValueError when realpath is not
                        # beneath allowed_target.
                        realpath.segmentsFrom(allowed_target)
                    except ValueError:
                        # Not under this allowed target; try the next one.
                        pass
                    else:
                        # The target is a descendent of an allowed_target.
                        break
                else:
                    # for/else: no allowed target contained the destination.
                    bad_links.append(path)
        if bad_links:
            self.fail(
                "Symlinks outside of virtualenv detected:" +
                '\n'.join(
                    '/'.join(
                        path.segmentsFrom(target_path)
                    ) + ' -> ' + path.realpath().path
                    for path in bad_links
                )
            )
class VirtualEnvTests(TestCase):
    """
    Tests for ``VirtualEnv``.
    """
    def test_install(self):
        """
        ``VirtualEnv.install`` accepts a ``PythonPackage`` instance and
        installs it.
        """
        virtualenv_dir = FilePath(self.mktemp())
        virtualenv = create_virtualenv(root=virtualenv_dir)
        package_dir = FilePath(self.mktemp())
        package = canned_package(package_dir)
        virtualenv.install(package_dir.path)
        # The installed package leaves an egg-info entry in site-packages.
        site_packages = virtualenv_dir.descendant(
            ['lib', 'python2.7', 'site-packages'])
        installed_names = [f.basename() for f in site_packages.children()]
        self.assertIn(
            '{}-{}-py2.7.egg-info'.format(package.name, package.version),
            installed_names
        )
class InstallApplicationTests(TestCase):
    """
    Tests for ``InstallApplication``.
    """
    def test_run(self):
        """
        ``InstallApplication.run`` installs the supplied application in the
        ``target_path``.
        """
        expected_uri = 'http://www.example.com/Bar-1.2.3.whl'
        spy_env = SpyVirtualEnv()
        step = InstallApplication(
            virtualenv=spy_env,
            package_uri=expected_uri
        )
        step.run()
        self.assertEqual([expected_uri], spy_env._installed_packages)
class CreateLinksTests(TestCase):
    """
    Tests for ``CreateLinks``.
    """
    def test_run(self):
        """
        ``CreateLinks.run`` generates symlinks in ``destination_path`` for all
        the supplied ``links``.
        """
        root = FilePath(self.mktemp())
        bin_dir = root.descendant(['usr', 'bin'])
        bin_dir.makedirs()
        script_names = ('flocker-foo', 'flocker-bar')
        source_dir = FilePath('/opt/flocker/bin')
        CreateLinks(
            links=frozenset(
                (source_dir.child(name), bin_dir)
                for name in script_names
            )
        ).run()
        # Every created link resolves back to its source path.
        self.assertEqual(
            set(source_dir.child(name) for name in script_names),
            set(child.realpath() for child in bin_dir.children())
        )
def canned_package(root, version=b'0.3.2'):
    """
    Create a directory containing an empty Python package which can be
    installed and with a name and version which can later be tested.

    :param FilePath root: The top-level directory of the canned package.
    :param version: The version of the created package.
    :return: A ``PythonPackage`` instance.
    """
    name = 'FooBar'
    root.makedirs()
    # Minimal setuptools metadata so the package can be pip-installed.
    setup_py = root.child('setup.py')
    setup_py.setContent(
        dedent("""
            from setuptools import setup
            setup(
                name="{package_name}",
                version="{package_version}",
                py_modules=["{package_name}"],
            )
            """).format(package_name=name, package_version=version)
    )
    # A single module carrying the version, so installation has something
    # to copy and tests can read the version back.
    package_module = root.child(name + ".py")
    package_module.setContent(
        dedent("""
            __version__ = "{package_version}"
            """).format(package_version=version)
    )
    return PythonPackage(name=name, version=version)
class GetPackageVersionTests(TestCase):
    """
    Tests for ``GetPackageVersion``.
    """
    def test_version_default(self):
        """
        ``GetPackageVersion.version`` is ``None`` by default.
        """
        step = GetPackageVersion(virtualenv=None, package_name=None)
        self.assertIs(None, step.version)

    def assert_version_found(self, version):
        """
        ``GetPackageVersion`` assigns the exact version of a found package to
        its ``version`` attribute.

        :param version: The version of the package to test package.
        """
        test_env = FilePath(self.mktemp())
        virtualenv = VirtualEnv(root=test_env)
        InstallVirtualEnv(virtualenv=virtualenv).run()
        package_root = FilePath(self.mktemp())
        test_package = canned_package(root=package_root, version=version)
        InstallApplication(
            virtualenv=virtualenv, package_uri=package_root.path).run()
        step = GetPackageVersion(
            virtualenv=virtualenv, package_name=test_package.name)
        step.run()
        self.assertEqual(test_package.version, step.version)

    def test_version_found(self):
        """
        ``GetPackageVersion`` assigns the exact version of a found package to
        its ``version`` attribute.
        """
        versions = [
            '0.3.2',
            '0.3.3.dev5',
            '0.3.2.post1',
            '0.3.2+1.gf661a6a',
            '0.3.2.post1+1.gf661a6a',
            '0.3.2rc1',
            # A comma was previously missing between the next two literals,
            # which implicitly concatenated them into one bogus version
            # string, so neither intended version was actually tested.
            '0.3.2+1.gf661a6a.dirty',
            '0.3.2.post1+1.gf661a6a.dirty',
        ]
        for version in versions:
            self.assert_version_found(version=version)

    def test_version_not_found(self):
        """
        ``GetPackageVersion.run`` raises an exception if the supplied
        ``package_name`` is not installed in the supplied ``virtual_env``.
        """
        test_env = FilePath(self.mktemp())
        virtualenv = VirtualEnv(root=test_env)
        InstallVirtualEnv(virtualenv=virtualenv).run()
        step = GetPackageVersion(
            virtualenv=virtualenv,
            package_name='PackageWhichIsNotInstalled'
        )
        self.assertRaises(Exception, step.run)
class BuildPackageTests(TestCase):
    """
    Tests for `BuildPackage`.
    """
    @require_fpm
    def setUp(self):
        # No fixture setup needed; the decorator skips every test in this
        # class when the ``fpm`` packaging tool is unavailable.
        pass
    @require_rpm
    def test_rpm(self):
        """
        ``BuildPackage.run`` creates an RPM from the supplied ``source_path``.
        """
        destination_path = FilePath(self.mktemp())
        destination_path.makedirs()
        # A source tree with two files, plus one file mapped to a separate
        # destination, exercising the source_paths mapping below.
        source_path = FilePath(self.mktemp())
        source_path.makedirs()
        source_path.child('Foo').touch()
        source_path.child('Bar').touch()
        expected_prefix = FilePath('/foo/bar')
        expected_paths = set([
            expected_prefix.child('Foo'),
            expected_prefix.child('Bar'),
            FilePath('/other/file'),
        ])
        expected_name = 'FooBar'
        expected_epoch = b'3'
        expected_rpm_version = RPMVersion(version='0.3', release='0.dev.1')
        expected_license = 'My Test License'
        expected_url = 'https://www.example.com/foo/bar'
        expected_vendor = 'Acme Corporation'
        expected_maintainer = 'noreply@example.com'
        expected_architecture = 'i386'
        expected_description = 'Explosive Tennis Balls'
        expected_dependencies = ['test-dep', 'version-dep >= 42']
        BuildPackage(
            package_type=PackageTypes.RPM,
            destination_path=destination_path,
            source_paths={
                source_path: FilePath('/foo/bar'),
                source_path.child('Foo'): FilePath('/other/file'),
            },
            name=expected_name,
            prefix=FilePath('/'),
            epoch=expected_epoch,
            rpm_version=expected_rpm_version,
            license=expected_license,
            url=expected_url,
            vendor=expected_vendor,
            maintainer=expected_maintainer,
            architecture=expected_architecture,
            description=expected_description,
            category="Applications/System",
            dependencies=[
                Dependency(package='test-dep'),
                Dependency(package='version-dep', compare='>=', version='42')],
        ).run()
        # Exactly one RPM file should have been produced.
        rpms = glob('{}*.rpm'.format(
            destination_path.child(expected_name).path))
        self.assertEqual(1, len(rpms))
        expected_headers = dict(
            Name=expected_name,
            Epoch=expected_epoch,
            Version=expected_rpm_version.version,
            Release=expected_rpm_version.release,
            License=expected_license,
            URL=expected_url,
            Vendor=expected_vendor,
            Packager=expected_maintainer,
            Architecture=expected_architecture,
            Group="Applications/System",
        )
        rpm_path = FilePath(rpms[0])
        assert_rpm_requires(self, expected_dependencies, rpm_path)
        assert_rpm_headers(self, expected_headers, rpm_path)
        assert_rpm_content(self, expected_paths, rpm_path)
    @require_dpkg
    def test_deb(self):
        """
        ``BuildPackage.run`` creates a .deb package from the supplied
        ``source_path``.
        """
        destination_path = FilePath(self.mktemp())
        destination_path.makedirs()
        source_path = FilePath(self.mktemp())
        source_path.makedirs()
        source_path.child('Foo').touch()
        source_path.child('Bar').touch()
        expected_prefix = FilePath('/foo/bar')
        expected_paths = set([
            expected_prefix.child('Foo'),
            expected_prefix.child('Bar'),
            FilePath('/other/file'),
            # This is added automatically by fpm despite not supplying the
            # --deb-changelog option
            FilePath('/usr/share/doc/foobar/changelog.Debian.gz'),
        ])
        # Debian package names are conventionally lower case.
        expected_name = 'FooBar'.lower()
        expected_epoch = b'3'
        expected_rpm_version = RPMVersion(version='0.3', release='0.dev.1')
        expected_license = 'My Test License'
        expected_url = 'https://www.example.com/foo/bar'
        expected_vendor = 'Acme Corporation'
        expected_maintainer = 'noreply@example.com'
        expected_architecture = 'i386'
        expected_description = 'Explosive Tennis Balls'
        BuildPackage(
            package_type=PackageTypes.DEB,
            destination_path=destination_path,
            source_paths={
                source_path: FilePath('/foo/bar'),
                source_path.child('Foo'): FilePath('/other/file'),
            },
            name=expected_name,
            prefix=FilePath("/"),
            epoch=expected_epoch,
            rpm_version=expected_rpm_version,
            license=expected_license,
            url=expected_url,
            vendor=expected_vendor,
            maintainer=expected_maintainer,
            architecture=expected_architecture,
            description=expected_description,
            category="admin",
            dependencies=[
                Dependency(package='test-dep'),
                Dependency(package='version-dep', compare='>=', version='42')],
        ).run()
        packages = glob('{}*.deb'.format(
            destination_path.child(expected_name.lower()).path))
        self.assertEqual(1, len(packages))
        expected_headers = dict(
            Package=expected_name,
            Version=(
                # NOTE(review): this concatenation mixes bytes (epoch) and
                # str (version/release) — valid on Python 2 only; confirm
                # before porting.
                expected_epoch
                + b':'
                + expected_rpm_version.version
                + '-'
                + expected_rpm_version.release
            ),
            License=expected_license,
            Vendor=expected_vendor,
            Architecture=expected_architecture,
            Maintainer=expected_maintainer,
            Homepage=expected_url,
            Depends=', '.join(['test-dep', 'version-dep (>= 42)']),
            Section="admin",
        )
        assert_deb_headers(self, expected_headers, FilePath(packages[0]))
        assert_deb_content(self, expected_paths, FilePath(packages[0]))
class LintPackageTests(TestCase):
    """
    Tests for ``LintPackage``.
    """
    @require_fpm
    def setUp(self):
        # No fixture setup needed; the decorator skips every test in this
        # class when the ``fpm`` packaging tool is unavailable.
        pass
    def assert_lint(self, package_type, expected_output):
        """
        ``LintPackage.run`` reports only unfiltered errors and raises
        ``SystemExit``.

        :param PackageTypes package_type: The type of package to test.
        :param bytes expected_output: The expected output of the linting.
        """
        # Build a real (tiny) package to run the linter against.
        destination_path = FilePath(self.mktemp())
        destination_path.makedirs()
        source_path = FilePath(self.mktemp())
        source_path.makedirs()
        source_path.child('Foo').touch()
        source_path.child('Bar').touch()
        BuildPackage(
            package_type=package_type,
            destination_path=destination_path,
            source_paths={
                source_path: FilePath('/foo/bar'),
                source_path.child('Foo'): FilePath('/opt/file'),
            },
            name="package-name",
            prefix=FilePath('/'),
            epoch=b'3',
            rpm_version=RPMVersion(version='0.3', release='0.dev.1'),
            license="Example",
            url="https://package.example/",
            vendor="Acme Corporation",
            maintainer='Someone <noreply@example.com>',
            architecture="all",
            description="Description\n\nExtended",
            category="none",
            dependencies=[]
        ).run()
        step = LintPackage(
            package_type=package_type,
            destination_path=destination_path,
            epoch=b'3',
            rpm_version=RPMVersion(version='0.3', release='0.dev.1'),
            package='package-name',
            architecture='all'
        )
        # Capture the linter's report in memory so it can be asserted on.
        step.output = StringIO()
        self.assertRaises(SystemExit, step.run)
        self.assertEqual(step.output.getvalue(), expected_output)
    @require_rpmlint
    def test_rpm(self):
        """
        rpmlint doesn't report filtered errors.
        """
        # The following warnings and errors are filtered.
        # - E: no-changelogname-tag
        # - W: no-documentation
        # - E: zero-length
        self.assert_lint(PackageTypes.RPM, b"""\
Package errors (package-name):
package-name.noarch: W: non-standard-group default
package-name.noarch: W: invalid-license Example
package-name.noarch: W: invalid-url URL: https://package.example/ \
<urlopen error [Errno -2] Name or service not known>
package-name.noarch: W: cross-directory-hard-link /foo/bar/Foo /opt/file
""")
    @require_lintian
    def test_deb(self):
        """
        lintian doesn't report filtered errors.
        """
        # The following warnings and errors are filtered.
        # - E: package-name: no-copyright-file
        # - E: package-name: dir-or-file-in-opt
        # - W: package-name: file-missing-in-md5sums .../changelog.Debian.gz
        self.assert_lint(PackageTypes.DEB, b"""\
Package errors (package-name):
W: package-name: unknown-section default
E: package-name: non-standard-toplevel-dir foo/
W: package-name: file-in-unusual-dir foo/bar/Bar
W: package-name: file-in-unusual-dir foo/bar/Foo
W: package-name: package-contains-hardlink foo/bar/Foo -> opt/file
""")
class OmnibusPackageBuilderTests(TestCase):
    """
    Tests for ``omnibus_package_builder``.
    """
    def test_centos_7(self):
        """
        On CentOS 7 the builder produces RPM steps in the
        'Applications/System' category.
        """
        self.assert_omnibus_steps(
            distribution=Distribution(name='centos', version='7'),
            expected_category='Applications/System',
            expected_package_type=PackageTypes.RPM,
        )
    def test_ubuntu_14_04(self):
        """
        On Ubuntu 14.04 the builder produces DEB steps in the 'admin'
        category.
        """
        self.assert_omnibus_steps(
            distribution=Distribution(name='ubuntu', version='14.04'),
            expected_category='admin',
            expected_package_type=PackageTypes.DEB,
        )
    def assert_omnibus_steps(
        self,
        distribution=Distribution(name='centos', version='7'),
        expected_category='Applications/System',
        expected_package_type=PackageTypes.RPM,
    ):
        """
        A sequence of build steps is returned.
        """
        self.patch(packaging, 'CURRENT_DISTRIBUTION', distribution)
        # Canned per-package dependency lists, substituted for the real
        # make_dependencies so the expected steps are deterministic.
        fake_dependencies = {
            'python': [Dependency(package='python-dep')],
            'node': [Dependency(package='node-dep')],
            'docker-plugin': [Dependency(package='docker-plugin-dep')],
            'cli': [Dependency(package='cli-dep')],
        }
        def fake_make_dependencies(
                package_name, package_version, distribution):
            """Return the canned list, ignoring version and distribution."""
            return fake_dependencies[package_name]
        self.patch(packaging, 'make_dependencies', fake_make_dependencies)
        expected_destination_path = FilePath(self.mktemp())
        target_path = FilePath(self.mktemp())
        flocker_cli_path = target_path.child('flocker-cli')
        flocker_node_path = target_path.child('flocker-node')
        flocker_docker_plugin_path = target_path.child('flocker-docker-plugin')
        empty_path = target_path.child('empty')
        expected_virtualenv_path = FilePath('/opt/flocker')
        expected_prefix = FilePath('/')
        expected_epoch = PACKAGE.EPOCH.value
        expected_package_uri = b'https://www.example.com/foo/Bar-1.2.3.whl'
        expected_package_version_step = GetPackageVersion(
            virtualenv=VirtualEnv(root=expected_virtualenv_path),
            package_name='flocker'
        )
        expected_version = DelayedRpmVersion(
            package_version_step=expected_package_version_step
        )
        expected_license = PACKAGE.LICENSE.value
        expected_url = PACKAGE.URL.value
        expected_vendor = PACKAGE.VENDOR.value
        expected_maintainer = PACKAGE.MAINTAINER.value
        package_files = FilePath('/package-files')
        expected = BuildSequence(
            steps=(
                # clusterhq-python-flocker steps
                InstallVirtualEnv(
                    virtualenv=VirtualEnv(root=expected_virtualenv_path)),
                InstallApplication(
                    virtualenv=VirtualEnv(root=expected_virtualenv_path),
                    package_uri=b'https://www.example.com/foo/Bar-1.2.3.whl',
                ),
                expected_package_version_step,
                BuildPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    source_paths={
                        expected_virtualenv_path: expected_virtualenv_path
                    },
                    name='clusterhq-python-flocker',
                    prefix=expected_prefix,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    license=expected_license,
                    url=expected_url,
                    vendor=expected_vendor,
                    maintainer=expected_maintainer,
                    architecture='native',
                    description=PACKAGE_PYTHON.DESCRIPTION.value,
                    category=expected_category,
                    directories=[expected_virtualenv_path],
                    dependencies=[Dependency(package='python-dep')],
                ),
                LintPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    package='clusterhq-python-flocker',
                    architecture="native",
                ),
                # clusterhq-flocker-cli steps
                CreateLinks(
                    links=[
                        (FilePath('/opt/flocker/bin/flocker-deploy'),
                         flocker_cli_path),
                        (FilePath('/opt/flocker/bin/flocker'),
                         flocker_cli_path),
                        (FilePath('/opt/flocker/bin/flocker-ca'),
                         flocker_cli_path),
                    ]
                ),
                BuildPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    source_paths={flocker_cli_path: FilePath("/usr/bin")},
                    name='clusterhq-flocker-cli',
                    prefix=expected_prefix,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    license=expected_license,
                    url=expected_url,
                    vendor=expected_vendor,
                    maintainer=expected_maintainer,
                    architecture='all',
                    description=PACKAGE_CLI.DESCRIPTION.value,
                    category=expected_category,
                    dependencies=[Dependency(package='cli-dep')],
                ),
                LintPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    package='clusterhq-flocker-cli',
                    architecture="all",
                ),
                # clusterhq-flocker-node steps
                CreateLinks(
                    links=[
                        (FilePath('/opt/flocker/bin/flocker-volume'),
                         flocker_node_path),
                        (FilePath('/opt/flocker/bin/flocker-control'),
                         flocker_node_path),
                        (FilePath('/opt/flocker/bin/flocker-container-agent'),
                         flocker_node_path),
                        (FilePath('/opt/flocker/bin/flocker-dataset-agent'),
                         flocker_node_path),
                        (FilePath('/opt/flocker/bin/flocker-diagnostics'),
                         flocker_node_path),
                    ]
                ),
                BuildPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    source_paths={
                        flocker_node_path: FilePath("/usr/sbin"),
                        package_files.child('firewalld-services'):
                            FilePath("/usr/lib/firewalld/services/"),
                        # Ubuntu firewall configuration
                        package_files.child('ufw-applications.d'):
                            FilePath("/etc/ufw/applications.d/"),
                        # Systemd configuration
                        package_files.child('systemd'):
                            FilePath("/usr/lib/systemd/system/"),
                        # Upstart configuration
                        package_files.child('upstart'):
                            FilePath('/etc/init'),
                        # rsyslog configuration
                        package_files.child(b'rsyslog'):
                            FilePath(b"/etc/rsyslog.d"),
                        # Flocker Control State dir
                        empty_path: FilePath('/var/lib/flocker/'),
                    },
                    name='clusterhq-flocker-node',
                    prefix=expected_prefix,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    license=expected_license,
                    url=expected_url,
                    vendor=expected_vendor,
                    maintainer=expected_maintainer,
                    architecture='all',
                    description=PACKAGE_NODE.DESCRIPTION.value,
                    category=expected_category,
                    dependencies=[Dependency(package='node-dep')],
                    after_install=package_files.child('after-install.sh'),
                    directories=[FilePath('/var/lib/flocker/')],
                ),
                LintPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    package='clusterhq-flocker-node',
                    architecture="all",
                ),
                # clusterhq-flocker-docker-plugin steps
                CreateLinks(
                    links=[
                        (FilePath('/opt/flocker/bin/flocker-docker-plugin'),
                         flocker_docker_plugin_path),
                    ]
                ),
                BuildPackage(
                    package_type=expected_package_type,
                    destination_path=expected_destination_path,
                    source_paths={
                        flocker_docker_plugin_path: FilePath("/usr/sbin"),
                        # SystemD configuration
                        package_files.child('docker-plugin').child('systemd'):
                            FilePath('/usr/lib/systemd/system'),
                        # Upstart configuration
                        package_files.child('docker-plugin').child('upstart'):
                            FilePath('/etc/init'),
                    },
                    name='clusterhq-flocker-docker-plugin',
                    prefix=FilePath('/'),
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    license=PACKAGE.LICENSE.value,
                    url=PACKAGE.URL.value,
                    vendor=PACKAGE.VENDOR.value,
                    maintainer=PACKAGE.MAINTAINER.value,
                    architecture="all",
                    description=PACKAGE_DOCKER_PLUGIN.DESCRIPTION.value,
                    category=expected_category,
                    dependencies=[Dependency(package='docker-plugin-dep')],
                ),
                LintPackage(
                    # NOTE(review): the other LintPackage steps use
                    # expected_package_type; distribution.package_type()
                    # presumably yields the same value — confirm.
                    package_type=distribution.package_type(),
                    destination_path=expected_destination_path,
                    epoch=expected_epoch,
                    rpm_version=expected_version,
                    package='clusterhq-flocker-docker-plugin',
                    architecture="all",
                ),
            )
        )
        assert_equal_steps(
            self,
            expected,
            omnibus_package_builder(distribution=distribution,
                                    destination_path=expected_destination_path,
                                    package_uri=expected_package_uri,
                                    target_dir=target_path,
                                    package_files=FilePath('/package-files'),
                                    ))
class DockerBuildOptionsTests(TestCase):
    """
    Tests for ``DockerBuildOptions``.
    """
    # A unique sentinel standing in for the platform's package type.
    native_package_type = object()

    def setUp(self):
        """
        Patch ``admin.packaging._native_package_type`` to return a fixed value.
        """
        self.patch(
            packaging, '_native_package_type',
            lambda: self.native_package_type)

    def test_defaults(self):
        """
        ``DockerBuildOptions`` destination path defaults to the current working
        directory.
        """
        self.assertEqual(
            {'destination-path': '.'}, DockerBuildOptions())

    def test_package_uri_missing(self):
        """
        ``DockerBuildOptions`` requires a single positional argument containing
        the URI of the Python package which is being packaged.
        """
        exception = self.assertRaises(
            UsageError, DockerBuildOptions().parseOptions, [])
        self.assertEqual('Wrong number of arguments.', str(exception))

    def test_package_uri_supplied(self):
        """
        ``DockerBuildOptions`` saves the supplied ``package-uri``.
        """
        package_uri = 'http://www.example.com/foo-bar.whl'
        options = DockerBuildOptions()
        options.parseOptions([package_uri])
        self.assertEqual(package_uri, options['package-uri'])
class DockerBuildScriptTests(TestCase):
    """
    Tests for ``DockerBuildScript``.
    """
    def test_usage_error_status(self):
        """
        ``DockerBuildScript.main`` raises ``SystemExit`` if there are missing
        command line options.
        """
        script = DockerBuildScript(sys_module=FakeSysModule(argv=[]))
        exception = self.assertRaises(SystemExit, script.main)
        self.assertEqual(1, exception.code)

    def test_usage_error_message(self):
        """
        ``DockerBuildScript.main`` prints a usage error to ``stderr`` if there
        are missing command line options.
        """
        fake_sys = FakeSysModule(argv=[])
        script = DockerBuildScript(sys_module=fake_sys)
        try:
            script.main()
        except SystemExit:
            pass
        last_stderr_line = fake_sys.stderr.getvalue().splitlines()[-1]
        self.assertEqual('Wrong number of arguments.', last_stderr_line)

    def test_build_command(self):
        """
        ``DockerBuildScript.build_command`` is ``omnibus_package_builder`` by
        default.
        """
        self.assertIs(omnibus_package_builder, DockerBuildScript.build_command)

    def test_run(self):
        """
        ``DockerBuildScript.main`` calls ``run`` on the instance returned by
        ``build_command``.
        """
        expected_destination_path = FilePath(self.mktemp())
        expected_package_uri = 'http://www.example.com/foo/bar.whl'
        fake_sys_module = FakeSysModule(
            argv=[
                'build-command-name',
                '--destination-path=%s' % (expected_destination_path.path,),
                expected_package_uri]
        )
        distribution = Distribution(name='test-distro', version='30')
        self.patch(packaging, 'CURRENT_DISTRIBUTION', distribution)
        script = DockerBuildScript(sys_module=fake_sys_module)
        build_step = SpyStep()
        recorded = []

        def record_arguments(*args, **kwargs):
            recorded.append((args, kwargs))
            return build_step
        script.build_command = record_arguments
        script.main(top_level=FilePath('/top-level'))
        self.assertEqual(
            [((), dict(
                destination_path=expected_destination_path,
                package_uri=expected_package_uri,
                distribution=distribution,
                package_files=FilePath('/top-level/admin/package-files')))],
            recorded)
        self.assertTrue(build_step.ran)
class BuildOptionsTests(TestCase):
    """
    Tests for ``BuildOptions``.
    """
    DISTROS = [u"greatos"]

    def test_defaults(self):
        """
        ``BuildOptions`` destination path defaults to the current working
        directory.
        """
        self.assertEqual(
            {'destination-path': '.', 'distribution': None},
            BuildOptions([]))

    def test_possible_distributions(self):
        """
        ``BuildOptions`` offers as possible distributions all of the names
        passed to its initializer.
        """
        options = BuildOptions([b"greatos", b"betteros"])
        description = options.docs["distribution"]
        self.assertNotIn(
            -1,
            (description.find(b"greatos"), description.find(b"betteros")),
            "Supplied distribution names, greatos and betteros, not found in "
            "--distribution parameter definition: {}".format(description)
        )

    def test_distribution_missing(self):
        """
        ``BuildOptions.parseOptions`` raises ``UsageError`` if
        ``--distribution`` is not supplied.
        """
        options = BuildOptions(self.DISTROS)
        self.assertRaises(
            UsageError,
            options.parseOptions,
            ['http://example.com/fake/uri'])

    def test_package_uri_missing(self):
        """
        ``BuildOptions`` requires a single positional argument containing
        the URI of the Python package which is being packaged.
        """
        exception = self.assertRaises(
            UsageError, BuildOptions(self.DISTROS).parseOptions, [])
        self.assertEqual('Wrong number of arguments.', str(exception))

    def test_package_options_supplied(self):
        """
        ``BuildOptions`` saves the supplied options.
        """
        uri = 'http://www.example.com/foo-bar.whl'
        distro = 'ubuntu1404'
        options = BuildOptions(self.DISTROS + [distro])
        options.parseOptions(['--distribution', distro, uri])
        self.assertEqual(
            (distro, uri),
            (options['distribution'], options['package-uri'])
        )
class AvailableDistributionTests(TestCase):
    """
    Tests for ``available_distributions``.
    """
    def test_dockerfiles(self):
        """
        Directories in the ``admin/build_targets/`` sub-directory of the path
        passed to ``available_distributions`` which themselves contain a
        ``Dockerfile`` are considered distributions and included in the result.
        """
        root = FilePath(self.mktemp())
        build_targets = root.descendant([b"admin", b"build_targets"])
        build_targets.makedirs()
        # A plain file is never treated as a distribution.
        build_targets.child(b"foo").setContent(b"bar")
        # A directory lacking a Dockerfile is not a distribution either.
        nothing = build_targets.child(b"nothing")
        nothing.makedirs()
        # Only a directory containing a Dockerfile counts.
        greatos = build_targets.child(b"greatos")
        greatos.makedirs()
        greatos.child(b"Dockerfile").setContent(
            b"MAINTAINER example@example.invalid\n"
        )
        self.assertEqual(
            {b"greatos"},
            available_distributions(root),
        )
class BuildScriptTests(TestCase):
    """
    Tests for ``BuildScript``.
    """
    def test_usage_error_status(self):
        """
        ``BuildScript.main`` raises ``SystemExit`` if there are missing command
        line options.
        """
        sys_module = FakeSysModule(argv=[])
        exception = self.assertRaises(
            SystemExit,
            BuildScript(sys_module=sys_module).main,
            top_level=FLOCKER_PATH)
        self.assertEqual(1, exception.code)

    def test_usage_error_message(self):
        """
        ``BuildScript.main`` prints a usage error to ``stderr`` if there are
        missing command line options.
        """
        sys_module = FakeSysModule(argv=[])
        script = BuildScript(sys_module=sys_module)
        try:
            script.main(top_level=FLOCKER_PATH)
        except SystemExit:
            pass
        last_stderr_line = sys_module.stderr.getvalue().splitlines()[-1]
        self.assertEqual('Wrong number of arguments.', last_stderr_line)

    def test_build_command(self):
        """
        ``BuildScript.build_command`` is ``build_in_docker`` by default.
        """
        self.assertIs(build_in_docker, BuildScript.build_command)

    def test_run(self):
        """
        ``BuildScript.main`` calls ``run`` on the instance returned by
        ``build_command``.
        """
        destination_path = FilePath(self.mktemp())
        distribution = 'centos7'
        package_uri = 'http://www.example.com/foo/bar.whl'
        sys_module = FakeSysModule(
            argv=[
                'build-command-name',
                '--destination-path', destination_path.path,
                '--distribution=%s' % (distribution,),
                package_uri]
        )
        script = BuildScript(sys_module=sys_module)
        spy_step = SpyStep()
        recorded_calls = []

        def record_arguments(*args, **kwargs):
            # Capture how build_command was invoked, return the spy step.
            recorded_calls.append((args, kwargs))
            return spy_step
        script.build_command = record_arguments
        script.main(top_level=FLOCKER_PATH)
        self.assertEqual(
            [((), dict(destination_path=destination_path,
                       distribution=distribution,
                       package_uri=package_uri,
                       top_level=FLOCKER_PATH))],
            recorded_calls)
        self.assertTrue(spy_step.ran)
class BuildInDockerFunctionTests(TestCase):
    """
    Tests for ``build_in_docker``.
    """
    def test_steps(self):
        """
        ``build_in_docker`` returns a ``BuildSequence`` comprising
        ``DockerBuild`` and ``DockerRun`` instances.
        """
        supplied_distribution = 'Foo'
        # The image tag is derived from the distribution name.
        expected_tag = 'clusterhq/build-%s' % (supplied_distribution,)
        supplied_top_level = FilePath(self.mktemp())
        expected_build_directory = supplied_top_level.descendant(
            ['admin', 'build_targets', supplied_distribution])
        expected_build_directory.makedirs()
        # build_in_docker copies this file into the build directory.
        expected_build_directory.sibling('requirements.txt').setContent('')
        supplied_destination_path = FilePath('/baz/qux')
        # The container mounts the output directory and the source tree at
        # fixed in-container paths.
        expected_volumes = {
            FilePath('/output'): supplied_destination_path,
            FilePath('/flocker'): supplied_top_level,
        }
        expected_package_uri = 'http://www.example.com/foo/bar/whl'
        assert_equal_steps(
            test_case=self,
            expected=BuildSequence(
                steps=[
                    DockerBuild(
                        tag=expected_tag,
                        build_directory=expected_build_directory
                    ),
                    DockerRun(
                        tag=expected_tag,
                        volumes=expected_volumes,
                        command=[expected_package_uri]
                    ),
                ]
            ),
            actual=build_in_docker(
                destination_path=supplied_destination_path,
                distribution=supplied_distribution,
                top_level=supplied_top_level,
                package_uri=expected_package_uri,
            )
        )
    def test_copies_requirements(self):
        """
        A requirements file is copied into the build directory.
        """
        supplied_distribution = 'Foo'
        supplied_top_level = FilePath(self.mktemp())
        expected_build_directory = supplied_top_level.descendant(
            ['admin', 'build_targets', supplied_distribution])
        expected_build_directory.makedirs()
        requirements = 'some_requirement'
        # Seed a requirements.txt next to the build directory ...
        expected_build_directory.sibling('requirements.txt').setContent(
            requirements)
        supplied_destination_path = FilePath('/baz/qux')
        expected_package_uri = 'http://www.example.com/foo/bar/whl'
        build_in_docker(
            destination_path=supplied_destination_path,
            distribution=supplied_distribution,
            top_level=supplied_top_level,
            package_uri=expected_package_uri
        )
        # ... and verify build_in_docker copied it inside, unchanged.
        self.assertEqual(
            requirements,
            expected_build_directory.child('requirements.txt').getContent()
        )
class MakeDependenciesTests(TestCase):
    """
    Tests for ``make_dependencies``.
    """
    def _assert_python_flocker_dependency(self, flocker_package):
        """
        Assert ``make_dependencies`` pins ``clusterhq-python-flocker`` to the
        supplied version for the given flocker sub-package on centos 7.
        """
        version = '1.2.3'
        self.assertIn(
            Dependency(
                package='clusterhq-python-flocker',
                compare='=',
                version=version,
            ),
            make_dependencies(flocker_package, version,
                              Distribution(name='centos', version='7')),
        )

    def test_node(self):
        """
        ``make_dependencies`` includes the supplied ``version`` of
        ``clusterhq-python-flocker`` for ``clusterhq-flocker-node``.
        """
        self._assert_python_flocker_dependency('node')

    def test_cli(self):
        """
        ``make_dependencies`` includes the supplied ``version`` of
        ``clusterhq-python-flocker`` for ``clusterhq-flocker-cli``.
        """
        self._assert_python_flocker_dependency('cli')
| |
#
# Copyright Ericsson AB 2013. All rights reserved
#
# Authors: Ildiko Vancsa <ildiko.vancsa@ericsson.com>
# Balazs Gibizer <balazs.gibizer@ericsson.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common functions for MongoDB and DB2 backends
"""
import datetime
import time
import weakref
from oslo_config import cfg
from oslo_log import log
from oslo_utils import netutils
import pymongo
import pymongo.errors
import six
from six.moves.urllib import parse
from ceilometer.i18n import _, _LI
# MongoDB server error code returned when creating an index whose name
# already exists with a different specification ("IndexOptionsConflict").
ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS = 86
LOG = log.getLogger(__name__)
# FIXME(dhellmann): Configuration options are not part of the Oslo
# library APIs, and should not be used like this.
cfg.CONF.import_opt('max_retries', 'oslo_db.options', group="database")
cfg.CONF.import_opt('retry_interval', 'oslo_db.options', group="database")
# Numeric codes used to store the type of an event trait.
EVENT_TRAIT_TYPES = {'none': 0, 'string': 1, 'integer': 2, 'float': 3,
                     'datetime': 4}
# API comparison operator names mapped to their MongoDB query counterparts.
OP_SIGN = {'lt': '$lt', 'le': '$lte', 'ne': '$ne', 'gt': '$gt', 'ge': '$gte'}
# Oldest supported server version, and the first version that supports the
# complete aggregation feature set ($cond/$size) used further below.
MINIMUM_COMPATIBLE_MONGODB_VERSION = [2, 4]
COMPLETE_AGGREGATE_COMPATIBLE_VERSION = [2, 6]
# Default finalizer applied to aggregation results (coerce to float).
FINALIZE_AGGREGATION_LAMBDA = lambda result, param=None: float(result)
# Cardinality may only be computed over these sample attributes.
CARDINALITY_VALIDATION = (lambda name, param: param in ['resource_id',
                                                        'user_id',
                                                        'project_id',
                                                        'source'])
def make_timestamp_range(start, end,
                         start_timestamp_op=None, end_timestamp_op=None):
    """Create the query document to find timestamps within that range.

    This is done by given two possible datetimes and their operations.
    By default, using $gte for the lower bound and $lt for the upper bound.
    """
    ts_range = {}
    if start:
        # Only 'gt' selects a strict lower bound; anything else is inclusive.
        lower_op = '$gt' if start_timestamp_op == 'gt' else '$gte'
        ts_range[lower_op] = start
    if end:
        # Only 'le' selects an inclusive upper bound; anything else is strict.
        upper_op = '$lte' if end_timestamp_op == 'le' else '$lt'
        ts_range[upper_op] = end
    return ts_range
def make_events_query_from_filter(event_filter):
    """Return start and stop row for filtering and a query.

    Query is based on the selected parameter.

    :param event_filter: storage.EventFilter object.
    """
    conditions = []
    ts_range = make_timestamp_range(event_filter.start_timestamp,
                                    event_filter.end_timestamp)
    if ts_range:
        conditions.append({'timestamp': ts_range})
    if event_filter.event_type:
        conditions.append({'event_type': event_filter.event_type})
    if event_filter.message_id:
        conditions.append({'_id': event_filter.message_id})
    if event_filter.traits_filter:
        for trait_filter in event_filter.traits_filter:
            # The comparison operator defaults to equality; it is popped so
            # that only the trait fields themselves remain in the filter.
            op = trait_filter.pop('op', 'eq')
            trait_query = {}
            for field, value in six.iteritems(trait_filter):
                # All parameters in EventFilter['traits'] are optional, so
                # skip any that were not supplied.
                if value is None:
                    continue
                if field == 'key':
                    trait_query.setdefault('trait_name', value)
                elif field in ['string', 'integer', 'datetime', 'float']:
                    trait_query.setdefault('trait_type',
                                           EVENT_TRAIT_TYPES[field])
                    trait_query.setdefault(
                        'trait_value',
                        value if op == 'eq' else {OP_SIGN[op]: value})
            conditions.append({'traits': {'$elemMatch': trait_query}})
    if event_filter.admin_proj:
        # Admin scoping: match events with no project_id trait at all, or
        # whose project_id trait equals the admin project.
        conditions.append({'$or': [
            {'traits': {'$not': {'$elemMatch': {'trait_name': 'project_id'}}}},
            {'traits': {
                '$elemMatch': {'trait_name': 'project_id',
                               'trait_value': event_filter.admin_proj}}}]})
    if conditions:
        return {'$and': conditions}
    return {}
def make_query_from_filter(sample_filter, require_meter=True):
    """Return a query dictionary based on the settings in the filter.

    :param sample_filter: SampleFilter instance
    :param require_meter: If true and the filter does not have a meter,
                          raise an error.
    """
    query = {}
    if sample_filter.user:
        query['user_id'] = sample_filter.user
    if sample_filter.project:
        query['project_id'] = sample_filter.project
    if sample_filter.meter:
        query['counter_name'] = sample_filter.meter
    elif require_meter:
        raise RuntimeError('Missing required meter specifier')
    ts_range = make_timestamp_range(sample_filter.start_timestamp,
                                    sample_filter.end_timestamp,
                                    sample_filter.start_timestamp_op,
                                    sample_filter.end_timestamp_op)
    if ts_range:
        query['timestamp'] = ts_range
    if sample_filter.resource:
        query['resource_id'] = sample_filter.resource
    if sample_filter.source:
        query['source'] = sample_filter.source
    if sample_filter.message_id:
        query['message_id'] = sample_filter.message_id
    # Samples store their metadata under 'resource_metadata', so prefix each
    # (escaped) metaquery key accordingly.
    query.update(
        ('resource_%s' % key, value)
        for key, value in six.iteritems(
            improve_keys(sample_filter.metaquery, metaquery=True)))
    return query
def quote_key(key, reverse=False):
    """Prepare key for storage data in MongoDB.

    :param key: key that should be quoted
    :param reverse: boolean, True --- if we need a reverse order of the keys
                    parts
    :return: iter of quoted part of the key
    """
    parts = key.split('.')
    if reverse:
        parts = parts[::-1]
    for part in parts:
        # MongoDB field names must not begin with '$'; percent-encode those.
        yield parse.quote(part) if part.startswith('$') else part
def improve_keys(data, metaquery=False):
    """Improves keys in dict if they contained '.' or started with '$'.

    :param data: is a dictionary where keys need to be checked and improved
    :param metaquery: boolean, if True dots are not escaped from the keys
    :return: improved dictionary if keys contained dots or started with '$':

             {'a.b': 'v'} -> {'a': {'b': 'v'}}
             {'$ab': 'v'} -> {'%24ab': 'v'}
    """
    if not isinstance(data, dict):
        return data
    if metaquery:
        # Iterate over a snapshot of the keys: the body replaces keys
        # in-place, and mutating a dict while iterating its live key
        # iterator is not safe (RuntimeError / skipped keys).
        for key in list(data):
            # NOTE(review): only keys containing '.$' are escaped here; a
            # metaquery key that merely *starts* with '$' passes through
            # unchanged -- confirm that is intended.
            if '.$' in key:
                new_key = '.'.join(quote_key(key))
                data[new_key] = data.pop(key)
    else:
        # Snapshot the items for the same mutation-safety reason as above.
        for key, value in list(data.items()):
            if isinstance(value, dict):
                improve_keys(value)
            if '.' in key:
                # Rebuild {'a.b': v} as {'a': {'b': v}}, constructing the
                # nested dicts from the innermost part outwards.
                new_dict = {}
                for part in quote_key(key, reverse=True):
                    new = {}
                    new[part] = new_dict if new_dict else data.pop(key)
                    new_dict = new
                data.update(new_dict)
            elif key.startswith('$'):
                new_key = parse.quote(key)
                data[new_key] = data.pop(key)
    return data
def unquote_keys(data):
    """Restores initial view of 'quoted' keys in dictionary data

    :param data: is a dictionary
    :return: data with restored keys if they were 'quoted'.
    """
    if isinstance(data, dict):
        # Snapshot the items: keys are replaced in-place below, and mutating
        # a dict while iterating its live item view is not safe on Python 3
        # (RuntimeError: dictionary changed size during iteration).
        for key, value in list(data.items()):
            if isinstance(value, dict):
                # Recurse into nested documents first.
                unquote_keys(value)
            if key.startswith('%24'):
                # '%24' is the percent-encoding of '$' added by quote_key().
                data[parse.unquote(key)] = data.pop(key)
    return data
class ConnectionPool(object):
    """Cache of MongoDB clients, held via weak references."""

    def __init__(self):
        # pool key -> weakref to a MongoProxy client; dead references are
        # simply replaced on the next connect() call.
        self._pool = {}

    def connect(self, url):
        """Return a client for ``url``, reusing a live pooled one if any."""
        connection_options = pymongo.uri_parser.parse_uri(url)
        # Drop the parts that do not identify the server connection itself.
        del connection_options['database']
        del connection_options['username']
        del connection_options['password']
        del connection_options['collection']
        # NOTE(review): tuple(dict) yields the remaining option *names* only
        # (not their values), so every URL with the same set of option names
        # shares one pool slot -- confirm this pooling granularity is
        # intended.
        pool_key = tuple(connection_options)
        if pool_key in self._pool:
            # Dereference the weakref; it returns None if the client died.
            client = self._pool.get(pool_key)()
            if client:
                return client
        splitted_url = netutils.urlsplit(url)
        log_data = {'db': splitted_url.scheme,
                    'nodelist': connection_options['nodelist']}
        LOG.info(_LI('Connecting to %(db)s on %(nodelist)s') % log_data)
        client = self._mongo_connect(url)
        self._pool[pool_key] = weakref.ref(client)
        return client

    @staticmethod
    def _mongo_connect(url):
        """Create a new proxied MongoClient, logging connection failures."""
        try:
            return MongoProxy(pymongo.MongoClient(url))
        except pymongo.errors.ConnectionFailure as e:
            LOG.warn(_('Unable to connect to the database server: '
                       '%(errmsg)s.') % {'errmsg': e})
            raise
class QueryTransformer(object):
    """Translate the complex-query JSON filter syntax to MongoDB queries."""

    # Simple comparison operators mapped to their MongoDB equivalents.
    operators = {"<": "$lt",
                 ">": "$gt",
                 "<=": "$lte",
                 "=<": "$lte",
                 ">=": "$gte",
                 "=>": "$gte",
                 "!=": "$ne",
                 "in": "$in",
                 "=~": "$regex"}

    complex_operators = {"or": "$or",
                         "and": "$and"}

    ordering_functions = {"asc": pymongo.ASCENDING,
                          "desc": pymongo.DESCENDING}

    def transform_orderby(self, orderby):
        """Convert [{field: "asc"/"desc"}, ...] into pymongo sort pairs."""
        orderby_filter = []
        for field in orderby:
            # Each entry is a single-key dict: {field_name: direction}.
            field_name = list(field.keys())[0]
            ordering = self.ordering_functions[list(field.values())[0]]
            orderby_filter.append((field_name, ordering))
        return orderby_filter

    @staticmethod
    def _move_negation_to_leaf(condition):
        """Moves every not operator to the leafs.

        Moving is going by applying the De Morgan rules and annihilating
        double negations.
        """
        def _apply_de_morgan(tree, negated_subtree, negated_op):
            # not(A and B) == (not A) or (not B), and vice versa; the
            # negation is pushed one level down onto each child.
            if negated_op == "and":
                new_op = "or"
            else:
                new_op = "and"
            tree[new_op] = [{"not": child}
                            for child in negated_subtree[negated_op]]
            del tree["not"]

        def transform(subtree):
            # Each subtree is a single-key dict: {operator: operand(s)}.
            op = list(subtree.keys())[0]
            if op in ["and", "or"]:
                [transform(child) for child in subtree[op]]
            elif op == "not":
                negated_tree = subtree[op]
                negated_op = list(negated_tree.keys())[0]
                if negated_op == "and":
                    _apply_de_morgan(subtree, negated_tree, negated_op)
                    # Re-process this node: the pushed-down negations may
                    # still sit above complex operators.
                    transform(subtree)
                elif negated_op == "or":
                    _apply_de_morgan(subtree, negated_tree, negated_op)
                    transform(subtree)
                elif negated_op == "not":
                    # two consecutive not annihilates themselves
                    value = list(negated_tree.values())[0]
                    new_op = list(value.keys())[0]
                    subtree[new_op] = negated_tree[negated_op][new_op]
                    del subtree["not"]
                    transform(subtree)

        transform(condition)

    def transform_filter(self, condition):
        """Return the MongoDB query equivalent of ``condition``."""
        # in Mongo not operator can only be applied to
        # simple expressions so we have to move every
        # not operator to the leafs of the expression tree
        self._move_negation_to_leaf(condition)
        return self._process_json_tree(condition)

    def _handle_complex_op(self, complex_op, nodes):
        # Translate each child, then join them under $and/$or.
        element_list = []
        for node in nodes:
            element = self._process_json_tree(node)
            element_list.append(element)
        complex_operator = self.complex_operators[complex_op]
        op = {complex_operator: element_list}
        return op

    def _handle_not_op(self, negated_tree):
        # assumes that not is moved to the leaf already
        # so we are next to a leaf
        negated_op = list(negated_tree.keys())[0]
        negated_field = list(negated_tree[negated_op].keys())[0]
        value = negated_tree[negated_op][negated_field]
        if negated_op == "=":
            # not(=) is expressed directly with $ne.
            return {negated_field: {"$ne": value}}
        elif negated_op == "!=":
            # not(!=) collapses to a plain equality match.
            return {negated_field: value}
        else:
            return {negated_field: {"$not":
                                    {self.operators[negated_op]: value}}}

    def _handle_simple_op(self, simple_op, nodes):
        field_name = list(nodes.keys())[0]
        field_value = list(nodes.values())[0]
        # no operator for equal in Mongo
        if simple_op == "=":
            op = {field_name: field_value}
            return op
        operator = self.operators[simple_op]
        op = {field_name: {operator: field_value}}
        return op

    def _process_json_tree(self, condition_tree):
        # Dispatch on the (single) operator at this node.
        operator_node = list(condition_tree.keys())[0]
        nodes = list(condition_tree.values())[0]
        if operator_node in self.complex_operators:
            return self._handle_complex_op(operator_node, nodes)
        if operator_node == "not":
            negated_tree = condition_tree[operator_node]
            return self._handle_not_op(negated_tree)
        return self._handle_simple_op(operator_node, nodes)
def safe_mongo_call(call):
    """Decorator retrying ``call`` when the mongodb primary is unreachable.

    Retries on pymongo.errors.AutoReconnect up to
    ``[database] max_retries`` times, sleeping ``retry_interval`` seconds
    between attempts.  A negative ``max_retries`` retries forever.

    NOTE(review): consider functools.wraps(call) on ``closure`` so the
    wrapped function keeps its name/docstring.
    """
    def closure(*args, **kwargs):
        max_retries = cfg.CONF.database.max_retries
        retry_interval = cfg.CONF.database.retry_interval
        attempts = 0
        while True:
            try:
                return call(*args, **kwargs)
            except pymongo.errors.AutoReconnect as err:
                # Give up once attempts reaches a non-negative max_retries;
                # a negative max_retries never satisfies 0 <= max_retries.
                if 0 <= max_retries <= attempts:
                    LOG.error(_('Unable to reconnect to the primary mongodb '
                                'after %(retries)d retries. Giving up.') %
                              {'retries': max_retries})
                    raise
                LOG.warn(_('Unable to reconnect to the primary mongodb: '
                           '%(errmsg)s. Trying again in %(retry_interval)d '
                           'seconds.') %
                         {'errmsg': err, 'retry_interval': retry_interval})
                attempts += 1
                time.sleep(retry_interval)
    return closure
class MongoConn(object):
    """Callable proxy around a single pymongo method.

    ``__call__`` is wrapped with ``safe_mongo_call`` so the underlying
    method is retried transparently on ``AutoReconnect``.
    """
    def __init__(self, method):
        # The raw pymongo bound method being proxied.
        self.method = method

    @safe_mongo_call
    def __call__(self, *args, **kwargs):
        return self.method(*args, **kwargs)
# Public attribute names of the pymongo objects we proxy.  Any attribute
# access matching one of these names is wrapped in MongoConn by
# MongoProxy.__getattr__ so it retries on AutoReconnect.
# (Single set comprehension instead of three set([listcomp]) constructions.)
MONGO_METHODS = {
    name
    for source in (pymongo.collection.Collection, pymongo.MongoClient, pymongo)
    for name in dir(source)
    if not name.startswith('_')
}
class MongoProxy(object):
    """Proxy around pymongo objects adding automatic-reconnect retries."""

    def __init__(self, conn):
        self.conn = conn

    def __getitem__(self, item):
        """Create and return proxy around the method in the connection.

        :param item: name of the connection
        """
        return MongoProxy(self.conn[item])

    def find(self, *args, **kwargs):
        # We need this modifying method to return a CursorProxy object so that
        # we can handle the Cursor next function to catch the AutoReconnect
        # exception.
        return CursorProxy(self.conn.find(*args, **kwargs))

    def create_index(self, keys, name=None, *args, **kwargs):
        """Create an index, recreating it if a conflicting spec exists."""
        try:
            self.conn.create_index(keys, name=name, *args, **kwargs)
        except pymongo.errors.OperationFailure as e:
            # Compare error codes with '==': the previous 'is' check was an
            # identity comparison on an int and only worked by accident via
            # CPython's small-integer cache.
            if e.code == ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS:
                LOG.info(_LI("Index %s will be recreate.") % name)
                self._recreate_index(keys, name, *args, **kwargs)

    @safe_mongo_call
    def _recreate_index(self, keys, name, *args, **kwargs):
        # Drop then recreate; wrapped so the pair retries on AutoReconnect.
        self.conn.drop_index(name)
        self.conn.create_index(keys, name=name, *args, **kwargs)

    def __getattr__(self, item):
        """Wrap MongoDB connection.

        If item is the name of an executable method, for example find or
        insert, wrap this method in the MongoConn.
        Else wrap getting attribute with MongoProxy.
        """
        if item in ('name', 'database'):
            return getattr(self.conn, item)
        if item in MONGO_METHODS:
            return MongoConn(getattr(self.conn, item))
        return MongoProxy(getattr(self.conn, item))

    def __call__(self, *args, **kwargs):
        return self.conn(*args, **kwargs)
class CursorProxy(pymongo.cursor.Cursor):
    """Proxy around a pymongo Cursor retrying ``next`` on AutoReconnect."""

    def __init__(self, cursor):
        # Deliberately does not call Cursor.__init__: this object only
        # delegates to the wrapped cursor.
        self.cursor = cursor

    def __getitem__(self, item):
        return self.cursor[item]

    @safe_mongo_call
    def next(self):
        """Wrap Cursor next method.

        This method will be executed before each Cursor next method call.
        """
        # Snapshot the cursor *before* advancing it so a failed next() can
        # be retried from the same position.  Cloning outside the try also
        # guarantees save_cursor is bound even if clone() itself raises
        # AutoReconnect (previously that caused an UnboundLocalError in the
        # handler, masking the real error).
        save_cursor = self.cursor.clone()
        try:
            return self.cursor.next()
        except pymongo.errors.AutoReconnect:
            self.cursor = save_cursor
            raise

    def __getattr__(self, item):
        return getattr(self.cursor, item)
class AggregationFields(object):
    """One version-specific specification of a mongodb aggregation.

    :param version: minimum [major, minor] server version this spec needs
    :param group: $group stage fragment, or a callable producing one when
                  ``parametrized`` is True
    :param project: $project stage fragment, or a callable producing one
                    when ``parametrized`` is True
    :param finalize: callable post-processing the aggregated value; defaults
                     to coercing the result to float
    :param parametrized: whether ``group``/``project`` are callables taking
                         the aggregation parameter
    :param validate: callable(name, param) -> bool checking the aggregation
                     parameter; defaults to accepting anything
    """
    def __init__(self, version,
                 group,
                 project,
                 finalize=None,
                 parametrized=False,
                 validate=None):
        self.version = version
        self._finalize = finalize or FINALIZE_AGGREGATION_LAMBDA
        self.validate = validate or (lambda name, param: True)
        if parametrized:
            self.group = lambda *args: group(*args)
            self.project = lambda *args: project(*args)
        else:
            # Static specs ignore any parameter and return the fragment
            # unchanged.  (The previous single-lambda form relied on ternary
            # precedence inside the lambda body and was easy to misread.)
            self.group = lambda *args: group
            self.project = lambda *args: project

    def finalize(self, name, data, param=None):
        """Return {field: finalized value} for one aggregation result."""
        field = ("%s" % name) + ("/%s" % param if param else "")
        # self._finalize is always truthy (a default is substituted in
        # __init__), so the old "if self._finalize" fallback was dead code.
        return {field: self._finalize(data.get(field))}
class Aggregation(object):
    """A named aggregation with one spec per supported mongodb version."""

    def __init__(self, name, aggregation_fields):
        self.name = name
        if not isinstance(aggregation_fields, list):
            aggregation_fields = [aggregation_fields]
        # Newest (highest minimum version) first, so the scan below picks
        # the most capable spec that the server can run.
        self.aggregation_fields = sorted(
            aggregation_fields,
            key=lambda af: getattr(af, "version"),
            reverse=True)

    def _get_compatible_aggregation_field(self, version_array):
        """Return the first spec whose minimum version the server meets."""
        if version_array:
            version_array = version_array[0:2]
        else:
            # Unknown server version: assume the oldest supported one.
            version_array = MINIMUM_COMPATIBLE_MONGODB_VERSION
        for candidate in self.aggregation_fields:
            if version_array >= candidate.version:
                return candidate

    def group(self, param=None, version_array=None):
        """Return the $group fragment for the given server version."""
        return self._get_compatible_aggregation_field(
            version_array).group(param)

    def project(self, param=None, version_array=None):
        """Return the $project fragment for the given server version."""
        return self._get_compatible_aggregation_field(
            version_array).project(param)

    def finalize(self, data, param=None, version_array=None):
        """Return the finalized {field: value} aggregation result."""
        return self._get_compatible_aggregation_field(
            version_array).finalize(self.name, data, param)

    def validate(self, param=None, version_array=None):
        """Check the aggregation parameter for the given server version."""
        return self._get_compatible_aggregation_field(
            version_array).validate(self.name, param)
# Pre-built aggregations shared by the MongoDB/DB2 storage drivers.  Each
# bundles the $group/$project pipeline fragments (plus an optional
# client-side finalizer) for one statistics function.
SUM_AGGREGATION = Aggregation(
    "sum", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
                             {"sum": {"$sum": "$counter_volume"}},
                             {"sum": "$sum"}))
AVG_AGGREGATION = Aggregation(
    "avg", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
                             {"avg": {"$avg": "$counter_volume"}},
                             {"avg": "$avg"}))
MIN_AGGREGATION = Aggregation(
    "min", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
                             {"min": {"$min": "$counter_volume"}},
                             {"min": "$min"}))
MAX_AGGREGATION = Aggregation(
    "max", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
                             {"max": {"$max": "$counter_volume"}},
                             {"max": "$max"}))
COUNT_AGGREGATION = Aggregation(
    "count", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
                               {"count": {"$sum": 1}},
                               {"count": "$count"}))
# Standard deviation is accumulated server-side as (sum of squares, count,
# sum) and the final value sqrt(n*sum_sq - sum^2)/n is computed client-side
# in the finalizer lambda.
STDDEV_AGGREGATION = Aggregation(
    "stddev",
    AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
                      {"std_square": {
                          "$sum": {
                              "$multiply": ["$counter_volume",
                                            "$counter_volume"]
                          }},
                          "std_count": {"$sum": 1},
                          "std_sum": {"$sum": "$counter_volume"}},
                      {"stddev": {
                          "count": "$std_count",
                          "sum": "$std_sum",
                          "square_sum": "$std_square"}},
                      lambda stddev: ((stddev['square_sum']
                                       * stddev['count']
                                       - stddev["sum"] ** 2) ** 0.5
                                      / stddev['count'])))
# Cardinality counts distinct values of a field.  Servers >= 2.6 compute the
# set size with $cond/$size; older servers return the whole set and the
# client-side len() finalizer counts it.
CARDINALITY_AGGREGATION = Aggregation(
    "cardinality",
    # $cond operator available only in MongoDB 2.6+
    [AggregationFields(COMPLETE_AGGREGATE_COMPATIBLE_VERSION,
                       lambda field: ({"cardinality/%s" % field:
                                       {"$addToSet": "$%s" % field}}),
                       lambda field: {
                           "cardinality/%s" % field: {
                               "$cond": [
                                   {"$eq": ["$cardinality/%s" % field, None]},
                                   0,
                                   {"$size": "$cardinality/%s" % field}]
                           }},
                       validate=CARDINALITY_VALIDATION,
                       parametrized=True),
     AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
                       lambda field: ({"cardinality/%s" % field:
                                       {"$addToSet": "$%s" % field}}),
                       lambda field: ({"cardinality/%s" % field:
                                       "$cardinality/%s" % field}),
                       finalize=len,
                       validate=CARDINALITY_VALIDATION,
                       parametrized=True)]
)
def to_unix_timestamp(timestamp):
    """Convert a datetime to an integer unix timestamp.

    Non-datetime values are passed through unchanged.
    """
    if not isinstance(timestamp, datetime.datetime):
        return timestamp
    return int(time.mktime(timestamp.timetuple()))
def from_unix_timestamp(timestamp):
    """Convert an int/float unix timestamp to a naive local datetime.

    Values that are not numbers are passed through unchanged.
    """
    if isinstance(timestamp, six.integer_types + (float,)):
        return datetime.datetime.fromtimestamp(timestamp)
    return timestamp
| |
from __future__ import division
from itertools import chain
from operator import itemgetter, attrgetter
from collections import defaultdict
import math
from django.db import connection
from django.db import models
from django.db.models import Sum, Q, Count, F
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.auth.models import User
from actstream.models import Follow
from laws.models import VoteAction, Vote
from mks.models import Party, Member, Knesset, Membership
import queries
from tagging.models import Tag
# (stored value, display label) pairs for how strongly a vote complies with
# an agenda.  The empty-string entry renders as the "unset" option in forms.
AGENDAVOTE_SCORE_CHOICES = (
    ('',_("Not selected")),
    (-1.0, _("Opposes fully")),
    (-0.5, _("Opposes partially")),
    (0.0, _("Agnostic")),
    (0.5, _("Complies partially")),
    (1.0, _("Complies fully")),
)
# (stored value, display label) pairs weighting an item within an agenda.
IMPORTANCE_CHOICES = (
    ('',_("Not selected")),
    (0.0, _("Marginal Importance")),
    (0.3, _("Medium Importance")),
    (0.6, _("High Importance")),
    (1.0, _("Very High Importance")),
)
class UserSuggestedVote(models.Model):
    """A vote a user proposes to attach to an agenda, pending editor review."""
    agenda = models.ForeignKey('Agenda', related_name='user_suggested_votes')
    vote = models.ForeignKey('laws.Vote', related_name='user_suggested_agendas')
    # Free-text justification supplied by the suggesting user.
    reasoning = models.TextField(blank=True, default='')
    user = models.ForeignKey(User, related_name='suggested_agenda_votes')
    # Set once the agenda's editor has been notified of this suggestion.
    sent_to_editor = models.BooleanField(default=False)
    class Meta:
        # Each user may suggest a given vote for a given agenda only once.
        unique_together = ('agenda','vote','user')
class AgendaVoteManager(models.Manager):
    """Manager running the raw-SQL recomputation of agenda summaries."""
    # Per-backend SQL fragments: 'monthfunc' truncates a timestamp to the
    # first day of its month, 'nowfunc' yields the current date/time.
    db_month_trunc_functions = {
        'sqlite3':{'monthfunc':"strftime('%%Y-%%m-01'",'nowfunc':'date()'},
        'postgresql_psycopg2':{'monthfunc':"date_trunc('month'",'nowfunc':'now()'}
    }
    def compute_all(self):
        """Rebuild agenda, MK and party summaries using raw SQL.

        NOTE(review): raises KeyError for database engines other than
        sqlite3/postgresql_psycopg2 -- confirm only those are deployed.
        """
        db_engine = settings.DATABASES['default']['ENGINE']
        db_functions = self.db_month_trunc_functions[db_engine.split('.')[-1]]
        agenda_query = queries.BASE_AGENDA_QUERY % db_functions
        cursor = connection.cursor()
        cursor.execute(agenda_query)
        mk_query = queries.BASE_MK_QUERY % db_functions
        cursor.execute(mk_query)
        party_query = queries.BASE_PARTY_QUERY % db_functions
        cursor.execute(party_query)
class AgendaVote(models.Model):
    """Links a Knesset vote to an agenda with a position score and weight."""
    agenda = models.ForeignKey('Agenda', related_name='agendavotes')
    vote = models.ForeignKey('laws.Vote', related_name='agendavotes')
    # Position of the agenda on this vote (-1.0 opposes .. 1.0 complies).
    score = models.FloatField(default=0.0, choices=AGENDAVOTE_SCORE_CHOICES)
    # Weight of this vote within the agenda (0.0 .. 1.0).
    importance = models.FloatField(default=1.0, choices=IMPORTANCE_CHOICES)
    reasoning = models.TextField(null=True,blank=True)
    objects = AgendaVoteManager()
    def detail_view_url(self):
        return reverse('agenda-vote-detail', args=[self.pk])
    def get_score_header(self):
        return _('Position')
    def get_importance_header(self):
        return _('Importance')
    class Meta:
        unique_together= ('agenda', 'vote')
    def __unicode__(self):
        return u"%s %s" % (self.agenda,self.vote)
    def update_monthly_counters(self):
        """Fold this vote into the per-month SummaryAgenda rows.

        NOTE(review): relies on dateMonthTruncate and SummaryAgenda, which
        are presumably defined elsewhere in this module (not visible here).
        The filter()/map() calls below assume Python 2 list semantics.
        """
        # Signed weighted contribution of this vote to the agenda.
        agendaScore = float(self.score) * float(self.importance)
        objMonth = dateMonthTruncate(self.vote.time)
        summaryObjects = list(SummaryAgenda.objects.filter( agenda=self.agenda,
                                                            month=objMonth).all())
        agendaSummary = None
        # Create or bump the per-agenda ('AG') summary row for this month.
        if not filter(lambda summary:summary.summary_type=='AG',summaryObjects):
            agendaSummary = SummaryAgenda(month=objMonth,
                                          agenda=self.agenda,
                                          summary_type='AG',
                                          score=abs(agendaScore),
                                          votes=1)
        else:
            existingAgenda = filter(lambda summary:summary.summary_type=='AG',summaryObjects)[0]
            existingAgenda.votes += 1
            existingAgenda.score += abs(agendaScore)
            existingAgenda.save()
        # Existing per-member ('MK') summary rows keyed by member id.
        agendasByMk = dict(map(lambda summary:(summary.mk_id,summary),
                               filter(lambda summary:summary.summary_type=='MK',
                                      summaryObjects)))
        newObjects = []
        if agendaSummary:
            newObjects.append(agendaSummary)
        voters = defaultdict(list)
        for vote_action in self.vote.voteaction_set.all():
            mkSummary = agendasByMk.get(vote_action.member_id, None)
            if not mkSummary:
                # First vote this month for this MK: create a fresh row;
                # 'for' votes add the score, 'against' votes subtract it.
                mkSummary = SummaryAgenda(month=objMonth,
                                          agenda=self.agenda,
                                          summary_type='MK',
                                          mk_id=vote_action.member_id,
                                          votes=1,
                                          score=agendaScore * (1 if vote_action.type == 'for' else -1),
                                          for_votes=(1 if vote_action.type == 'for' else 0),
                                          against_votes=(1 if vote_action.type == 'against' else 0))
                newObjects.append(mkSummary)
            else:
                # Existing rows are updated in bulk below, grouped by type.
                voters[vote_action.type].append(vote_action.member_id)
        SummaryAgenda.objects.filter(agenda=self.agenda,mk_id__in=voters['for'],month=objMonth).update(votes=F('votes') + 1, score=F('score')+agendaScore, for_votes=F('for_votes') + 1)
        SummaryAgenda.objects.filter(agenda=self.agenda,mk_id__in=voters['against'],month=objMonth).update(votes=F('votes') + 1, score=F('score')-agendaScore, against_votes=F('against_votes') + 1)
        if newObjects:
            SummaryAgenda.objects.bulk_create(newObjects)
    def save(self,*args,**kwargs):
        # Keep the monthly summary tables in sync on every save.
        super(AgendaVote,self).save(*args,**kwargs)
        self.update_monthly_counters()
class AgendaMeeting(models.Model):
    """Relates a committee meeting to an agenda with an importance score."""
    agenda = models.ForeignKey('Agenda', related_name='agendameetings')
    meeting = models.ForeignKey('committees.CommitteeMeeting',
                                related_name='agendacommitteemeetings')
    # NOTE(review): unlike AgendaVote, 'score' here uses IMPORTANCE_CHOICES
    # and there is no separate importance field -- presumably intentional.
    score = models.FloatField(default=0.0, choices=IMPORTANCE_CHOICES)
    reasoning = models.TextField(null=True)
    def detail_view_url(self):
        return reverse('agenda-meeting-detail', args=[self.pk])
    def get_score_header(self):
        # The score column stores importance values, hence this header.
        return _('Importance')
    def get_importance_header(self):
        return ''
    class Meta:
        unique_together = ('agenda', 'meeting')
    def __unicode__(self):
        return u"%s %s" % (self.agenda,self.meeting)
class AgendaBill(models.Model):
    """Links a bill to an agenda with a position score and importance."""
    agenda = models.ForeignKey('Agenda', related_name='agendabills')
    # NOTE(review): model reference uses lowercase 'laws.bill' (Django
    # resolves app.model names case-insensitively, so this works).
    bill = models.ForeignKey('laws.bill', related_name='agendabills')
    score = models.FloatField(default=0.0, choices=AGENDAVOTE_SCORE_CHOICES)
    importance = models.FloatField(default=1.0, choices=IMPORTANCE_CHOICES)
    reasoning = models.TextField(null=True)
    def detail_view_url(self):
        return reverse('agenda-bill-detail', args=[self.pk])
    def get_score_header(self):
        return _('Position')
    def get_importance_header(self):
        return _('Importance')
    class Meta:
        unique_together = ('agenda', 'bill')
    def __unicode__(self):
        return u"%s %s" % (self.agenda,self.bill)
def get_top_bottom(lst, top, bottom):
    """
    Returns a cropped list, keeping some of the list's top and bottom.
    Edge conditions are handled gracefuly.
    Input list should be ascending so that top is at the end.
    """
    shortfall = top + bottom - len(lst)
    if shortfall > 0:
        # Not enough elements for both ends: shrink each side by half the
        # shortfall, charging the extra odd element to the top.
        bottom -= shortfall // 2
        top -= shortfall // 2 + (shortfall % 2)
    top_lst = lst[-top:] if top else []
    bottom_lst = lst[:bottom] if bottom else []
    return {'top': top_lst,
            'bottom': bottom_lst}
class AgendaManager(models.Manager):
    """Query helpers for selecting and scoring agendas."""
    def get_selected_for_instance(self, instance, user=None, top=3, bottom=3):
        # Returns interesting agendas for model instances such as: member, party
        agendas = list(self.get_relevant_for_user(user))
        for agenda in agendas:
            # Dispatch to member_score/party_score/... based on the
            # instance's lowercased class name.
            agenda.score = agenda.__getattribute__('%s_score' % instance.__class__.__name__.lower())(instance)
            agenda.significance = agenda.score * agenda.num_followers
        # Keep only the most/least significant agendas, then present each
        # group ordered by score, highest first.
        agendas.sort(key=attrgetter('significance'))
        agendas = get_top_bottom(agendas, top, bottom)
        agendas['top'].sort(key=attrgetter('score'), reverse=True)
        agendas['bottom'].sort(key=attrgetter('score'), reverse=True)
        return agendas
    def get_relevant_for_mk(self, mk, agendaId):
        # NOTE(review): despite the name, this returns AgendaVote rows for
        # the given agenda in which the MK voted -- confirm callers expect
        # AgendaVote instances rather than Agenda instances.
        agendas = AgendaVote.objects.filter(agenda__id=agendaId,vote__votes__id=mk).distinct()
        return agendas
    def get_relevant_for_user(self, user):
        """Return the agendas the given user is allowed to see."""
        if user == None or not user.is_authenticated():
            # Anonymous users see only public agendas.
            agendas = Agenda.objects.filter(is_public=True)\
                                    .order_by('-num_followers')\
                                    .prefetch_related('agendavotes')
        elif user.is_superuser:
            agendas = Agenda.objects.all().order_by('-num_followers')\
                                    .prefetch_related('agendavotes')
        else:
            # Regular users also see private agendas they edit.
            agendas = Agenda.objects.filter(Q(is_public=True) |
                                            Q(editors=user))\
                                    .order_by('-num_followers')\
                                    .prefetch_related('agendavotes')\
                                    .distinct()
        return agendas
    def get_possible_to_suggest(self, user, vote):
        """Agendas the user may suggest this vote for (not already linked,
        not edited by the user); False for anonymous users."""
        if user == None or not user.is_authenticated():
            agendas = False
        else:
            agendas = Agenda.objects.filter(is_public=True)\
                                    .exclude(editors=user)\
                                    .exclude(agendavotes__vote=vote)\
                                    .distinct()
        return agendas
    def get_mks_values(self):
        """Return {agenda_id: [(mk_id, {score, rank, volume, numvotes})]},
        cached for 30 minutes.

        NOTE(review): the tuple-unpacking lambda below is Python-2-only
        syntax; this module cannot run unmodified on Python 3.
        """
        mks_values = cache.get('agendas_mks_values')
        if not mks_values:
            q = queries.agendas_mks_grade()
            # outer join - add missing mks to agendas
            newAgendaMkVotes = {}
            # generates a set of all the current mk ids that have ever voted for any agenda
            # its not perfect, but its better than creating another query to generate all known mkids
            allMkIds = set(map(itemgetter(0),chain.from_iterable(q.values())))
            for agendaId,agendaVotes in q.items():
                # the newdict will have 0's for each mkid, the update will change the value for known mks
                newDict = {}.fromkeys(allMkIds,(0,0,0))
                newDict.update(dict(map(lambda (mkid,score,volume,numvotes):(mkid,(score,volume,numvotes)),agendaVotes)))
                newAgendaMkVotes[agendaId]=newDict.items()
            mks_values = {}
            # Rank MKs per agenda by descending score (rank starts at 1).
            for agenda_id, scores in newAgendaMkVotes.items():
                mks_values[agenda_id] = \
                    map(lambda x: (x[1][0], dict(score=x[1][1][0], rank=x[0], volume=x[1][1][1], numvotes=x[1][1][2])),
                        enumerate(sorted(scores,key=lambda x:x[1][0],reverse=True), 1))
            cache.set('agendas_mks_values', mks_values, 1800)
        return mks_values
    # def get_mks_values(self,ranges=None):
    #     if ranges is None:
    #         ranges = [[None,None]]
    #     mks_values = False
    #     if ranges == [[None,None]]:
    #         mks_values = cache.get('agendas_mks_values')
    #     if not mks_values:
    #         # get list of mk ids
    #         # generate summary query
    #         # query summary
    #         # split data into appropriate ranges
    #         # compute agenda measures per range
    #         # add missing mks while you're there
    #         q = queries.getAllAgendaMkVotes()
    #         # outer join - add missing mks to agendas
    #         newAgendaMkVotes = {}
    #         # generates a set of all the current mk ids that have ever voted for any agenda
    #         # its not perfect, but its better than creating another query to generate all known mkids
    #         allMkIds = set(map(itemgetter(0),chain.from_iterable(q.values())))
    #         for agendaId,agendaVotes in q.items():
    #             # the newdict will have 0's for each mkid, the update will change the value for known mks
    #             newDict = {}.fromkeys(allMkIds,(0,0,0))
    #             newDict.update(dict(map(lambda (mkid,score,volume,numvotes):(mkid,(score,volume,numvotes)),agendaVotes)))
    #             newAgendaMkVotes[agendaId]=newDict.items()
    #         mks_values = {}
    #         for agenda_id, scores in newAgendaMkVotes.items():
    #             mks_values[agenda_id] = \
    #                 map(lambda x: (x[1][0], dict(score=x[1][1][0], rank=x[0], volume=x[1][1][1], numvotes=x[1][1][2])),
    #                     enumerate(sorted(scores,key=lambda x:x[1][0],reverse=True), 1))
    #         if ranges = [[None,None]]:
    #             cache.set('agendas_mks_values', mks_values, 1800)
    #     return mks_values
    def get_all_party_values(self):
        # Delegates to the raw-SQL helper in the queries module.
        return queries.getAllAgendaPartyVotes()
class Agenda(models.Model):
    """A named political agenda, scored against Knesset votes.

    An agenda is a weighted collection of votes (through ``AgendaVote``);
    members, parties and candidate lists are scored by how closely their
    actual voting record matches the agenda's positions.
    """
    name = models.CharField(max_length=200)
    description = models.TextField(null=True,blank=True)
    editors = models.ManyToManyField('auth.User', related_name='agendas')
    votes = models.ManyToManyField('laws.Vote',through=AgendaVote)
    public_owner_name = models.CharField(max_length=100)
    is_public = models.BooleanField(default=False)
    num_followers = models.IntegerField(default=0)
    image = models.ImageField(blank=True, null=True, upload_to='agendas')
    category_id = models.ForeignKey(Tag, related_name='agendas', blank=True, null=True)
    number_knesset = models.ForeignKey(Knesset, related_name='agendas', blank=True, null=True)
    objects = AgendaManager()

    class Meta:
        verbose_name = _('Agenda')
        verbose_name_plural = _('Agendas')
        unique_together = (("name", "public_owner_name"),)

    def __unicode__(self):
        return u"%s %s %s" % (self.name,_('edited by'),self.public_owner_name)

    @models.permalink
    def get_absolute_url(self):
        return ('agenda-detail', [str(self.id)])

    @models.permalink
    def get_edit_absolute_url(self):
        return ('agenda-detail-edit', [str(self.id)])

    def member_score(self, member):
        """Return the agenda score of a single member (0.0 when unknown)."""
        values = self.get_mks_values(mks=[member])
        if values:
            if len(values)>1:
                raise Member.MultipleObjectsReturned
            return values[0][1]['score']
        else:
            return 0.0

    def _weighted_group_score(self, members, seat_count):
        """Shared scoring core for a group of members.

        Sums the importance-weighted scores of the group's for/against
        vote actions on this agenda's votes and normalizes by the maximum
        achievable score for ``seat_count`` seats, returning a percentage.
        Returns 0.0 when the agenda has no weighted votes.
        """
        # Since we're already calculating python side, no need to do 2
        # queries with joins; fetch for and against together and fold here.
        qs = AgendaVote.objects.filter(
            agenda=self, vote__voteaction__member__in=members,
            vote__voteaction__type__in=['against', 'for']).extra(
            select={'weighted_score': 'agendas_agendavote.score*agendas_agendavote.importance'}
        ).values_list('weighted_score', 'vote__voteaction__type')
        for_score = 0
        against_score = 0
        for score, action_type in qs:
            if action_type == 'against':
                against_score += score
            else:
                for_score += score
        # Iterate the (possibly prefetched) agendavotes instead of a
        # values_list so callers that select/prefetch related rows avoid
        # extra queries (e.g. AgendaResource.dehydrate).
        max_score = sum(abs(x.score * x.importance) for x in
                        self.agendavotes.all()) * seat_count
        if max_score > 0:
            return (for_score - against_score) / max_score * 100
        return 0.0

    def party_score(self, party):
        """Agenda score for a party, normalized by its number of seats."""
        return self._weighted_group_score(party.members.all(),
                                          party.number_of_seats)

    def candidate_list_score(self, candidate_list):
        """Agenda score for a candidate list, normalized by its size."""
        member_ids = candidate_list.member_ids
        return self._weighted_group_score(member_ids, len(member_ids))

    def related_mk_votes(self,member):
        """Return this agenda's AgendaVotes that ``member`` took part in,
        each annotated with the member's VoteAction as ``.voteaction``.
        """
        # Find all votes that
        # 1) This agenda is ascribed to
        # 2) the member participated in and either voted for or against
        vote_actions = VoteAction.objects.filter(member=member,vote__agendavotes__agenda=self)
        all_votes = AgendaVote.objects.filter(agenda=self,vote__voteaction__member=member).distinct()
        # TODO: improve ugly code below
        member_votes = list()
        for member_vote in all_votes:
            for vote_action in vote_actions:
                if (vote_action.vote == member_vote.vote):
                    member_votes.insert(0,member_vote)
                    member_votes[0].voteaction = vote_action
        return member_votes

    def selected_instances(self, cls, top=3, bottom=3):
        """Score every instance of ``cls`` against this agenda and return
        the ``top``/``bottom`` scorers (dicts keyed 'top' and 'bottom').

        Dispatches to the matching ``<classname>_score`` method, e.g.
        ``party_score`` for Party.
        """
        instances = list(cls.objects.all())
        for instance in instances:
            scorer = getattr(self, '%s_score' % instance.__class__.__name__.lower())
            instance.score = scorer(instance)
        instances.sort(key=attrgetter('score'))
        instances = get_top_bottom(instances, top, bottom)
        instances['top'].sort(key=attrgetter('score'), reverse=True)
        instances['bottom'].sort(key=attrgetter('score'), reverse=True)
        return instances

    @staticmethod
    def generateSummaryFilters(ranges, start_fieldname, end_fieldname):
        """Fold a list of ``[start, end)`` pairs into one OR'ed Q filter.

        Returns ``None`` when there is nothing to filter (no ranges, or an
        unbounded range that would match everything anyway).
        """
        if not ranges:
            return None
        filter_list = []
        for r in ranges:
            if not r[0] and not r[1]:
                return None  # might as well not filter at all
            query_fields = {}
            if r[0]:
                query_fields[start_fieldname + '__gte'] = r[0]
            if r[1]:
                query_fields[end_fieldname + '__lt'] = r[1]
            filter_list.append(Q(**query_fields))
        if len(filter_list) == 1:
            return filter_list[0]
        return reduce(lambda x, y: x | y, filter_list)

    def get_mks_totals(self, member):
        "Get count for each vote type for a specific member on this agenda"
        # let's split qs to make it more readable
        qs = VoteAction.objects.filter(member=member, type__in=('for', 'against'), vote__agendavotes__agenda=self)
        qs = list(qs.values('type').annotate(total=Count('id')))
        totals = sum(x['total'] for x in qs)
        # everything the member did not vote on counts as 'no-vote'
        qs.append({'type': 'no-vote', 'total': self.votes.count() - totals})
        return qs

    def get_mks_values(self, ranges=None, mks=None):
        """Compute per-member measures (score, rank, volume, counts).

        ``ranges`` is a list of ``[start, end)`` month pairs (default: the
        current Knesset); ``mks`` optionally restricts computation to the
        given members.  For a single range the result is a list of
        ``(mk_id, dict)`` tuples sorted by rank; for multiple ranges each
        member id maps to a list of per-range dicts.
        """
        if ranges is None:
            ranges = [[dateMonthTruncate(Knesset.objects.current_knesset().start_date),None]]
        mk_ids = [mk.id for mk in mks] if mks else []
        fullRange = ranges == [[None,None]]
        if fullRange:
            # Only the unbounded range is cached.
            cached = cache.get('agenda_%d_mks_values' % self.id)
            if cached:
                if mks:
                    cached = [(mk_id, values)
                              for (mk_id, values) in cached
                              if mk_id in mk_ids]
                return cached
        # get list of mk ids
        mk_ids = mk_ids or Membership.objects.membership_in_range(ranges)
        # generate summary query
        filters_folded = self.generateSummaryFilters(ranges, 'month', 'month')
        # query summary
        baseQuerySet = SummaryAgenda.objects.filter(agenda=self)
        if filters_folded:
            # BUG FIX: QuerySet.filter returns a *new* queryset; the
            # original discarded the result, so the month filter was
            # silently never applied.
            baseQuerySet = baseQuerySet.filter(filters_folded)
        summaries = list(baseQuerySet)
        # group summaries for respective ranges
        summariesForRanges = []
        for r in ranges:
            summariesForRange = defaultdict(list)
            for s in summaries:
                if (not r[0] or s.month>=r[0]) and \
                   (not r[1] or s.month<r[1]):
                    summariesForRange[s.summary_type].append(s)
            summariesForRanges.append(summariesForRange)
        # compute agenda measures, store results per MK
        mk_results = dict((mk_id, []) for mk_id in mk_ids)
        for range_summaries in summariesForRanges:
            agenda_data = range_summaries['AG']
            total_votes = sum(map(attrgetter('votes'),agenda_data))
            total_score = sum(map(attrgetter('score'),agenda_data))
            current_mks_data = indexby(range_summaries['MK'],attrgetter('mk_id'))
            # calculate results per mk
            rangeMkResults = []
            for mk_id in mk_results.keys():
                mk_data = current_mks_data[mk_id]
                if mk_data:
                    mk_votes = sum(map(attrgetter('votes'),mk_data))
                    mk_for_votes = sum(map(attrgetter('for_votes'),mk_data))
                    mk_against_votes = sum(map(attrgetter('against_votes'),mk_data))
                    # Guard both normalizations against empty agenda
                    # summaries (the original guarded only the score).
                    mk_volume = 100*mk_votes/total_votes if total_votes else 0
                    mk_score = 100*sum(map(attrgetter('score'),mk_data))/total_score if total_score != 0 else 0
                    rangeMkResults.append((mk_id,mk_votes,mk_for_votes,mk_against_votes,mk_score,mk_volume))
                else:
                    rangeMkResults.append(tuple([mk_id]+[0]*5))
            # sort results by score descending (mk_id breaks ties)
            for rank,(mk_id,mk_votes,mk_for_votes,mk_against_votes,mk_score,mk_volume) in enumerate(sorted(rangeMkResults,key=itemgetter(4,0),reverse=True)):
                mk_range_data = dict(score=mk_score,rank=rank,volume=mk_volume,numvotes=mk_votes,numforvotes=mk_for_votes,numagainstvotes=mk_against_votes)
                if len(ranges)==1:
                    mk_results[mk_id]=mk_range_data
                else:
                    mk_results[mk_id].append(mk_range_data)
        if len(ranges)==1:
            mk_results = sorted(mk_results.items(), key=lambda item: item[1]['rank'])
        if fullRange and not mks:
            # BUG FIX: the original cached ``mks_values``, which is always
            # False on this path, leaving the cache permanently cold (and
            # a hit would have crashed with a NameError on mk_results).
            # Cache only the complete, unfiltered result.
            cache.set('agenda_%d_mks_values' % self.id, mk_results, 1800)
        return mk_results

    def get_mks_values_old(self, knesset_number=None):
        """Return mks values.

        :param knesset_number: The knesset number of the mks. ``None`` will
            return current knesset (default: ``None``).
        """
        mks_grade = Agenda.objects.get_mks_values()
        if knesset_number is None:
            knesset = Knesset.objects.current_knesset()
        else:
            knesset = Knesset.objects.get(pk=knesset_number)
        mks_ids = Member.objects.filter(
            current_party__knesset=knesset).values_list('pk', flat=True)
        grades = mks_grade.get(self.id, [])
        # keep only grades belonging to members of the requested knesset
        current_grades = [x for x in grades if x[0] in mks_ids]
        return current_grades

    def get_party_values(self):
        """Per-party values for this agenda (from the manager-level data)."""
        party_grades = Agenda.objects.get_all_party_values()
        return party_grades.get(self.id,[])

    def get_all_party_values(self):
        """All agendas' per-party values, keyed by agenda id."""
        return Agenda.objects.get_all_party_values()

    def get_suggested_votes_by_agendas(self, num):
        """Votes not yet ascribed to this agenda, ranked by their total
        importance across all agendas."""
        votes = Vote.objects.filter(~Q(agendavotes__agenda=self))
        votes = votes.annotate(score=Sum('agendavotes__importance'))
        return votes.order_by('-score')[:num]

    def get_suggested_votes_by_agenda_tags(self, num):
        """Votes not yet ascribed to this agenda, ranked by the importance
        of agenda-votes whose tags overlap this agenda's tags."""
        # TODO: This is untested, agendas currently don't have tags
        votes = Vote.objects.filter(~Q(agendavotes__agenda=self))
        tag_importance_subquery = """
        SELECT sum(av.importance)
        FROM agendas_agendavote av
        JOIN tagging_taggeditem avti ON avti.object_id=av.id and avti.object_type_id=%s
        JOIN tagging_taggeditem ati ON ati.object_id=agendas_agenda.id and ati.object_type_id=%s
        WHERE avti.tag_id = ati.tag_id
        """
        agenda_type_id = ContentType.objects.get_for_model(self).id
        votes = votes.extra(select=dict(score = tag_importance_subquery),
                            select_params = [agenda_type_id]*2)
        return votes.order_by('-score')[:num]

    def get_suggested_votes_by_controversy(self, num):
        """Votes not yet ascribed to this agenda, ranked by controversy."""
        votes = Vote.objects.filter(~Q(agendavotes__agenda=self))
        votes = votes.extra(select=dict(score = 'controversy'))
        return votes.order_by('-score')[:num]
# Row kinds stored in SummaryAgenda.summary_type: 'AG' rows aggregate the
# whole agenda's votes, 'MK' rows hold a single member's counters (both
# are consumed together by Agenda.get_mks_values).
SUMMARY_TYPES = (
    ('AG','Agenda Votes'),
    ('MK','MK Counter')
)
class SummaryAgenda(models.Model):
    """Pre-aggregated per-month scoring data for one agenda.

    Rows come in two flavours (``summary_type``): agenda-wide totals
    ('AG') and per-member counters ('MK').  ``Agenda.get_mks_values``
    reads these rows instead of re-scanning individual votes.
    """
    agenda = models.ForeignKey(Agenda, related_name='score_summaries')
    # Month bucket; presumably truncated to the first of the month by the
    # writer (cf. dateMonthTruncate below) -- TODO confirm against the
    # code that populates this table.
    month = models.DateTimeField(db_index=True)
    summary_type = models.CharField(max_length=2, choices=SUMMARY_TYPES)
    score = models.FloatField(default=0.0)
    votes = models.BigIntegerField(default=0)
    for_votes = models.BigIntegerField(default=0)
    against_votes = models.BigIntegerField(default=0)
    # Nullable: __unicode__ below treats a missing mk as 'n/a', so this is
    # presumably only set on 'MK' rows -- verify before relying on it.
    mk = models.ForeignKey(Member,blank=True, null=True, related_name='agenda_summaries')
    db_created = models.DateTimeField(auto_now_add=True)
    db_updated = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return "%s %s %s %s (%f,%d)" % (str(self.agenda_id),str(self.month),self.summary_type,str(self.mk_id) if self.mk else u'n/a',self.score,self.votes)
from listeners import *
def dateMonthTruncate(dt):
    """Truncate a date or datetime to the first instant of its month.

    Always returns a ``datetime.datetime`` (midnight on the 1st), whether
    given a ``date`` or a ``datetime``.  Uses ``isinstance`` instead of
    the original exact ``type()`` comparison so datetime subclasses are
    handled on the datetime path as well.
    """
    if isinstance(dt, datetime.datetime):
        return dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    # A plain date (or date subclass): promote to a datetime at midnight.
    return datetime.datetime(year=dt.year, month=dt.month, day=1)
def indexby(data, fieldFunc):
    """Group ``data`` into a defaultdict keyed by ``fieldFunc(item)``.

    Items sharing a key are collected into a list in input order.
    """
    grouped = defaultdict(list)
    for item in data:
        grouped[fieldFunc(item)].append(item)
    return grouped
| |
"""Miscellaneous utility functions and classes.
This module is used internally by Tornado. It is not necessarily expected
that the functions and classes defined here will be useful to other
applications, but they are documented here in case they are.
The one public-facing part of this module is the `Configurable` class
and its `~Configurable.configure` method, which becomes a part of the
interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
and `.Resolver`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import inspect
import sys
import zlib
class ObjectDict(dict):
    """Makes a dictionary behave like an object, with attribute-style access.

    Missing attributes raise ``AttributeError`` (not ``KeyError``) so the
    object also plays nicely with ``getattr`` / ``hasattr``.
    """
    def __getattr__(self, name):
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value
class GzipDecompressor(object):
    """Streaming gzip decompressor.

    The interface is like that of `zlib.decompressobj` (without the
    optional arguments), but it understands gzip headers and checksums.
    """
    def __init__(self):
        # A wbits value of 16 + MAX_WBITS switches zlib into gzip mode
        # (header plus trailing checksum).  See
        # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
        # This works on cpython and pypy, but not jython.
        gzip_wbits = 16 + zlib.MAX_WBITS
        self.decompressobj = zlib.decompressobj(gzip_wbits)

    def decompress(self, value):
        """Decompress a chunk, returning newly-available data.

        Some data may be buffered for later processing; `flush` must
        be called when there is no more input data to ensure that
        all data was processed.
        """
        return self.decompressobj.decompress(value)

    def flush(self):
        """Return any remaining buffered data not yet returned by decompress.

        Also checks for errors such as truncated input.
        No other methods may be called on this object after `flush`.
        """
        return self.decompressobj.flush()
def import_object(name):
    """Imports an object by name.

    import_object('x.y.z') is equivalent to 'from x.y import z'.
    A dot-free name imports and returns the module itself (the original
    implementation raised ``ValueError`` for such names because it called
    ``__import__('')``).

    >>> import tornado.escape
    >>> import_object('tornado.escape') is tornado.escape
    True
    >>> import_object('tornado.escape.utf8') is tornado.escape.utf8
    True
    """
    if '.' not in name:
        # Top-level module: nothing to getattr from.
        return __import__(name)
    parts = name.split('.')
    obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
    return getattr(obj, parts[-1])
# Fake unicode literal support: Python 3.2 doesn't have the u'' marker for
# literal strings, and alternative solutions like "from __future__ import
# unicode_literals" have other problems (see PEP 414). u() can be applied
# to ascii strings that include \u escapes (but they must not contain
# literal non-ascii characters).
if str is bytes:
    # Python 2: native strings are byte strings; decode the escapes.
    def u(s):
        return s.decode('unicode_escape')
    bytes_type = str
    unicode_type = unicode
    basestring_type = basestring
else:
    # Python 3: native strings are already unicode.
    def u(s):
        return s
    bytes_type = bytes
    unicode_type = str
    basestring_type = str
# raise_exc_info and exec_in need syntax that is incompatible between
# Python 2 and 3 (the three-argument raise statement vs. .with_traceback,
# and the exec statement vs. the exec function).  Each variant is hidden
# inside a string passed to exec() so the interpreter only ever *parses*
# the definition matching its own major version.
if sys.version_info > (3,):
    exec("""
def raise_exc_info(exc_info):
    raise exc_info[1].with_traceback(exc_info[2])
def exec_in(code, glob, loc=None):
    if isinstance(code, str):
        code = compile(code, '<string>', 'exec', dont_inherit=True)
    exec(code, glob, loc)
""")
else:
    exec("""
def raise_exc_info(exc_info):
    raise exc_info[0], exc_info[1], exc_info[2]
def exec_in(code, glob, loc=None):
    if isinstance(code, basestring):
        # exec(string) inherits the caller's future imports; compile
        # the string first to prevent that.
        code = compile(code, '<string>', 'exec', dont_inherit=True)
    exec code in glob, loc
""")
class Configurable(object):
    """Base class for configurable interfaces.

    A configurable interface is an (abstract) class whose constructor
    acts as a factory function for one of its implementation subclasses.
    The implementation subclass as well as optional keyword arguments to
    its initializer can be set globally at runtime with `configure`.
    By using the constructor as the factory method, the interface
    looks like a normal class, `isinstance` works as usual, etc. This
    pattern is most useful when the choice of implementation is likely
    to be a global decision (e.g. when `~select.epoll` is available,
    always use it instead of `~select.select`), or when a
    previously-monolithic class has been split into specialized
    subclasses.

    Configurable subclasses must define the class methods
    `configurable_base` and `configurable_default`, and use the instance
    method `initialize` instead of ``__init__``.
    """
    # Configured state.  These attribute references are name-mangled to
    # ``_Configurable__impl_class``/``_Configurable__impl_kwargs`` and are
    # stored on the class returned by ``configurable_base()``, giving each
    # configurable hierarchy its own slot.
    __impl_class = None
    __impl_kwargs = None

    def __new__(cls, **kwargs):
        base = cls.configurable_base()
        args = {}
        if cls is base:
            # Instantiating the abstract base: delegate to whichever
            # implementation is configured (or the default), seeded with
            # any globally configured keyword arguments.
            impl = cls.configured_class()
            if base.__impl_kwargs:
                args.update(base.__impl_kwargs)
        else:
            # Instantiating a concrete subclass directly: use it as-is.
            impl = cls
        # Per-call keyword arguments override the configured defaults.
        args.update(kwargs)
        instance = super(Configurable, cls).__new__(impl)
        # initialize vs __init__ chosen for compatibility with AsyncHTTPClient
        # singleton magic. If we get rid of that we can switch to __init__
        # here too.
        instance.initialize(**args)
        return instance

    @classmethod
    def configurable_base(cls):
        """Returns the base class of a configurable hierarchy.

        This will normally return the class in which it is defined.
        (which is *not* necessarily the same as the cls classmethod parameter).
        """
        raise NotImplementedError()

    @classmethod
    def configurable_default(cls):
        """Returns the implementation class to be used if none is configured."""
        raise NotImplementedError()

    def initialize(self):
        """Initialize a `Configurable` subclass instance.

        Configurable classes should use `initialize` instead of ``__init__``.
        """

    @classmethod
    def configure(cls, impl, **kwargs):
        """Sets the class to use when the base class is instantiated.

        Keyword arguments will be saved and added to the arguments passed
        to the constructor. This can be used to set global defaults for
        some parameters.
        """
        base = cls.configurable_base()
        # Accept the implementation as a dotted string as well as a class.
        if isinstance(impl, (unicode_type, bytes_type)):
            impl = import_object(impl)
        if impl is not None and not issubclass(impl, cls):
            raise ValueError("Invalid subclass of %s" % cls)
        base.__impl_class = impl
        base.__impl_kwargs = kwargs

    @classmethod
    def configured_class(cls):
        """Returns the currently configured class."""
        base = cls.configurable_base()
        if cls.__impl_class is None:
            # Nothing configured yet: lazily install the default.
            base.__impl_class = cls.configurable_default()
        return base.__impl_class

    @classmethod
    def _save_configuration(cls):
        # Snapshot of the configured state (used by tests to restore it).
        base = cls.configurable_base()
        return (base.__impl_class, base.__impl_kwargs)

    @classmethod
    def _restore_configuration(cls, saved):
        # Restore a snapshot produced by _save_configuration.
        base = cls.configurable_base()
        base.__impl_class = saved[0]
        base.__impl_kwargs = saved[1]
class ArgReplacer(object):
    """Replaces one value in an ``args, kwargs`` pair.

    Inspects the function signature to find an argument by name
    whether it is passed by position or keyword. For use in decorators
    and similar wrappers.
    """
    def __init__(self, func, name):
        self.name = name
        # BUG FIX: inspect.getargspec was removed in Python 3.11; prefer
        # getfullargspec and fall back only where it is unavailable
        # (Python 2).
        getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
        try:
            self.arg_pos = getspec(func).args.index(name)
        except ValueError:
            # Not a positional parameter
            self.arg_pos = None

    def replace(self, new_value, args, kwargs):
        """Replace the named argument in ``args, kwargs`` with ``new_value``.

        Returns ``(old_value, args, kwargs)``. The returned ``args`` and
        ``kwargs`` objects may not be the same as the input objects, or
        the input objects may be mutated.

        If the named argument was not found, ``new_value`` will be added
        to ``kwargs`` and None will be returned as ``old_value``.
        """
        if self.arg_pos is not None and len(args) > self.arg_pos:
            # The arg to replace is passed positionally
            old_value = args[self.arg_pos]
            args = list(args)  # *args is normally a tuple
            args[self.arg_pos] = new_value
        else:
            # The arg to replace is either omitted or passed by keyword.
            old_value = kwargs.get(self.name)
            kwargs[self.name] = new_value
        return old_value, args, kwargs
def doctests():
    """Collect this module's doctests as a ``unittest``-compatible suite."""
    import doctest
    suite = doctest.DocTestSuite()
    return suite
| |
#!/usr/bin/env python3
import matplotlib
#matplotlib.use("Agg")
import subprocess #import check_output
import operator
from os import mkdir
from shutil import rmtree
from networkit import *
from pylab import *
import matplotlib.pyplot as plt
from scipy.stats import spearmanr
import os.path
import sys, traceback
import time
from param_fitter import ParameterFitter
def getModularity(G):
    """Modularity of a PLM community detection run on graph G."""
    partition = community.PLM(G).run().getPartition()
    return community.Modularity().getQuality(partition, G)
def numberOfComponents(G):
    """Number of connected components of graph G."""
    components = properties.ConnectedComponents(G)
    return components.run().numberOfComponents()
def clustering(G):
    """Average local clustering coefficient of graph G."""
    coefficient = properties.ClusteringCoefficient()
    return coefficient.avgLocal(G)
def averageBetweennessPositions(G, count=5):
    """Average betweenness *rank position* of every node over several runs.

    Runs approximate betweenness ``count`` times (default 5, matching the
    previously hard-coded value); in each run nodes are ranked by score
    ascending and the lowest-scoring node receives position n, the highest
    position 1.  Returns a list of per-node positions averaged over runs.
    """
    n = G.numberOfNodes()
    positions = [0] * n  # idiomatic pre-sized list instead of an append loop
    for _ in range(count):
        scores = centrality.ApproxBetweenness(G, 0.2, 0.1).run().scores()
        # Sort node ids by score ascending; stable sort keeps ties in
        # ascending-id order, exactly like the original dict/itemgetter
        # round-trip.
        pos = n
        for node_id in sorted(range(len(scores)), key=scores.__getitem__):
            positions[node_id] += pos
            pos -= 1
    return [total / count for total in positions]
def main():
    """Drive the sparsification experiment for one input graph.

    Command line: ``name path numnodes [extra minla args...]``.  Reads the
    input graph, computes reference metrics on it, then for sparsification
    ratios 0.1..0.9 invokes the external ``./minla`` binary to produce
    ``numrun`` sparsified graphs, recomputes the same metrics on each and
    appends (metric, edge-ratio) pairs to per-metric report files under
    ``reports/<name>/``.
    """
    if(len(sys.argv) < 4): #minimum to run
        print("Invalid number of parameter: Usage: ./generateSparseGraphs name path numnodes")
        sys.exit(0)
    setLogLevel("FATAL")
    name = sys.argv[1]
    path = sys.argv[2]
    num_nodes =sys.argv[3]
    run_args = sys.argv[4:]  # forwarded verbatim to ./minla
    numrun= 2  # sparsified graphs generated (and averaged) per ratio
    print("Running ",name)
    start_time = time.time()
    # start from clean report/output directories
    if os.path.exists("reports/"+name):
        rmtree("reports/"+name)
    mkdir("reports/"+name)
    if os.path.exists("out/"+name):
        rmtree("out/"+name)
    mkdir("out/"+name)
    # one report file per metric; lines are "<value> <edge ratio>"
    d_file = "reports/"+name+"/_diameter.txt"
    c_file = "reports/"+name+"/_clust_coef.txt"
    c_file2 = "reports/"+name+"/_clust_coef2.txt"
    comp_file = "reports/"+name+"/_components.txt"
    rho_deg_file = "reports/"+name+"/_degCentrality.txt"
    rho_pag_file = "reports/"+name+"/_pagerank.txt"
    rho_bet_file = "reports/"+name+"/_betweenness.txt"
    mod_file = "reports/"+name+"/_modularity.txt"
    clust_dist_file = "reports/"+name+"/_clust_dist.txt"
    # reference metrics computed once on the full input graph
    G = graphio.EdgeListReader(' ',0,"#",True, False).read(path)
    orig_diameter = properties.Diameter.exactDiameter(G)
    orig_clustC = clustering(G)
    numComp = numberOfComponents(G)
    loc_clust_dist = centrality.LocalClusteringCoefficient(G).run().scores()
    #print(" computing degeree cent")
    deg_centr = centrality.DegreeCentrality(G).run().scores()
    #print(" computing page rank")
    page_rank = centrality.PageRank(G).run().scores()
    #print(" computing Betweenness")
    betw = averageBetweennessPositions(G)
    #print(" computing modularity")
    modularity = getModularity(G)
    # fitter = ParameterFitter(G, 20)
    r = 0.1
    #read the graphs and run computations
    # sweep sparsification ratios 0.1 .. 0.9 (round() guards float drift)
    while(round(r,1) <= 0.9):
        # e = fitter.binarySearchParameter(round(r,1))
        try:
            print("Generating graphs for "+ str(round(r,1)))
            run_cmd = ["./minla", "-i", path, "-n", num_nodes, "--zero-index", "-b", str(numrun), "-s", str(round(r,1)), "--gname", name, ]
            if(len(run_args)):
                for a in run_args:
                    run_cmd.append(a)
            #output = check_output(run_cmd)
            subprocess.call(run_cmd)
        except Exception as e:
            print("Process execution failed.",e)
            traceback.print_exc(file=sys.stdout)
            sys.exit(0)
        # load the numrun sparsified graphs written by ./minla
        sparse_g = []
        for i in range(numrun):
            try:
                #print(" Reading graph file for ", round(r,1)," at ",i)
                s_file = "out/"+name+"/"+name+"_"+str(round(r,1))+"_"+str(i)+".txt"
                sparse_g.append(graphio.EdgeListReader(' ',0,"#",True, False).read(s_file))
            except Exception as e:
                print("Failed to read the graph file at "+str(r)+" "+str(i),e)
                sys.exit(0)
        #print(" Computing properties at ", round(r,1))
        #compute diameter
        sumD = 0.0
        sumC = 0.0
        connComp = 0.0
        rho_deg =0
        rho_bet=0
        rho_pag = 0
        rho_clust = 0
        mod =0
        edge_avg = .0
        # accumulate structural metrics over the sparsified graphs
        for g in sparse_g:
            sumD = sumD + properties.Diameter.exactDiameter(g)
            sumC = sumC + clustering(g)
            connComp = connComp + numberOfComponents(g)
            mod += getModularity(g)
            edge_avg+= g.numberOfEdges()
        edge_avg = edge_avg/float(numrun)
        # rank correlations between full-graph and sparse-graph centralities
        for q in range(numrun):
            sg = sparse_g[q]
            rho_clust += spearmanr(loc_clust_dist, centrality.LocalClusteringCoefficient(sg).run().scores())[0]
            rho_deg+= spearmanr(deg_centr,centrality.DegreeCentrality(sg).run().scores())[0]
            rho_bet+= spearmanr(betw,averageBetweennessPositions(sg))[0]
            rho_pag+= spearmanr(page_rank,centrality.PageRank(sg).run().scores())[0]
        edgeRatio = edge_avg / float(G.numberOfEdges())
        edgeRatio = round(edgeRatio,2)
        avgD = sumD/len(sparse_g) #average Diameter
        avgC = sumC/len(sparse_g) #average Clustering Coefficient
        connComp = (connComp / len(sparse_g))/float(numComp)
        rho_deg = rho_deg/len(sparse_g)
        rho_bet = rho_bet/len(sparse_g)
        rho_pag = rho_pag/len(sparse_g)
        rho_clust = rho_clust/len(sparse_g)
        mod = mod/len(sparse_g)
        mod =mod/modularity
        #print(" Writing to file ", round(r,1))
        # append one "<value> <edge ratio>" line per metric
        with open(d_file,"a") as f:
            f.write(str(round(orig_diameter/avgD,4)) +" "+ str(edgeRatio) +"\n")
        with open(c_file,"a") as f:
            f.write(str(round(avgC - orig_clustC,4)) +" "+ str(edgeRatio) +"\n")
        with open(c_file2,"a") as f:
            f.write(str(round(avgC,4)) +" "+ str(edgeRatio) +"\n")
        with open(comp_file,"a") as f:
            f.write(str(round(connComp,2)) +" "+ str(edgeRatio) +"\n")
        with open(rho_deg_file,"a") as f:
            f.write(str(round(rho_deg,4)) +" "+ str(edgeRatio) +"\n")
        with open(rho_bet_file,"a") as f:
            f.write(str(round(rho_bet,4)) +" "+ str(edgeRatio) +"\n")
        with open(rho_pag_file,"a") as f:
            f.write(str(round(rho_pag,4)) +" "+ str(edgeRatio) +"\n")
        with open(mod_file,"a") as f:
            f.write(str(round(mod,4)) +" "+ str(edgeRatio) +"\n")
        with open(clust_dist_file,"a") as f:
            f.write(str(round(rho_clust,4)) +" "+ str(edgeRatio) +"\n")
        r =round(r,1) + 0.1
    #remove output files
    if os.path.exists("out/"+name):
        rmtree("out/"+name)
    mkdir("out/"+name)
    print("Finalizing ", name)
    #write properties of the full graph
    # (by definition the full graph scores 1.0 / 0.0 on every metric)
    with open(d_file,"a") as f:
        f.write("1.0 1.0\n")
    with open(c_file,"a") as f:
        f.write("0.0 1.0\n")
    with open(c_file2,"a") as f:
        f.write( str(round(orig_clustC,2)) +" 1.0\n")
    with open(comp_file,"a") as f:
        f.write("1.0 1.0\n")
    with open(mod_file,"a") as f:
        f.write("1.0 1.0\n")
    with open(rho_deg_file,"a") as f:
        f.write("1.0 1.0\n")
    with open(rho_bet_file,"a") as f:
        f.write("1.0 1.0\n")
    with open(rho_pag_file,"a") as f:
        f.write("1.0 1.0\n")
    with open(clust_dist_file,"a") as f:
        f.write("1.0 1.0\n")
    print("Completed run for graph: "+name+" in "+ str(time.time() - start_time))
# Allow the module to be run directly as a command-line script.
if __name__ == '__main__':
    main()
| |
# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from unittest import mock
from solum.api.controllers.v1 import component
from solum.api.controllers.v1.datamodel import component as componentmodel
from solum.common import exception
from solum import objects
from solum.tests import base
from solum.tests import fakes
# NOTE: class decorators apply bottom-up, so the mocks arrive at each test
# method in reverse order: the handler first, then response, request, and
# finally the policy check.
@mock.patch('solum.common.policy.check')
@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)
@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)
@mock.patch('solum.api.handlers.component_handler.ComponentHandler')
class TestComponentController(base.BaseTestCase):
    """Unit tests for the single-component controller (GET/PUT/DELETE)."""

    def setUp(self):
        super(TestComponentController, self).setUp()
        # Register the data-model object implementations for each test.
        objects.load()

    def test_component_get(self, ComponentHandler, resp_mock,
                           request_mock, mock_policy):
        # A successful GET returns the handler's component and HTTP 200.
        mock_policy.return_value = True
        hand_get = ComponentHandler.return_value.get
        fake_component = fakes.FakeComponent()
        hand_get.return_value = fake_component
        obj = component.ComponentController('test_id')
        resp = obj.get()
        self.assertIsNotNone(resp)
        self.assertEqual(fake_component.name, resp['result'].name)
        self.assertEqual(fake_component.description,
                         resp['result'].description)
        hand_get.assert_called_with('test_id')
        self.assertEqual(200, resp_mock.status)

    def test_component_get_not_found(self, ComponentHandler,
                                     resp_mock, request_mock, mock_policy):
        # A handler raising ResourceNotFound maps to HTTP 404.
        mock_policy.return_value = True
        hand_get = ComponentHandler.return_value.get
        hand_get.side_effect = exception.ResourceNotFound(
            name='component', component_id='test_id')
        cont = component.ComponentController('test_id')
        cont.get()
        hand_get.assert_called_with('test_id')
        self.assertEqual(404, resp_mock.status)

    def test_component_put_none(self, ComponentHandler,
                                resp_mock, request_mock, mock_policy):
        # PUT with no body is rejected as a bad request (400).
        mock_policy.return_value = True
        request_mock.body = None
        request_mock.content_type = 'application/json'
        hand_put = ComponentHandler.return_value.put
        hand_put.return_value = fakes.FakeComponent()
        component.ComponentController('test_id').put()
        self.assertEqual(400, resp_mock.status)

    def test_component_put_not_found(self, ComponentHandler,
                                     resp_mock, request_mock, mock_policy):
        # PUT on a missing component maps to HTTP 404.
        mock_policy.return_value = True
        json_update = {'user_id': 'foo', 'name': 'appy'}
        request_mock.body = json.dumps(json_update)
        request_mock.content_type = 'application/json'
        hand_update = ComponentHandler.return_value.update
        hand_update.side_effect = exception.ResourceNotFound(
            name='component', component_id='test_id')
        component.ComponentController('test_id').put()
        hand_update.assert_called_with('test_id', json_update)
        self.assertEqual(404, resp_mock.status)

    def test_component_put_ok(self, ComponentHandler, resp_mock,
                              request_mock, mock_policy):
        # A valid PUT delegates to the handler's update and returns 200.
        mock_policy.return_value = True
        json_update = {'name': 'update_foo',
                       'description': 'update_desc_component',
                       'user_id': 'user_id_test',
                       'project_id': 'project_id_test'}
        request_mock.body = json.dumps(json_update)
        request_mock.content_type = 'application/json'
        hand_update = ComponentHandler.return_value.update
        hand_update.return_value = fakes.FakeComponent()
        component.ComponentController('test_id').put()
        hand_update.assert_called_with('test_id', json_update)
        self.assertEqual(200, resp_mock.status)

    def test_component_delete_not_found(self, ComponentHandler,
                                        resp_mock, request_mock, mock_policy):
        # DELETE on a missing component maps to HTTP 404.
        mock_policy.return_value = True
        hand_delete = ComponentHandler.return_value.delete
        hand_delete.side_effect = exception.ResourceNotFound(
            name='component', component_id='test_id')
        obj = component.ComponentController('test_id')
        obj.delete()
        hand_delete.assert_called_with('test_id')
        self.assertEqual(404, resp_mock.status)

    def test_component_delete_ok(self, ComponentHandler,
                                 resp_mock, request_mock, mock_policy):
        # A successful DELETE returns 204 (no content).
        mock_policy.return_value = True
        hand_delete = ComponentHandler.return_value.delete
        hand_delete.return_value = None
        obj = component.ComponentController('test_id')
        obj.delete()
        hand_delete.assert_called_with('test_id')
        self.assertEqual(204, resp_mock.status)
# Same mock stack as TestComponentController: decorators apply bottom-up,
# so parameters arrive in reverse decorator order.
@mock.patch('solum.common.policy.check')
@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)
@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)
@mock.patch('solum.api.handlers.component_handler.ComponentHandler')
class TestComponentsController(base.BaseTestCase):
    """Unit tests for the component-collection controller (GET/POST)."""

    def setUp(self):
        super(TestComponentsController, self).setUp()
        # Register the data-model object implementations for each test.
        objects.load()

    def test_components_get_all(self, handler_mock, resp_mock,
                                request_mock, mock_policy):
        # GET on the collection lists every component from the handler.
        mock_policy.return_value = True
        hand_get_all = handler_mock.return_value.get_all
        fake_component = fakes.FakeComponent()
        hand_get_all.return_value = [fake_component]
        obj = component.ComponentsController()
        resp = obj.get_all()
        hand_get_all.assert_called_with()
        self.assertIsNotNone(resp)
        self.assertEqual(fake_component.name, resp['result'][0].name)
        self.assertEqual(fake_component.description,
                         resp['result'][0].description)
        self.assertEqual(200, resp_mock.status)

    def test_components_post(self, handler_mock, resp_mock,
                             request_mock, mock_policy):
        # POST with a valid body creates the component and returns 201.
        json_create = {'name': 'foo',
                       'description': 'test_desc_component',
                       'user_id': 'user_id_test',
                       'project_id': 'project_id_test'}
        mock_policy.return_value = True
        request_mock.body = json.dumps(json_create)
        request_mock.content_type = 'application/json'
        handler_create = handler_mock.return_value.create
        handler_create.return_value = fakes.FakeComponent()
        component.ComponentsController().post()
        handler_create.assert_called_with(json_create)
        self.assertEqual(201, resp_mock.status)
        handler_create.assert_called_once_with(json_create)

    def test_components_post_nodata(self, handler_mock,
                                    resp_mock, request_mock, mock_policy):
        # POST with an empty body is rejected with 400 and a fault string.
        mock_policy.return_value = True
        request_mock.body = ''
        request_mock.content_type = 'application/json'
        handler_create = handler_mock.return_value.create
        handler_create.return_value = fakes.FakeComponent()
        ret_val = component.ComponentsController().post()
        self.assertEqual("Missing argument: \"data\"",
                         str(ret_val['faultstring']))
        self.assertEqual(400, resp_mock.status)
class TestComponentAsDict(base.BaseTestCase):
    """Scenario-driven checks of Component.as_dict()."""

    scenarios = [
        ('none', dict(data=None)),
        ('one', dict(data={'name': 'foo'})),
        ('full', dict(data={'uri': 'http://example.com/v1/components/x1',
                            'name': 'Example-component',
                            'type': 'component',
                            'component_type': 'heat_stack',
                            'tags': ['small'],
                            'project_id': '1dae5a09ef2b4d8cbf3594b0eb4f6b94',
                            'user_id': '55f41cf46df74320b9486a35f5d28a11',
                            'description': 'A component'}))
    ]

    def test_as_dict(self):
        objects.load()
        if self.data is None:
            model = componentmodel.Component()
            self.data = {}
        else:
            model = componentmodel.Component(**self.data)
        # as_dict() is not expected to echo 'uri' or 'type', so drop them
        # from the expected dict before comparing (no-op when absent).
        for key in ('uri', 'type'):
            self.data.pop(key, None)
        self.assertEqual(self.data,
                         model.as_dict(objects.registry.Component))
| |
import time
import pytest
import uqbar.strings
import supriya.assets.synthdefs
import supriya.nonrealtime
import supriya.patterns
# Module-level pattern fixtures shared by every test below.
# pmono_01: a monophonic pattern -- the tests show one synth being created,
# retuned twice (440 -> 660 -> 880 Hz), then released via gate 0.
pmono_01 = supriya.patterns.Pmono(
    amplitude=1.0,
    duration=supriya.patterns.Pseq([1.0, 2.0, 3.0], 1),
    frequency=supriya.patterns.Pseq([440, 660, 880], 1),
)
# pmono_02: like pmono_01 but with paired (chord) frequencies, so two synth
# voices are created and retuned in lockstep.
# NOTE(review): this Pseq omits the explicit repetition count used above;
# presumably the default is a single repetition -- confirm against Pseq.
pmono_02 = supriya.patterns.Pmono(
    amplitude=1.0,
    duration=supriya.patterns.Pseq([1.0, 2.0, 3.0], 1),
    frequency=supriya.patterns.Pseq([[440, 550], [550, 660], [660, 770]]),
)
def test_manual_incommunicado_pmono_01():
    """Render pmono_01 without a server and check the raw OSC bundles.

    Pmono issues one /s_new for the first event and /n_set for subsequent
    events, reusing node id 1000, then releases it by setting gate to 0.
    """
    lists, deltas = pytest.helpers.manual_incommunicado(pmono_01)
    assert lists == [
        [10, [["/s_new", "default", 1000, 0, 1, "amplitude", 1.0, "frequency", 440]]],
        [11.0, [["/n_set", 1000, "amplitude", 1.0, "frequency", 660]]],
        [13.0, [["/n_set", 1000, "amplitude", 1.0, "frequency", 880]]],
        [16.0, [["/n_set", 1000, "gate", 0]]],
    ]
    # One delta per event duration; the trailing None marks exhaustion.
    assert deltas == [1.0, 2.0, 3.0, None]
def test_manual_communicado_pmono_01(server):
    """Step pmono_01 on a live server, checking the node tree after each step."""
    player = supriya.patterns.RealtimeEventPlayer(pmono_01, server=server)

    def query_tree():
        return str(server.query_remote_nodes(include_controls=True))

    empty_state = uqbar.strings.normalize(
        r"""
        NODE TREE 0 group
            1 group
        """
    )
    synth_template = r"""
        NODE TREE 0 group
            1 group
                1000 default
                    out: 0.0, amplitude: 1.0, frequency: {}, gate: {}, pan: 0.5
        """
    # Initial state: only the default group exists.
    assert query_tree() == empty_state
    # Steps 1-3 retune the single synth; step 4 releases it (gate 0).
    for frequency, gate in [(440.0, 1.0), (660.0, 1.0), (880.0, 1.0), (880.0, 0.0)]:
        player(0, 0)
        server.sync()
        assert query_tree() == uqbar.strings.normalize(
            synth_template.format(frequency, gate)
        )
    # Wait for termination: the released synth frees itself.
    time.sleep(0.5)
    assert query_tree() == empty_state
def test_automatic_communicado_pmono_01(server):
    """Smoke test: auto-playing pmono_01 on a live server must not raise."""
    pmono_01.play(server=server)
    # No assertions; just give the pattern a moment to run.
    time.sleep(1)
def test_manual_incommunicado_pmono_02():
    """Render pmono_02 without a server and check the raw OSC bundles.

    The chord pattern allocates two node ids (1000/1001) in the first
    bundle, retunes both per step, and releases both with gate 0.
    """
    lists, deltas = pytest.helpers.manual_incommunicado(pmono_02)
    assert lists == [
        [
            10,
            [
                ["/s_new", "default", 1000, 0, 1, "amplitude", 1.0, "frequency", 440],
                ["/s_new", "default", 1001, 0, 1, "amplitude", 1.0, "frequency", 550],
            ],
        ],
        [
            11.0,
            [
                ["/n_set", 1000, "amplitude", 1.0, "frequency", 550],
                ["/n_set", 1001, "amplitude", 1.0, "frequency", 660],
            ],
        ],
        [
            13.0,
            [
                ["/n_set", 1000, "amplitude", 1.0, "frequency", 660],
                ["/n_set", 1001, "amplitude", 1.0, "frequency", 770],
            ],
        ],
        [16.0, [["/n_set", 1000, "gate", 0], ["/n_set", 1001, "gate", 0]]],
    ]
    # One delta per event duration; the trailing None marks exhaustion.
    assert deltas == [1.0, 2.0, 3.0, None]
def test_manual_communicado_pmono_02(server):
    """Step pmono_02 on a live server; two voices must update in lockstep."""
    player = supriya.patterns.RealtimeEventPlayer(pmono_02, server=server)

    def query_tree():
        return str(server.query_remote_nodes(include_controls=True))

    empty_state = uqbar.strings.normalize(
        r"""
        NODE TREE 0 group
            1 group
        """
    )
    synth_template = r"""
        NODE TREE 0 group
            1 group
                1001 default
                    out: 0.0, amplitude: 1.0, frequency: {}, gate: {}, pan: 0.5
                1000 default
                    out: 0.0, amplitude: 1.0, frequency: {}, gate: {}, pan: 0.5
        """
    # Initial state: only the default group exists.
    assert query_tree() == empty_state
    # (frequency of node 1001, frequency of node 1000, shared gate) per step;
    # the final step releases both voices with gate 0.
    steps = [
        (550.0, 440.0, 1.0),
        (660.0, 550.0, 1.0),
        (770.0, 660.0, 1.0),
        (770.0, 660.0, 0.0),
    ]
    for upper_frequency, lower_frequency, gate in steps:
        player(0, 0)
        server.sync()
        assert query_tree() == uqbar.strings.normalize(
            synth_template.format(upper_frequency, gate, lower_frequency, gate)
        )
    # Wait for termination of both released voices.
    time.sleep(0.5)
    assert query_tree() == empty_state
def test_automatic_communicado_pmono_02(server):
    """Smoke test: auto-playing pmono_02 on a live server must not raise."""
    pmono_02.play(server=server)
    # No assertions; just give the pattern a moment to run.
    time.sleep(1)
def test_nonrealtime_01():
    """Inscribe pmono_01 into a non-realtime session starting at t=10.

    The first bundle is prefixed with the /d_recv commands for the default
    synthdef, which is referenced by its content hash rather than by name.
    """
    session = supriya.nonrealtime.Session()
    with session.at(10):
        session.inscribe(pmono_01)
    d_recv_commands = pytest.helpers.build_d_recv_commands(
        [supriya.assets.synthdefs.default]
    )
    assert session.to_lists() == [
        [
            10.0,
            [
                *d_recv_commands,
                [
                    "/s_new",
                    "da0982184cc8fa54cf9d288a0fe1f6ca",
                    1000,
                    0,
                    0,
                    "amplitude",
                    1.0,
                    "frequency",
                    440,
                ],
            ],
        ],
        [11.0, [["/n_set", 1000, "amplitude", 1.0, "frequency", 660]]],
        [13.0, [["/n_set", 1000, "amplitude", 1.0, "frequency", 880]]],
        # NOTE(review): the trailing [0] appears to be the end-of-session
        # marker emitted by to_lists -- confirm against Session.to_lists.
        [16.0, [["/n_set", 1000, "gate", 0], [0]]],
    ]
def test_nonrealtime_02():
    """Inscribe the chord pattern pmono_02 into a session starting at t=0.

    Two nodes are allocated in the first bundle and retuned together; note
    that relative to realtime rendering the /n_set order per bundle differs
    (1001 before 1000 here).
    """
    session = supriya.nonrealtime.Session()
    with session.at(0):
        session.inscribe(pmono_02)
    d_recv_commands = pytest.helpers.build_d_recv_commands(
        [supriya.assets.synthdefs.default]
    )
    assert session.to_lists() == [
        [
            0.0,
            [
                *d_recv_commands,
                [
                    "/s_new",
                    "da0982184cc8fa54cf9d288a0fe1f6ca",
                    1000,
                    0,
                    0,
                    "amplitude",
                    1.0,
                    "frequency",
                    440,
                ],
                [
                    "/s_new",
                    "da0982184cc8fa54cf9d288a0fe1f6ca",
                    1001,
                    0,
                    0,
                    "amplitude",
                    1.0,
                    "frequency",
                    550,
                ],
            ],
        ],
        [
            1.0,
            [
                ["/n_set", 1001, "amplitude", 1.0, "frequency", 660],
                ["/n_set", 1000, "amplitude", 1.0, "frequency", 550],
            ],
        ],
        [
            3.0,
            [
                ["/n_set", 1001, "amplitude", 1.0, "frequency", 770],
                ["/n_set", 1000, "amplitude", 1.0, "frequency", 660],
            ],
        ],
        # NOTE(review): the trailing [0] appears to be the end-of-session
        # marker emitted by to_lists -- confirm against Session.to_lists.
        [6.0, [["/n_set", 1000, "gate", 0], ["/n_set", 1001, "gate", 0], [0]]],
    ]
def test_manual_stop_pmono_01(server):
    """Stopping a playing pmono_01 must free its synth immediately."""

    def query_tree():
        return str(server.query_remote_nodes(include_controls=True))

    empty_state = uqbar.strings.normalize(
        r"""
        NODE TREE 0 group
            1 group
        """
    )
    # Initial state: nothing but the default group.
    assert query_tree() == empty_state
    player = pmono_01.play(server=server)
    # Two seconds in, the pattern is on its second event (660 Hz).
    time.sleep(2)
    assert query_tree() == uqbar.strings.normalize(
        r"""
        NODE TREE 0 group
            1 group
                1000 default
                    out: 0.0, amplitude: 1.0, frequency: 660.0, gate: 1.0, pan: 0.5
        """
    )
    player.stop()
    server.sync()
    assert query_tree() == empty_state
    # Wait for termination; the tree must remain empty.
    time.sleep(0.5)
    assert query_tree() == empty_state
def test_manual_stop_pmono_02(server):
    """Stopping a playing pmono_02 must free both voices immediately."""

    def query_tree():
        return str(server.query_remote_nodes(include_controls=True))

    empty_state = uqbar.strings.normalize(
        r"""
        NODE TREE 0 group
            1 group
        """
    )
    # Initial state: nothing but the default group.
    assert query_tree() == empty_state
    player = pmono_02.play(server=server)
    # Two seconds in, the pattern is on its second chord (660/550 Hz).
    time.sleep(2)
    assert query_tree() == uqbar.strings.normalize(
        r"""
        NODE TREE 0 group
            1 group
                1001 default
                    out: 0.0, amplitude: 1.0, frequency: 660.0, gate: 1.0, pan: 0.5
                1000 default
                    out: 0.0, amplitude: 1.0, frequency: 550.0, gate: 1.0, pan: 0.5
        """
    )
    player.stop()
    server.sync()
    assert query_tree() == empty_state
    # Wait for termination; the tree must remain empty.
    time.sleep(0.5)
    assert query_tree() == empty_state
| |
from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
# Shorthand for Theano's configured float dtype (e.g. "float32"); all test
# arrays are cast to this so they match the active backend.
fX = theano.config.floatX
def test_pool_output_shape_2d():
    """pool_output_shape must agree with Theano's max_pool_2d output shape
    when pooling over axes (2, 3) of a 4-d tensor."""
    def test_same(input_shape, local_sizes, strides, pads, ignore_border):
        # Shape inferred statically by treeano...
        res = tn.downsample.pool_output_shape(
            input_shape,
            (2, 3),
            local_sizes,
            strides,
            pads,
            ignore_border,
        )
        # ...versus the shape Theano actually produces for the same config.
        # NOTE(review): imported locally -- presumably because this module is
        # deprecated in later Theano releases; confirm before reorganizing.
        from theano.tensor.signal.downsample import max_pool_2d
        ans = max_pool_2d(
            T.constant(np.random.randn(*input_shape).astype(fX)),
            ds=local_sizes,
            st=strides,
            ignore_border=ignore_border,
            padding=pads,
        ).shape.eval()
        print(ans, res)
        np.testing.assert_equal(ans, res)
    # tests w/ ignore border
    test_same((1, 1, 5, 6), (2, 3), (1, 1), (0, 0), True)
    test_same((1, 1, 5, 6), (2, 3), (2, 2), (0, 0), True)
    test_same((1, 1, 1, 1), (2, 3), (2, 2), (0, 0), True)
    test_same((1, 1, 5, 6), (2, 3), (1, 1), (0, 1), True)
    test_same((1, 1, 5, 6), (2, 3), (2, 2), (1, 0), True)
    test_same((1, 1, 1, 1), (2, 3), (2, 2), (1, 1), True)
    # tests w/o ignore border, and stride <= pool_size
    test_same((1, 1, 5, 6), (2, 3), (1, 1), (0, 0), False)
    test_same((1, 1, 5, 6), (2, 3), (2, 2), (0, 0), False)
    test_same((1, 1, 1, 1), (2, 3), (2, 2), (0, 0), False)
    # tests w/o ignore border, and stride > pool_size
    test_same((1, 1, 5, 6), (2, 3), (3, 3), (0, 0), False)
    test_same((1, 1, 5, 6), (2, 3), (3, 3), (0, 0), False)
    test_same((1, 1, 1, 1), (2, 3), (3, 3), (0, 0), False)
def test_pool_output_shape_3d():
    """Shape inference for pooling over three trailing axes (2, 3, 4)."""
    # Single case, so the helper indirection of the 2-d test is inlined.
    res = tn.downsample.pool_output_shape(
        (1, 1, 2, 2, 2),  # input shape
        (2, 3, 4),        # pooled axes
        (2, 2, 2),        # pool size
        (2, 2, 2),        # strides
        (0, 0, 0),        # padding
        False,            # ignore_border
    )
    ans = (1, 1, 1, 1, 1)
    print(ans, res)
    np.testing.assert_equal(ans, res)
def test_pool_output_shape_custom_pool_2d_node():
    """pool_output_shape must match the shape CustomPool2DNode produces.

    Only zero padding with ignore_border=True is exercised, because those
    are the assumptions baked into the pool2d node (asserted below).
    """
    def test_same(input_shape, local_sizes, strides, pads, ignore_border):
        res = tn.downsample.pool_output_shape(
            input_shape,
            (2, 3),
            local_sizes,
            strides,
            pads,
            ignore_border,
        )
        # pool2d node assumes 0 padding
        assert pads == (0, 0)
        # pool2d node assumes ignoring border
        assert ignore_border
        network = tn.SequentialNode(
            "s",
            [tn.ConstantNode("c",
                             value=np.random.randn(*input_shape).astype(fX)),
             tn.CustomPool2DNode("p",
                                 pool_function=T.mean,
                                 pool_size=local_sizes,
                                 stride=strides,
                                 )]
        ).network()
        # Actual runtime shape of the pooled output.
        ans = network["p"].get_vw("default").variable.shape.eval()
        print(ans, res)
        np.testing.assert_equal(ans, res)
    test_same((3, 4, 5, 6), (2, 3), (1, 1), (0, 0), True)
    test_same((3, 4, 5, 6), (2, 3), (2, 2), (0, 0), True)
    test_same((3, 4, 1, 1), (2, 3), (2, 2), (0, 0), True)
# Serialization smoke tests: each node type, constructed with only a name,
# must round-trip through tn.check_serialization unchanged.
def test_feature_pool_node_serialization():
    tn.check_serialization(tn.FeaturePoolNode("a"))
def test_maxout_node_serialization():
    tn.check_serialization(tn.MaxoutNode("a"))
def test_custom_pool_2d_node_serialization():
    tn.check_serialization(tn.CustomPool2DNode("a"))
def test_mean_pool_2d_node_serialization():
    tn.check_serialization(tn.MeanPool2DNode("a"))
def test_global_pool_node_serialization():
    tn.check_serialization(tn.CustomGlobalPoolNode("a"))
def test_maxout_hyperparameters():
    """MaxoutNode must expose FeaturePoolNode's hyperparameters except
    pool_function (which maxout does not take)."""
    feature_pool_names = set(tn.FeaturePoolNode.hyperparameter_names)
    maxout_names = set(tn.MaxoutNode.hyperparameter_names + ("pool_function",))
    nt.assert_equal(feature_pool_names, maxout_names)
def test_maxout_node():
    """Maxout over 15 features with 5 pieces keeps each group's maximum."""
    network = tn.SequentialNode(
        "s",
        [tn.InputNode("i", shape=(1, 15)),
         tn.MaxoutNode("m", num_pieces=5)],
    ).network()
    maxout_fn = network.function(["i"], ["m"])
    inputs = np.arange(15).astype(fX).reshape(1, 15)
    # Groups of 5 consecutive features -> maxima at indices 4, 9, 14.
    expected = np.array([[4, 9, 14]], dtype=fX)
    np.testing.assert_equal(maxout_fn(inputs)[0], expected)
def test_mean_pool_2d_node():
    """2x2 mean-pooling a 4x4 ramp halves each spatial dimension."""
    network = tn.SequentialNode(
        "s",
        [tn.InputNode("i", shape=(1, 1, 4, 4)),
         tn.MeanPool2DNode("m", pool_size=(2, 2))],
    ).network()
    pool_fn = network.function(["i"], ["m"])
    inputs = np.arange(16).astype(fX).reshape(1, 1, 4, 4)
    # Each output cell is the mean of one non-overlapping 2x2 window.
    expected = np.array([[[[0 + 1 + 4 + 5, 2 + 3 + 6 + 7],
                           [8 + 9 + 12 + 13, 10 + 11 + 14 + 15]]]],
                        dtype=fX) / 4
    np.testing.assert_equal(expected, pool_fn(inputs)[0])
    # The statically-inferred shape must match as well.
    nt.assert_equal(expected.shape,
                    network["m"].get_vw("default").shape)
def test_max_pool_2d_node():
    """2x2 max-pooling a 4x4 ramp keeps each window's largest entry."""
    network = tn.SequentialNode(
        "s",
        [tn.InputNode("i", shape=(1, 1, 4, 4)),
         tn.MaxPool2DNode("m", pool_size=(2, 2))],
    ).network()
    pool_fn = network.function(["i"], ["m"])
    inputs = np.arange(16).astype(fX).reshape(1, 1, 4, 4)
    # Window maxima of the row-major ramp.
    expected = np.array([[[[5, 7],
                           [13, 15]]]], dtype=fX)
    np.testing.assert_equal(expected, pool_fn(inputs)[0])
    # The statically-inferred shape must match as well.
    nt.assert_equal(expected.shape,
                    network["m"].get_vw("default").shape)
# sum pool doesn't work with cudnn
# The test is only *defined* when running on CPU, so test collectors never
# see it on GPU configurations.
if "gpu" not in theano.config.device:
    def test_sum_pool_2d_node():
        """2x2 sum-pooling a 4x4 ramp sums each non-overlapping window."""
        network = tn.SequentialNode(
            "s",
            [tn.InputNode("i", shape=(1, 1, 4, 4)),
             tn.SumPool2DNode("m", pool_size=(2, 2))]).network()
        fn = network.function(["i"], ["m"])
        x = np.arange(16).astype(fX).reshape(1, 1, 4, 4)
        ans = np.array([[[[0 + 1 + 4 + 5, 2 + 3 + 6 + 7],
                          [8 + 9 + 12 + 13, 10 + 11 + 14 + 15]]]], dtype=fX)
        np.testing.assert_equal(ans, fn(x)[0])
        nt.assert_equal(ans.shape,
                        network["m"].get_vw("default").shape)
def test_custom_global_pool_node():
    """CustomGlobalPoolNode with T.mean averages over all spatial axes."""
    network = tn.SequentialNode(
        "s",
        [tn.InputNode("i", shape=(6, 5, 4, 3)),
         tn.CustomGlobalPoolNode("gp", pool_function=T.mean)],
    ).network()
    pool_fn = network.function(["i"], ["s"])
    inputs = np.random.randn(6, 5, 4, 3).astype(fX)
    # Global pooling collapses the trailing spatial axes (2, 3).
    expected = inputs.mean(axis=(2, 3))
    np.testing.assert_allclose(expected,
                               pool_fn(inputs)[0],
                               rtol=1e-5,
                               atol=1e-7)
def test_global_mean_pool_2d_node():
    """GlobalMeanPool2DNode matches a plain mean over the spatial axes."""
    network = tn.SequentialNode(
        "s",
        [tn.InputNode("i", shape=(6, 5, 4, 3)),
         tn.GlobalMeanPool2DNode("gp")],
    ).network()
    pool_fn = network.function(["i"], ["s"])
    inputs = np.random.randn(6, 5, 4, 3).astype(fX)
    # Same contract as the custom global pool test, without pool_function.
    expected = inputs.mean(axis=(2, 3))
    np.testing.assert_allclose(expected,
                               pool_fn(inputs)[0],
                               rtol=1e-5,
                               atol=1e-7)
| |
# $Id: TestFileCIFSwriteHTTPread.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for TestFileCIFSwriteHTTPread module
#
import os
import sys
import httplib
import urllib2
import unittest
import subprocess
sys.path.append("../..")
from TestConfig import TestConfig
import TestHttpUtils
class TestFileCIFSwriteHTTPread(unittest.TestCase):
    """Access-control tests (Python 2 / urllib2).

    Files written over CIFS by user A must be readable/updatable over
    HTTP(WebDAV) only by appropriately privileged users.  The CIFS test
    creates the fixture files that the subsequent HTTP tests read back,
    so the "component" suite runs them in that order.
    """
    def do_HTTP_redirect(self, opener, method, uri, data, content_type):
        # Thin wrapper so all tests share one redirect-following helper.
        return TestHttpUtils.do_HTTP_redirect(opener, method, uri, data, content_type)
    def setUp(self):
        return
    def tearDown(self):
        return
    # Test cases
    def testNull(self):
        assert (True), "True expected"
        return
    def testSharedUserCIFS(self):
        """Mount the share as user A and create files in the shared and
        collab areas, verifying their content can be read back locally."""
        # NOTE(review): the 'userA' key below is never referenced by the
        # format string ('user' is used instead) -- appears to be leftover.
        mountcommand = ( '/sbin/mount.cifs //%(host)s/%(share)s/ %(mountpt)s -o rw,user=%(user)s,password=%(pass)s,nounix,forcedirectio' %
                         { 'host': TestConfig.hostname
                         , 'share': TestConfig.cifssharename
                         , 'userA': TestConfig.userAname
                         , 'user': TestConfig.userAname
                         , 'mountpt': TestConfig.cifsmountpoint
                         , 'pass': TestConfig.userApass
                         } )
        status=os.system(mountcommand)
        self.assertEqual(status, 0, 'CIFS Mount failure')
        # Create and read back a file in user A's shared area.
        f = open(TestConfig.cifsmountpoint+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp','w+')
        assert (f), "File creation failed"
        f.write('Test creation of file\n')
        f.close()
        f = open(TestConfig.cifsmountpoint+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp','r')
        l = f.readline()
        f.close()
        self.assertEqual(l, 'Test creation of file\n', 'Unexpected file content by user A in shared space')
        # Create and read back a file in user A's collab area.
        f = open(TestConfig.cifsmountpoint+'/collab/'+TestConfig.userAname+'/testCreateFileCIFSAcollabspace.tmp','w+')
        assert (f), "File creation failed"
        f.write('Test creation of file\n')
        f.close()
        f = open(TestConfig.cifsmountpoint+'/collab/'+TestConfig.userAname+'/testCreateFileCIFSAcollabspace.tmp','r')
        l = f.readline()
        f.close()
        self.assertEqual(l, 'Test creation of file\n', 'Unexpected file content by user A')
        os.system('/sbin/umount.cifs '+TestConfig.cifsmountpoint)
    def testSharedUserHTTPB(self):
        """User B may read but never update user A's shared/collab files."""
        passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
        passman.add_password(None, TestConfig.webdavbaseurl, TestConfig.userBname, TestConfig.userBpass)
        authhandler = urllib2.HTTPBasicAuthHandler(passman)
        opener = urllib2.build_opener(authhandler)
        urllib2.install_opener(opener)
        createstring="Test creation of file\n"
        modifystring="And this is after an update"
        # PUT of a new file into A's shared area must be rejected (401).
        disallowed = False
        message = self.do_HTTP_redirect(opener, "PUT",
            TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAspace.tmp',
            modifystring, 'text/plain')
        if message[0] == 401:
            disallowed = True
        assert disallowed, "User B can create file in User A's area by HTTP! " + str(message)
        #print "URI: "+TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp'
        # Reading A's shared file, however, is allowed for user B.
        authhandler = urllib2.HTTPBasicAuthHandler(passman)
        opener = urllib2.build_opener(authhandler)
        urllib2.install_opener(opener)
        pagehandle = urllib2.urlopen(TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp')
        thepage = pagehandle.read()
        self.assertEqual(thepage, createstring)
        # Updating A's shared file must be rejected (401).
        disallowed = False
        message = self.do_HTTP_redirect(opener, "PUT",
            TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp',
            modifystring, 'text/plain')
        if message[0] == 401:
            disallowed = True
        assert disallowed, "User B can update file in User A's shared area by HTTP! " + str(message)
        # Same pattern for the collab area: read allowed, update rejected.
        authhandler = urllib2.HTTPBasicAuthHandler(passman)
        opener = urllib2.build_opener(authhandler)
        urllib2.install_opener(opener)
        pagehandle = urllib2.urlopen(TestConfig.webdavbaseurl+'/collab/'+TestConfig.userAname+'/testCreateFileCIFSAcollabspace.tmp')
        thepage = pagehandle.read()
        self.assertEqual(thepage, createstring)
        disallowed = False
        message = self.do_HTTP_redirect(opener, "PUT",
            TestConfig.webdavbaseurl+'/collab/'+TestConfig.userAname+'/testCreateFileCIFSAcollabspace.tmp',
            modifystring, 'text/plain')
        if message[0] == 401:
            disallowed = True
        assert disallowed, "User B can update file in User A's collab area by HTTP! " + str(message)
    def testSharedUserHTTPRGLeader(self):
        """The research-group leader may read user A's files but not update
        them in either the shared or the collab area."""
        passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
        passman.add_password(None, TestConfig.webdavbaseurl, TestConfig.userRGleadername, TestConfig.userRGleaderpass)
        authhandler = urllib2.HTTPBasicAuthHandler(passman)
        opener = urllib2.build_opener(authhandler)
        urllib2.install_opener(opener)
        createstring="Test creation of file\n"
        modifystring="And this is after an update"
        # Read of A's shared file is allowed.
        authhandler = urllib2.HTTPBasicAuthHandler(passman)
        opener = urllib2.build_opener(authhandler)
        urllib2.install_opener(opener)
        pagehandle = urllib2.urlopen(TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp')
        thepage = pagehandle.read()
        self.assertEqual(thepage, createstring)
        # Update must fail with 401; if the PUT unexpectedly succeeds the
        # follow-up read would show the modified content and the final
        # assert reports the privilege escalation.
        disallowed = False
        try:
            self.do_HTTP_redirect(opener, "PUT",
                TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp',
                modifystring, 'text/plain')
            phan=urllib2.urlopen(TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp')
            thepage=phan.read()
            self.assertEqual(thepage,modifystring)
        except urllib2.HTTPError as e:
            self.assertEqual(e.code, 401, "Operation should be 401 (auth failed), was: "+str(e))
            disallowed = True
        assert disallowed, "Group leader can update file in User A's shared area by HTTP!"
        # Collab area: read allowed, update rejected.
        authhandler = urllib2.HTTPBasicAuthHandler(passman)
        opener = urllib2.build_opener(authhandler)
        urllib2.install_opener(opener)
        pagehandle = urllib2.urlopen(TestConfig.webdavbaseurl+'/collab/'+TestConfig.userAname+'/testCreateFileCIFSAcollabspace.tmp')
        thepage = pagehandle.read()
        self.assertEqual(thepage, createstring)
        disallowed = False
        message = self.do_HTTP_redirect(opener, "PUT",
            TestConfig.webdavbaseurl+'/collab/'+TestConfig.userAname+'/testCreateFileCIFSAcollabspace.tmp',
            modifystring, 'text/plain')
        if message[0] == 401:
            disallowed = True
        assert disallowed,"Group leader can update file in User A's collab area by HTTP! " + str(message)
    def testSharedUserHTTPCollab(self):
        """A collaborator must have no access to A's shared area, and
        read-only access to A's collab area."""
        passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
        passman.add_password(None, TestConfig.webdavbaseurl, TestConfig.collabname, TestConfig.collabpass)
        authhandler = urllib2.HTTPBasicAuthHandler(passman)
        opener = urllib2.build_opener(authhandler)
        urllib2.install_opener(opener)
        createstring="Test creation of file\n"
        modifystring="And this is after an update"
        # Reading A's shared file must fail; thepage stays None on failure.
        # NOTE(review): the bare except also swallows the assertEqual's
        # AssertionError and any unrelated error -- consider narrowing to
        # urllib2.URLError.
        thepage=None
        try:
            pagehandle = urllib2.urlopen(TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp')
            thepage = pagehandle.read()
            self.assertEqual(thepage, createstring)
        except:
            pass
        assert (thepage==None), "Collaborator can read file in User A's shared area by HTTP!"
        # Updating A's shared file must be rejected (401).
        disallowed = False
        message = self.do_HTTP_redirect(opener, "PUT",
            TestConfig.webdavbaseurl+'/shared/'+TestConfig.userAname+'/testCreateFileCIFSAsharedspace.tmp',
            modifystring, 'text/plain')
        if message[0] == 401:
            disallowed = True
        assert disallowed,"Collaborator can update file in User A's shared area by HTTP! " + str(message)
        # Collab area: read allowed, update rejected.
        authhandler = urllib2.HTTPBasicAuthHandler(passman)
        opener = urllib2.build_opener(authhandler)
        urllib2.install_opener(opener)
        pagehandle = urllib2.urlopen(TestConfig.webdavbaseurl+'/collab/'+TestConfig.userAname+'/testCreateFileCIFSAcollabspace.tmp')
        thepage = pagehandle.read()
        self.assertEqual(thepage, createstring)
        disallowed = False
        message = self.do_HTTP_redirect(opener, "PUT",
            TestConfig.webdavbaseurl+'/collab/'+TestConfig.userAname+'/testCreateFileCIFSAcollabspace.tmp',
            modifystring, 'text/plain')
        if message[0] == 401:
            disallowed = True
        assert disallowed, "Collaborator can update file in User A's collab area by HTTP! " + str(message)
        return
    # Sentinel/placeholder tests
    def testUnits(self):
        assert (True)
    def testComponents(self):
        assert (True)
    def testIntegration(self):
        assert (True)
    def testPending(self):
        # Deliberately failing marker so the "pending" suite is noticed.
        assert (False), "No pending test"
# Assemble test suite
from MiscLib import TestUtils
def getTestSuite(select="unit"):
    """
    Get test suite

    select  is one of the following:
            "unit"      return suite of unit tests only
            "component" return suite of unit and component tests
            "all"       return suite of unit, component and integration tests
            "pending"   return suite of pending tests
            name        a single named test to be run
    """
    # Maps suite names to the test-method names they include; the "pending"
    # entries list tests that are not yet implemented in this class.
    testdict = {
        "unit":
            [ "testUnits"
            , "testNull"
            ],
        "component":
            [ "testComponents"
            , "testSharedUserCIFS"
            , "testSharedUserHTTPB"
            , "testSharedUserHTTPRGLeader"
            , "testSharedUserHTTPCollab"
            ],
        "integration":
            [ "testIntegration"
            ],
        "pending":
            [ "testPending"
            , "testReadMeSSH"
            , "testReadMeDAVfs"
            , "testCreateFileDAVfs"
            , "testUpdateFileDAVfs"
            , "testDeleteFileDAVfs"
            , "testDeleteFileCIFS"
            , "testDeleteFileHTTP"
            ]
        }
    return TestUtils.getTestSuite(TestFileCIFSwriteHTTPread, testdict, select=select)
# Run unit tests directly from command line
if __name__ == "__main__":
    # Delegate option parsing and suite selection to the shared helper.
    TestUtils.runTests("TestFileCIFSwriteHTTPread", getTestSuite, sys.argv)
# End.
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Snapshot Build Bisect Tool
This script bisects a snapshot archive using binary search. It starts at
a bad revision (it will try to guess HEAD) and asks for a last known-good
revision. It will then binary search across this revision range by downloading,
unzipping, and opening Chromium for you. After testing the specific revision,
it will ask you whether it is good or bad before continuing the search.
"""
# The root URL for storage.
BASE_URL = 'http://commondatastorage.googleapis.com/chromium-browser-snapshots'

# URL to the ViewVC commit page.
BUILD_VIEWVC_URL = 'http://src.chromium.org/viewvc/chrome?view=rev&revision=%d'

# Changelogs URL (takes a good:bad revision range).
CHANGELOG_URL = ('http://build.chromium.org/f/chromium/'
                 'perf/dashboard/ui/changelog.html?url=/trunk/src&range=%d:%d')

# DEPS file URL (takes a revision number).
DEPS_FILE = 'http://src.chromium.org/viewvc/chrome/trunk/src/DEPS?revision=%d'

# WebKit Changelogs URL (takes a start and stop revision).
WEBKIT_CHANGELOG_URL = ('http://trac.webkit.org/log/'
                        'trunk/?rev=%d&stop_rev=%d&verbose=on')
###############################################################################
import math
import optparse
import os
import pipes
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import urllib
from xml.etree import ElementTree
import zipfile
class PathContext(object):
  """A PathContext is used to carry the information used to construct URLs and
  paths when dealing with the storage server and archives."""
  def __init__(self, platform, good_revision, bad_revision):
    super(PathContext, self).__init__()
    # Store off the input parameters.
    self.platform = platform  # What's passed in to the '-a/--archive' option.
    self.good_revision = good_revision
    self.bad_revision = bad_revision
    # The name of the ZIP file in a revision directory on the server.
    self.archive_name = None
    # Set some internal members:
    #   _listing_platform_dir = Directory that holds revisions. Ends with a '/'.
    #   _archive_extract_dir = Uncompressed directory in the archive_name file.
    #   _binary_name = The name of the executable to run.
    if self.platform == 'linux' or self.platform == 'linux64':
      self._listing_platform_dir = 'Linux/'
      self.archive_name = 'chrome-linux.zip'
      self._archive_extract_dir = 'chrome-linux'
      self._binary_name = 'chrome'
      # Linux and x64 share all the same path data except for the archive dir.
      if self.platform == 'linux64':
        self._listing_platform_dir = 'Linux_x64/'
    elif self.platform == 'mac':
      self._listing_platform_dir = 'Mac/'
      self.archive_name = 'chrome-mac.zip'
      self._archive_extract_dir = 'chrome-mac'
      self._binary_name = 'Chromium.app/Contents/MacOS/Chromium'
    elif self.platform == 'win':
      self._listing_platform_dir = 'Win/'
      self.archive_name = 'chrome-win32.zip'
      self._archive_extract_dir = 'chrome-win32'
      self._binary_name = 'chrome.exe'
    else:
      raise Exception('Invalid platform: %s' % self.platform)
  def GetListingURL(self, marker=None):
    """Returns the URL for a directory listing, with an optional marker."""
    marker_param = ''
    if marker:
      marker_param = '&marker=' + str(marker)
    # delimiter=/ makes the bucket listing return CommonPrefixes (one per
    # revision directory) instead of every object.
    return BASE_URL + '/?delimiter=/&prefix=' + self._listing_platform_dir + \
        marker_param
  def GetDownloadURL(self, revision):
    """Gets the download URL for a build archive of a specific revision."""
    return "%s/%s%d/%s" % (
        BASE_URL, self._listing_platform_dir, revision, self.archive_name)
  def GetLastChangeURL(self):
    """Returns a URL to the LAST_CHANGE file."""
    return BASE_URL + '/' + self._listing_platform_dir + 'LAST_CHANGE'
  def GetLaunchPath(self):
    """Returns a relative path (presumably from the archive extraction location)
    that is used to run the executable."""
    return os.path.join(self._archive_extract_dir, self._binary_name)
  def ParseDirectoryIndex(self):
    """Parses the Google Storage directory listing into a list of revision
    numbers. The range starts with self.good_revision and goes until
    self.bad_revision."""
    def _FetchAndParse(url):
      """Fetches a URL and returns a 2-Tuple of ([revisions], next-marker). If
      next-marker is not None, then the listing is a partial listing and another
      fetch should be performed with next-marker being the marker= GET
      parameter."""
      handle = urllib.urlopen(url)
      document = ElementTree.parse(handle)
      # All nodes in the tree are namespaced. Get the root's tag name to extract
      # the namespace. Etree does namespaces as |{namespace}tag|.
      root_tag = document.getroot().tag
      end_ns_pos = root_tag.find('}')
      if end_ns_pos == -1:
        raise Exception("Could not locate end namespace for directory index")
      namespace = root_tag[:end_ns_pos + 1]
      # Find the prefix (_listing_platform_dir) and whether or not the list is
      # truncated.
      prefix_len = len(document.find(namespace + 'Prefix').text)
      next_marker = None
      is_truncated = document.find(namespace + 'IsTruncated')
      if is_truncated is not None and is_truncated.text.lower() == 'true':
        next_marker = document.find(namespace + 'NextMarker').text
      # Get a list of all the revisions.
      all_prefixes = document.findall(namespace + 'CommonPrefixes/' +
                                      namespace + 'Prefix')
      # The <Prefix> nodes have content of the form of
      # |_listing_platform_dir/revision/|. Strip off the platform dir and the
      # trailing slash to just have a number.
      revisions = []
      for prefix in all_prefixes:
        revnum = prefix.text[prefix_len:-1]
        try:
          revnum = int(revnum)
          revisions.append(revnum)
        except ValueError:
          # Non-numeric directory names (e.g. LAST_CHANGE) are skipped.
          pass
      return (revisions, next_marker)
    # Fetch the first list of revisions.
    (revisions, next_marker) = _FetchAndParse(self.GetListingURL())
    # If the result list was truncated, refetch with the next marker. Do this
    # until an entire directory listing is done.
    while next_marker:
      next_url = self.GetListingURL(next_marker)
      (new_revisions, next_marker) = _FetchAndParse(next_url)
      revisions.extend(new_revisions)
    return revisions
  def GetRevList(self):
    """Gets the list of revision numbers between self.good_revision and
    self.bad_revision."""
    # Download the revlist and filter for just the range between good and bad.
    minrev = self.good_revision
    maxrev = self.bad_revision
    # NOTE(review): ParseDirectoryIndex already yields ints, so the map(int,
    # ...) is redundant (harmless under Python 2, where map returns a list).
    revlist = map(int, self.ParseDirectoryIndex())
    revlist = [x for x in revlist if x >= minrev and x <= maxrev]
    revlist.sort()
    return revlist
def UnzipFilenameToDir(filename, dir):
"""Unzip |filename| to directory |dir|."""
cwd = os.getcwd()
if not os.path.isabs(filename):
filename = os.path.join(cwd, filename)
zf = zipfile.ZipFile(filename)
# Make base.
try:
if not os.path.isdir(dir):
os.mkdir(dir)
os.chdir(dir)
# Extract files.
for info in zf.infolist():
name = info.filename
if name.endswith('/'): # dir
if not os.path.isdir(name):
os.makedirs(name)
else: # file
dir = os.path.dirname(name)
if not os.path.isdir(dir):
os.makedirs(dir)
out = open(name, 'wb')
out.write(zf.read(name))
out.close()
# Set permissions. Permission info in external_attr is shifted 16 bits.
os.chmod(name, info.external_attr >> 16L)
os.chdir(cwd)
except Exception, e:
print >>sys.stderr, e
sys.exit(1)
def FetchRevision(context, rev, filename, quit_event=None, progress_event=None):
  """Downloads and unzips revision |rev|.
  @param context A PathContext instance.
  @param rev The Chromium revision number/tag to download.
  @param filename The destination for the downloaded file.
  @param quit_event A threading.Event which will be set by the master thread to
                    indicate that the download should be aborted.
  @param progress_event A threading.Event which will be set by the master thread
                        to indicate that the progress of the download should be
                        displayed.
  """
  def ReportHook(blocknum, blocksize, totalsize):
    # Raising here unwinds urlretrieve; the RuntimeError is swallowed by
    # the except clause below, abandoning the partial download.
    if quit_event and quit_event.isSet():
      raise RuntimeError("Aborting download of revision %d" % rev)
    if progress_event and progress_event.isSet():
      size = blocknum * blocksize
      if totalsize == -1:  # Total size not known.
        progress = "Received %d bytes" % size
      else:
        size = min(totalsize, size)
        progress = "Received %d of %d bytes, %.2f%%" % (
            size, totalsize, 100.0 * size / totalsize)
      # Send a \r to let all progress messages use just one line of output.
      sys.stdout.write("\r" + progress)
      sys.stdout.flush()
  download_url = context.GetDownloadURL(rev)
  try:
    urllib.urlretrieve(download_url, filename, ReportHook)
    # Finish the single-line progress output with a newline.
    if progress_event and progress_event.isSet():
      print
  except RuntimeError, e:
    # Expected when quit_event fired inside ReportHook: abort silently.
    pass
def RunRevision(context, revision, zipfile, profile, num_runs, args):
  """Given a zipped revision, unzip it and run the test.

  Unzips |zipfile| into a fresh temporary directory, launches the build
  |num_runs| times with the given |profile| and extra |args|, then removes
  the temporary directory (best effort).  Returns (returncode, stdout,
  stderr) of the LAST run only.
  """
  print "Trying revision %d..." % revision
  # Create a temp directory and unzip the revision into it.
  cwd = os.getcwd()
  tempdir = tempfile.mkdtemp(prefix='bisect_tmp')
  UnzipFilenameToDir(zipfile, tempdir)
  os.chdir(tempdir)
  # Run the build as many times as specified.
  testargs = [context.GetLaunchPath(), '--user-data-dir=%s' % profile] + args
  for i in range(0, num_runs):
    subproc = subprocess.Popen(testargs,
                               bufsize=-1,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    (stdout, stderr) = subproc.communicate()
  os.chdir(cwd)
  try:
    # Best-effort cleanup; ignore_errors=True plus the except keeps a busy
    # or locked tempdir from aborting the bisect.
    shutil.rmtree(tempdir, True)
  except Exception, e:
    pass
  # NOTE(review): assumes num_runs >= 1 (enforced in main()); with 0 runs
  # |subproc| would be unbound here.
  return (subproc.returncode, stdout, stderr)
def AskIsGoodBuild(rev, status, stdout, stderr):
  """Prompt the user to classify build |rev|; True iff they answer good.

  |status|, |stdout| and |stderr| are unused here but keep the signature
  compatible with the predicate interface expected by Bisect().  Entering
  'q' aborts the bisect via SystemExit; any other unrecognized input simply
  re-prompts.
  """
  prompt = 'Revision %d is [(g)ood/(b)ad/(q)uit]: ' % int(rev)
  # Keep asking until we get a response that we can parse.
  while True:
    answer = raw_input(prompt)
    if answer == 'q':
      raise SystemExit()
    if answer in ('g', 'b'):
      return answer == 'g'
def Bisect(platform,
           good_rev=0,
           bad_rev=0,
           num_runs=1,
           try_args=(),
           profile=None,
           predicate=AskIsGoodBuild):
  """Given known good and known bad revisions, run a binary search on all
  archived revisions to determine the last known good revision.
  @param platform Which build to download/run ('mac', 'win', 'linux64', etc.).
  @param good_rev Number/tag of the last known good revision.
  @param bad_rev Number/tag of the first known bad revision.
  @param num_runs Number of times to run each build for asking good/bad.
  @param try_args A tuple of arguments to pass to the test application.
  @param profile The name of the user profile to run with.
  @param predicate A predicate function which returns True iff the argument
                   chromium revision is good.
  @return A tuple (last_known_good_revision, first_known_bad_revision).
  Threading is used to fetch Chromium revisions in the background, speeding up
  the user's experience. For example, suppose the bounds of the search are
  good_rev=0, bad_rev=100. The first revision to be checked is 50. Depending on
  whether revision 50 is good or bad, the next revision to check will be either
  25 or 75. So, while revision 50 is being checked, the script will download
  revisions 25 and 75 in the background. Once the good/bad verdict on rev 50 is
  known:
    - If rev 50 is good, the download of rev 25 is cancelled, and the next test
      is run on rev 75.
    - If rev 50 is bad, the download of rev 75 is cancelled, and the next test
      is run on rev 25.
  """
  if not profile:
    profile = 'profile'
  context = PathContext(platform, good_rev, bad_rev)
  cwd = os.getcwd()
  # Maps a revision number to the archive's download path in the original cwd.
  _GetDownloadPath = lambda rev: os.path.join(cwd,
      '%d-%s' % (rev, context.archive_name))
  print "Downloading list of known revisions..."
  revlist = context.GetRevList()
  # Get a list of revisions to bisect across.
  if len(revlist) < 2:  # Don't have enough builds to bisect.
    msg = 'We don\'t have enough builds to bisect. revlist: %s' % revlist
    raise RuntimeError(msg)
  # Figure out our bookends and first pivot point; fetch the pivot revision.
  # NOTE: good, bad and pivot are INDICES into revlist, not revision numbers.
  good = 0
  bad = len(revlist) - 1
  pivot = bad / 2
  rev = revlist[pivot]
  zipfile = _GetDownloadPath(rev)
  progress_event = threading.Event()
  progress_event.set()
  print "Downloading revision %d..." % rev
  # First fetch is synchronous (no quit_event) with progress shown.
  FetchRevision(context, rev, zipfile,
                quit_event=None, progress_event=progress_event)
  # Binary search time!
  while zipfile and bad - good > 1:
    # Pre-fetch next two possible pivots
    #  - down_pivot is the next revision to check if the current revision turns
    #    out to be bad.
    #  - up_pivot is the next revision to check if the current revision turns
    #    out to be good.
    down_pivot = int((pivot - good) / 2) + good
    down_thread = None
    if down_pivot != pivot and down_pivot != good:
      down_rev = revlist[down_pivot]
      down_zipfile = _GetDownloadPath(down_rev)
      down_quit_event = threading.Event()
      down_progress_event = threading.Event()
      fetchargs = (context,
                   down_rev,
                   down_zipfile,
                   down_quit_event,
                   down_progress_event)
      down_thread = threading.Thread(target=FetchRevision,
                                     name='down_fetch',
                                     args=fetchargs)
      down_thread.start()
    up_pivot = int((bad - pivot) / 2) + pivot
    up_thread = None
    if up_pivot != pivot and up_pivot != bad:
      up_rev = revlist[up_pivot]
      up_zipfile = _GetDownloadPath(up_rev)
      up_quit_event = threading.Event()
      up_progress_event = threading.Event()
      fetchargs = (context,
                   up_rev,
                   up_zipfile,
                   up_quit_event,
                   up_progress_event)
      up_thread = threading.Thread(target=FetchRevision,
                                   name='up_fetch',
                                   args=fetchargs)
      up_thread.start()
    # Run test on the pivot revision.
    (status, stdout, stderr) = RunRevision(context,
                                           rev,
                                           zipfile,
                                           profile,
                                           num_runs,
                                           try_args)
    os.unlink(zipfile)
    zipfile = None
    # Call the predicate function to see if the current revision is good or bad.
    # On that basis, kill one of the background downloads and complete the
    # other, as described in the comments above.
    try:
      if predicate(rev, status, stdout, stderr):
        good = pivot
        if down_thread:
          down_quit_event.set()  # Kill the download of older revision.
          down_thread.join()
          os.unlink(down_zipfile)
        if up_thread:
          print "Downloading revision %d..." % up_rev
          up_progress_event.set()  # Display progress of download.
          up_thread.join()  # Wait for newer revision to finish downloading.
          pivot = up_pivot
          zipfile = up_zipfile
      else:
        bad = pivot
        if up_thread:
          up_quit_event.set()  # Kill download of newer revision.
          up_thread.join()
          os.unlink(up_zipfile)
        if down_thread:
          print "Downloading revision %d..." % down_rev
          down_progress_event.set()  # Display progress of download.
          down_thread.join()  # Wait for older revision to finish downloading.
          pivot = down_pivot
          zipfile = down_zipfile
    except SystemExit:
      # The user asked to quit (the predicate raised SystemExit): remove any
      # prefetched archives before exiting.
      print "Cleaning up..."
      for f in [_GetDownloadPath(revlist[down_pivot]),
                _GetDownloadPath(revlist[up_pivot])]:
        try:
          os.unlink(f)
        except OSError:
          pass
      sys.exit(0)
    rev = revlist[pivot]
  return (revlist[good], revlist[bad])
def GetWebKitRevisionForChromiumRevision(rev):
  """Returns the webkit revision that was in chromium's DEPS file at
  chromium revision |rev|.

  Fetches DEPS_FILE (module-level URL template) over HTTP and scrapes the
  'webkit_revision' entry with a regex.  Raises if the entry is missing.
  """
  # . doesn't match newlines without re.DOTALL, so this is safe.
  # The single '.' after webkit_revision matches the closing quote character.
  webkit_re = re.compile(r'webkit_revision.:\D*(\d+)')
  url = urllib.urlopen(DEPS_FILE % rev)
  m = webkit_re.search(url.read())
  url.close()
  if m:
    return int(m.group(1))
  else:
    raise Exception('Could not get webkit revision for cr rev %d' % rev)
def main():
  """Parses command-line options, collects the good/bad revision bounds
  (prompting interactively for any that are missing), runs the bisect, and
  prints the resulting changelog URLs.  Returns a process exit code."""
  usage = ('%prog [options] [-- chromium-options]\n'
           'Perform binary search on the snapshot builds.\n'
           '\n'
           'Tip: add "-- --no-first-run" to bypass the first run prompts.')
  parser = optparse.OptionParser(usage=usage)
  # Strangely, the default help output doesn't include the choice list.
  choices = ['mac', 'win', 'linux', 'linux64']
  # linux-chromiumos lacks a continuous archive http://crbug.com/78158
  parser.add_option('-a', '--archive',
                    choices = choices,
                    help = 'The buildbot archive to bisect [%s].' %
                           '|'.join(choices))
  parser.add_option('-b', '--bad', type = 'int',
                    help = 'The bad revision to bisect to.')
  parser.add_option('-g', '--good', type = 'int',
                    help = 'The last known good revision to bisect from.')
  parser.add_option('-p', '--profile', '--user-data-dir', type = 'str',
                    help = 'Profile to use; this will not reset every run. ' +
                    'Defaults to a clean profile.', default = 'profile')
  parser.add_option('-t', '--times', type = 'int',
                    help = 'Number of times to run each build before asking ' +
                    'if it\'s good or bad. Temporary profiles are reused.',
                    default = 1)
  (opts, args) = parser.parse_args()
  if opts.archive is None:
    print 'Error: missing required parameter: --archive'
    print
    parser.print_help()
    return 1
  if opts.bad and opts.good and (opts.good > opts.bad):
    print ('The good revision (%d) must precede the bad revision (%d).\n' %
           (opts.good, opts.bad))
    parser.print_help()
    return 1
  # Create the context. Initialize 0 for the revisions as they are set below.
  context = PathContext(opts.archive, 0, 0)
  # Pick a starting point, try to get HEAD for this.
  if opts.bad:
    bad_rev = opts.bad
  else:
    bad_rev = 0
    try:
      # Location of the latest build revision number
      nh = urllib.urlopen(context.GetLastChangeURL())
      latest = int(nh.read())
      nh.close()
      # Empty input accepts the suggested HEAD revision.
      bad_rev = raw_input('Bad revision [HEAD:%d]: ' % latest)
      if (bad_rev == ''):
        bad_rev = latest
      bad_rev = int(bad_rev)
    except Exception, e:
      print('Could not determine latest revision. This could be bad...')
      bad_rev = int(raw_input('Bad revision: '))
  # Find out when we were good.
  if opts.good:
    good_rev = opts.good
  else:
    good_rev = 0
    try:
      good_rev = int(raw_input('Last known good [0]: '))
    except Exception, e:
      # Non-numeric/empty input keeps the default of 0 (bisect from the
      # beginning of the archive).
      pass
  if opts.times < 1:
    print('Number of times to run (%d) must be greater than or equal to 1.' %
          opts.times)
    parser.print_help()
    return 1
  (last_known_good_rev, first_known_bad_rev) = Bisect(
      opts.archive, good_rev, bad_rev, opts.times, args, opts.profile)
  # Get corresponding webkit revisions.
  try:
    last_known_good_webkit_rev = GetWebKitRevisionForChromiumRevision(
        last_known_good_rev)
    first_known_bad_webkit_rev = GetWebKitRevisionForChromiumRevision(
        first_known_bad_rev)
  except Exception, e:
    # Silently ignore the failure.
    last_known_good_webkit_rev, first_known_bad_webkit_rev = 0, 0
  # We're done. Let the user know the results in an official manner.
  print('You are probably looking for build %d.' % first_known_bad_rev)
  if last_known_good_webkit_rev != first_known_bad_webkit_rev:
    print 'WEBKIT CHANGELOG URL:'
    print WEBKIT_CHANGELOG_URL % (first_known_bad_webkit_rev,
                                  last_known_good_webkit_rev)
  print 'CHANGELOG URL:'
  print CHANGELOG_URL % (last_known_good_rev, first_known_bad_rev)
  print 'Built at revision:'
  print BUILD_VIEWVC_URL % first_known_bad_rev
if __name__ == '__main__':
  sys.exit(main())
| |
from __future__ import division
import numpy as np
__author__ = 'Jakob Abesser'
class Transformer:
    """ Class implements different time-frequency transformations """

    def __init__(self):
        pass

    @staticmethod
    def stft(samples,
             blocksize,
             hopsize,
             n_fft=None,
             window='Hanning',
             sample_rate=44100,
             use_frame_center_time=False):
        """ Short-time Fourier Transform (STFT), based on re-implementation of Matlab spectrogram() function
        :param samples: (ndarray) Audio samples
        :param blocksize: (int) Blocksize in samples
        :param hopsize: (int) Hopsize in samples
        :param n_fft: (int / None) FFT size in samples (zero-padding is used if necessary)
                      -> if None, n_fft is set to blocksize (no zero-padding)
        :param window: (string) Window type, currently implemented
                        'Hanning': N-point symmetric Hanning window
                        'DiffHanning': Differentiated Hanning window
        :param sample_rate: (float / None) Sampling rate in Hz
                            If None, sample_rate is set to 44100.
        :param use_frame_center_time: (bool) Switch to use frame center as frame time, if False, frame begin is used
        :return: spec: (2d ndarray) Complex STFT spectrogram (nFrequencyBins x nTimeFrames)
        :return: time_frame_sec: (ndarray) Time frame values in seconds
        :return: freq_bin_hz: (ndarray) Frequency bin values in Hz
        """
        if n_fft is None:
            n_fft = blocksize
        if sample_rate is None:
            # implement the documented fallback (previously missing)
            sample_rate = 44100
        # buffer signal into (possibly overlapping) frames: blocksize x num_frames
        buffer_mat = Transformer.buffer_signal(samples,
                                               blocksize,
                                               hopsize)
        num_frames = buffer_mat.shape[1]
        # apply windowing; the column vector broadcasts over all frames
        win = np.reshape(Transformer._window_function(blocksize,
                                                      window), (-1, 1))
        buffer_mat = buffer_mat * win
        # time frames (frame start or frame center, in seconds)
        if use_frame_center_time:
            time_frame_sec = (np.arange(num_frames, dtype=float) + .5)
        else:
            time_frame_sec = np.arange(num_frames, dtype=float)
        time_frame_sec *= hopsize / sample_rate
        # frequency bins (only the non-negative half up to & including fs/2)
        freq_bin_hz = np.arange(n_fft // 2 + 1) * sample_rate / n_fft
        # STFT: FFT over each (zero-padded) frame, keep n_fft/2 + 1 bins
        spec = np.fft.fft(buffer_mat, n=int(n_fft), axis=0)[:n_fft // 2 + 1, :]
        return spec, time_frame_sec, freq_bin_hz

    @staticmethod
    def buffer_signal(samples,
                      blocksize,
                      hopsize):
        """ Buffer signal into overlapping or non-overlapping frames.
        Trailing samples that do not fill a complete frame are dropped
        (the previous docstring incorrectly claimed zero-padding).
        :param samples: (ndarray) Sample vector
        :param blocksize: (int) Blocksize in samples
        :param hopsize: (int) Hopsize in samples
        :return: buffer: (2d ndarray) Sample buffer, dimensions: blocksize x num_frames
        """
        samples = np.asarray(samples)
        overlap = blocksize - hopsize
        num_samples = len(samples)
        # number of complete frames; frame f starts at sample f * hopsize
        num_frames = int(np.floor((num_samples - overlap) / (blocksize - overlap)))
        # create sample buffer matrix with (non-)overlapping frames via
        # broadcast of frame offsets (columns) + in-frame offsets (rows)
        col_idx = np.arange(num_frames) * hopsize
        row_idx = np.reshape(np.arange(blocksize),
                             (-1, 1))
        index = (col_idx + row_idx).astype(int)
        return samples[index]

    @staticmethod
    def reassigned_spec(samples,
                        blocksize,
                        hopsize,
                        sample_rate=44100,
                        freq_bin_hz=None,
                        n_fft=None):
        """ Compute reassigned magnitude spectrogram by mapping STFT magnitude values to time-frequency
        bins that correspond to the local instantaneous frequency. For harmonic signals, this results
        in a spectrogram representation with a higher sparsity, i.e., sharper harmonic peaks compared
        to the STFT. This is useful for tasks such as pitch tracking & music transcription.
        :param samples: (ndarray) Audio samples
        :param blocksize: (int) Blocksize in samples
        :param hopsize: (int) Hopsize in samples
        :param sample_rate: (int) Sampling rate in Hz
        :param freq_bin_hz: (None / ndarray) Desired frequency axis (bin values in Hz) for reassigned spectrogram,
                            must be linearly-spaced or logarithmically-spaced.
                            This allows for example to define a logarithmically-spaced frequency axis which is
                            used to map the magnitudes to.
                            If None, the common linearly-spaced FFT frequency axis based on the given STFT parameters
                            is used
        :param n_fft: (int / None) FFT size in samples (zero-padding is used if necessary)
                      -> if None, n_fft is set to blocksize (no zero-padding)
        :return: spec: (2d ndarray) Reassigned magnitude spectrogram (nFrequencyBins x nTimeFrames)
        :return: freq_bin_hz: (ndarray) Frequency bin values in Hz of reassigned spectrogram (nFrequencyBins)
        :return: time_frame_sec: (ndarray) Time frame values in seconds (nTimeFrames)
        :return: inst_freq_hz: (2d ndarray) Instantaneous frequency values [Hz] (nFrequencyBins x nTimeFrames)
        """
        # Compute magnitude STFT spectrogram & reassigned frequency positions based on the local instantaneous
        # frequency values
        inst_freq_hz, spec_stft, time_frame_sec, freq_bin_hz_stft = Transformer._inst_freq_abe(samples,
                                                                                               blocksize,
                                                                                               hopsize,
                                                                                               sample_rate,
                                                                                               n_fft=n_fft)
        # magnitude spectrogram
        spec_stft = np.abs(spec_stft)
        # use STFT frequency axis as target frequency axis if not specified otherwise
        if freq_bin_hz is None:
            freq_bin_hz = freq_bin_hz_stft
        # target axis must be strictly increasing
        assert all(np.diff(freq_bin_hz) > 0)
        nBins = len(freq_bin_hz)
        num_frames = spec_stft.shape[1]
        spec = np.zeros((nBins, num_frames))
        dt = time_frame_sec[1] - time_frame_sec[0]
        # only consider instantaneous frequencies within range of target frequency axis
        # (this also discards nan/inf values from divisions by zero magnitude)
        min_freq_hz = freq_bin_hz[0]
        max_freq_hz = freq_bin_hz[-1]
        idx_bin_valid = np.logical_and(inst_freq_hz >= min_freq_hz,
                                       inst_freq_hz <= max_freq_hz).nonzero()
        freq_hz_reassigned = inst_freq_hz[idx_bin_valid[0], idx_bin_valid[1]]
        time_sec_reassigned = time_frame_sec[idx_bin_valid[1]]
        # convert reassigned frequencies to fractional bin positions on the
        # target axis (linear or logarithmic spacing supported)
        freq_scale_spacing_type = Transformer._scale_spacing_type(freq_bin_hz)
        if freq_scale_spacing_type == 'linear':
            df = freq_bin_hz[1] - freq_bin_hz[0]
            freq_bin_frac_reassigned = (freq_hz_reassigned - min_freq_hz) / df
        elif freq_scale_spacing_type == 'logarithmic':
            bins_per_octave = np.round(1. / np.log2(freq_bin_hz[2] / freq_bin_hz[1]))
            freq_bin_frac_reassigned = bins_per_octave * np.log2(freq_hz_reassigned / min_freq_hz)
        else:
            raise Exception('Target frequency axis spacing must be linear or logarithmic!')
        # no time reassignment; simple frequency reassignment by mapping each
        # magnitude to the closest target bin (a fractional two-bin mapping
        # did not show improvements in past experiments).  Indices must be
        # integer arrays for fancy indexing.
        time_idx = np.round(time_sec_reassigned / dt).astype(int)
        freq_idx = np.round(freq_bin_frac_reassigned).astype(int)
        # unbuffered accumulation replaces the original per-entry Python loop
        np.add.at(spec, (freq_idx, time_idx),
                  spec_stft[idx_bin_valid[0], idx_bin_valid[1]])
        return spec, freq_bin_hz, time_frame_sec, inst_freq_hz

    @staticmethod
    def _scale_spacing_type(scale):
        """ Classify the spacing of a scale (e.g. a frequency axis).
        :param scale: (ndarray) Scale values (e.g. frequency scale values in Hz)
        :return: type: (string) Scale spacing type ('linear', 'logarithmic', or 'other')
        """
        assert len(scale) > 2
        tol = 1e-10
        # A scale is linear (logarithmic) iff the second-order differences of
        # the values (of their base-2 logarithms) all vanish.  The original
        # code had the closing parenthesis misplaced -- all(...) < tol
        # compared a bool against tol -- so 'other' scales with a single zero
        # second difference were misclassified as 'linear'.
        if np.all(np.abs(np.diff(scale, 2)) < tol):
            return 'linear'
        elif np.all(np.abs(np.diff(np.log2(scale), 2)) < tol):
            return 'logarithmic'
        return 'other'

    @staticmethod
    def _inst_freq_abe(samples,
                       blocksize,
                       hopsize,
                       sample_rate,
                       n_fft=None):
        """ Compute instantaneous frequency values based on the method proposed in
            Toshihiro Abe et al. in ICASSP'95, Eurospeech'97
        :param samples: (ndarray) Audio samples
        :param blocksize: (int) Blocksize in samples
        :param hopsize: (int) Hopsize in samples
        :param sample_rate: (int) Sampling rate in Hz
        :param n_fft: (int / None) FFT size in samples (zero-padding is used if necessary)
                      -> if None, n_fft is set to blocksize (no zero-padding)
        :return: inst_freq_hz: (2d ndarray) Instantaneous frequency values [Hz] (nFrequencyBins x nTimeFrames)
        :return: spec: (2d ndarray) STFT spectrogram (nFrequencyBins x nTimeFrames)
                       This can be used for magnitude reassignment as done in reassigned_spec()
        :return: time_frame_sec: (ndarray) Time frame values in seconds (nTimeFrames)
        :return: freq_bin_hz: (ndarray) Frequency bin values in Hz (nFrequencyBins)
        """
        if n_fft is None:
            # mirror stft()'s default; previously None crashed in the
            # np.arange(n_fft/2) expression below
            n_fft = blocksize
        # compute 2 STFTs with Hanning and DiffHanning window
        spec, time_frame_sec, freq_bin_hz = Transformer.stft(samples,
                                                             blocksize,
                                                             hopsize,
                                                             n_fft,
                                                             'Hanning',
                                                             sample_rate=sample_rate)
        spec_diff, _, _ = Transformer.stft(samples,
                                           blocksize,
                                           hopsize,
                                           n_fft,
                                           'DiffHanning',
                                           sample_rate=sample_rate)
        # compute instantaneous frequencies, use array broadcasting, ignore N/2 + 1 frame (fs/2)
        bin_freq_hz = np.reshape(np.arange(n_fft // 2) * sample_rate / n_fft,
                                 (-1, 1))
        inst_freq_hz = np.imag(spec_diff[:-1, :] / spec[:-1, :]) / (2 * np.pi) + bin_freq_hz
        return inst_freq_hz, spec, time_frame_sec, freq_bin_hz

    @staticmethod
    def _window_function(N, window, sample_rate=44100.):
        """ Create window functions
        :param N: (int) window width
        :param window: (string) Window type, currently implemented
                        'Hanning': N-point symmetric Hanning window with first and last sample being 0 (Matlab: hann())
                        'DiffHanning': Differentiated Hanning window
        :param sample_rate: (float) Sampling rate in Hz
        :return: window: (ndarray) window function
        """
        if window == 'Hanning':
            return .5 * (1 - np.cos(2 * np.pi * np.arange(N) / (N - 1)))
        elif window == 'DiffHanning':
            # analytic derivative of the Hanning window (scaled by fs)
            return -np.pi * sample_rate / (N - 1) * np.sin(2 * np.pi * np.arange(N) / (N - 1))
        else:
            raise Exception('Non-valid value for window')
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from neutron.common import constants
from neutron.db import agents_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import dhcpagentscheduler
from neutron.extensions import l3agentscheduler
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)

# Configuration options controlling how networks/routers are scheduled to
# DHCP/L3 agents; registered globally below so any plugin importing this
# module sees them on cfg.CONF.
AGENTS_SCHEDULER_OPTS = [
    cfg.StrOpt('network_scheduler_driver',
               default='neutron.scheduler.'
                       'dhcp_agent_scheduler.ChanceScheduler',
               help=_('Driver to use for scheduling network to DHCP agent')),
    cfg.StrOpt('router_scheduler_driver',
               default='neutron.scheduler.l3_agent_scheduler.ChanceScheduler',
               help=_('Driver to use for scheduling '
                      'router to a default L3 agent')),
    cfg.BoolOpt('network_auto_schedule', default=True,
                help=_('Allow auto scheduling networks to DHCP agent.')),
    cfg.BoolOpt('router_auto_schedule', default=True,
                help=_('Allow auto scheduling routers to L3 agent.')),
    cfg.IntOpt('dhcp_agents_per_network', default=1,
               help=_('Number of DHCP agents scheduled to host a network.')),
]
cfg.CONF.register_opts(AGENTS_SCHEDULER_OPTS)
class NetworkDhcpAgentBinding(model_base.BASEV2):
    """Represents binding between neutron networks and DHCP agents.

    Composite primary key (network_id, dhcp_agent_id) allows a network to be
    hosted by multiple DHCP agents; rows are removed automatically when the
    network or the agent is deleted (ondelete CASCADE).
    """
    # network side of the binding (FK to networks.id)
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey("networks.id", ondelete='CASCADE'),
                           primary_key=True)
    # ORM relationship providing direct access to the bound Agent row
    dhcp_agent = orm.relation(agents_db.Agent)
    # agent side of the binding (FK to agents.id)
    dhcp_agent_id = sa.Column(sa.String(36),
                              sa.ForeignKey("agents.id",
                                            ondelete='CASCADE'),
                              primary_key=True)
class RouterL3AgentBinding(model_base.BASEV2, models_v2.HasId):
    """Represents binding between neutron routers and L3 agents.

    Uses a surrogate id primary key (from HasId); bound rows are removed
    automatically when the router or the agent is deleted.
    """
    # router side of the binding (FK to routers.id)
    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey("routers.id", ondelete='CASCADE'))
    # ORM relationship providing direct access to the bound Agent row
    l3_agent = orm.relation(agents_db.Agent)
    # agent side of the binding (FK to agents.id)
    l3_agent_id = sa.Column(sa.String(36),
                            sa.ForeignKey("agents.id",
                                          ondelete='CASCADE'))
class AgentSchedulerDbMixin(agents_db.AgentDbMixin):
    """Common class for agent scheduler mixins."""

    # agent notifiers to handle agent update operations;
    # should be updated by plugins;
    agent_notifiers = {
        constants.AGENT_TYPE_DHCP: None,
        constants.AGENT_TYPE_L3: None,
        constants.AGENT_TYPE_LOADBALANCER: None,
    }

    @staticmethod
    def is_eligible_agent(active, agent):
        """Return True if |agent| may be used given the |active| filter.

        :param active: None disables the filter entirely; any non-None value
                       restricts eligibility to agents with a recent
                       heartbeat.
        :param agent: agent dict containing 'heartbeat_timestamp'.
        """
        if active is None:
            # filtering by activeness is disabled, all agents are eligible
            return True
        else:
            # note(rpodolyaka): original behaviour is saved here: if active
            #                   filter is set, only agents which are 'up'
            #                   (i.e. have a recent heartbeat timestamp)
            #                   are eligible, even if active is False
            return not agents_db.AgentDbMixin.is_agent_down(
                agent['heartbeat_timestamp'])

    def update_agent(self, context, id, agent):
        """Update an agent and notify it if admin_state_up changed."""
        original_agent = self.get_agent(context, id)
        result = super(AgentSchedulerDbMixin, self).update_agent(
            context, id, agent)
        agent_data = agent['agent']
        # only notify when the update actually toggles admin_state_up
        agent_notifier = self.agent_notifiers.get(original_agent['agent_type'])
        if (agent_notifier and
            'admin_state_up' in agent_data and
            original_agent['admin_state_up'] != agent_data['admin_state_up']):
            agent_notifier.agent_updated(context,
                                         agent_data['admin_state_up'],
                                         original_agent['host'])
        return result
class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
                              AgentSchedulerDbMixin):
    """Mixin class to add l3 agent scheduler extension to db_plugin_base_v2."""

    # Scheduler driver instance; set by the plugin that uses this mixin.
    router_scheduler = None

    def add_router_to_l3_agent(self, context, id, router_id):
        """Add a l3 agent to host a router."""
        router = self.get_router(context, router_id)
        with context.session.begin(subtransactions=True):
            # reject agents of the wrong type, administratively down agents,
            # and agents whose configuration cannot host this router
            agent_db = self._get_agent(context, id)
            if (agent_db['agent_type'] != constants.AGENT_TYPE_L3 or
                not agent_db['admin_state_up'] or
                not self.get_l3_agent_candidates(router, [agent_db])):
                raise l3agentscheduler.InvalidL3Agent(id=id)
            query = context.session.query(RouterL3AgentBinding)
            try:
                # a pre-existing binding means the router is already hosted
                binding = query.filter(
                    RouterL3AgentBinding.l3_agent_id == agent_db.id,
                    RouterL3AgentBinding.router_id == router_id).one()
                if binding:
                    raise l3agentscheduler.RouterHostedByL3Agent(
                        router_id=router_id, agent_id=id)
            except exc.NoResultFound:
                # no binding yet -- this is the expected path
                pass
            result = self.auto_schedule_routers(context,
                                                agent_db.host,
                                                [router_id])
            if not result:
                raise l3agentscheduler.RouterSchedulingFailed(
                    router_id=router_id, agent_id=id)
        # notify the agent outside the transaction
        l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
        if l3_notifier:
            l3_notifier.router_added_to_agent(
                context, [router_id], agent_db.host)

    def remove_router_from_l3_agent(self, context, id, router_id):
        """Remove the router from l3 agent.
        After it, the router will be non-hosted until there is update which
        lead to re schedule or be added to another agent manually.
        """
        agent = self._get_agent(context, id)
        with context.session.begin(subtransactions=True):
            query = context.session.query(RouterL3AgentBinding)
            query = query.filter(
                RouterL3AgentBinding.router_id == router_id,
                RouterL3AgentBinding.l3_agent_id == id)
            try:
                binding = query.one()
            except exc.NoResultFound:
                raise l3agentscheduler.RouterNotHostedByL3Agent(
                    router_id=router_id, agent_id=id)
            context.session.delete(binding)
        # notify the agent outside the transaction
        l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
        if l3_notifier:
            l3_notifier.router_removed_from_agent(
                context, router_id, agent.host)

    def list_routers_on_l3_agent(self, context, id):
        """Return {'routers': [...]} hosted by the given l3 agent."""
        query = context.session.query(RouterL3AgentBinding.router_id)
        query = query.filter(RouterL3AgentBinding.l3_agent_id == id)
        router_ids = [item[0] for item in query]
        if router_ids:
            return {'routers':
                    self.get_routers(context, filters={'id': router_ids})}
        else:
            return {'routers': []}

    def list_active_sync_routers_on_active_l3_agent(
            self, context, host, router_ids):
        """Return sync data for active routers bound to the agent on |host|.

        If |router_ids| is given, the result is restricted to those routers.
        An administratively down agent yields an empty list.
        """
        agent = self._get_agent_by_type_and_host(
            context, constants.AGENT_TYPE_L3, host)
        if not agent.admin_state_up:
            return []
        query = context.session.query(RouterL3AgentBinding.router_id)
        query = query.filter(
            RouterL3AgentBinding.l3_agent_id == agent.id)
        if not router_ids:
            # no restriction requested -- sync every bound router
            pass
        else:
            query = query.filter(
                RouterL3AgentBinding.router_id.in_(router_ids))
        router_ids = [item[0] for item in query]
        if router_ids:
            return self.get_sync_data(context, router_ids=router_ids,
                                      active=True)
        else:
            return []

    def get_l3_agents_hosting_routers(self, context, router_ids,
                                      admin_state_up=None,
                                      active=None):
        """Return the l3 agents hosting any of |router_ids|, optionally
        filtered by admin state and heartbeat-based liveness."""
        if not router_ids:
            return []
        query = context.session.query(RouterL3AgentBinding)
        if len(router_ids) > 1:
            query = query.options(joinedload('l3_agent')).filter(
                RouterL3AgentBinding.router_id.in_(router_ids))
        else:
            query = query.options(joinedload('l3_agent')).filter(
                RouterL3AgentBinding.router_id == router_ids[0])
        if admin_state_up is not None:
            # NOTE(review): this filter references the agents table without an
            # explicit join -- presumably relies on the implicit join through
            # the relationship; confirm against the SQLAlchemy query emitted.
            query = (query.filter(agents_db.Agent.admin_state_up ==
                                  admin_state_up))
        l3_agents = [binding.l3_agent for binding in query]
        if active is not None:
            l3_agents = [l3_agent for l3_agent in
                         l3_agents if not
                         agents_db.AgentDbMixin.is_agent_down(
                             l3_agent['heartbeat_timestamp'])]
        return l3_agents

    def _get_l3_bindings_hosting_routers(self, context, router_ids):
        """Return the RouterL3AgentBinding rows for |router_ids|."""
        if not router_ids:
            return []
        query = context.session.query(RouterL3AgentBinding)
        if len(router_ids) > 1:
            query = query.options(joinedload('l3_agent')).filter(
                RouterL3AgentBinding.router_id.in_(router_ids))
        else:
            query = query.options(joinedload('l3_agent')).filter(
                RouterL3AgentBinding.router_id == router_ids[0])
        return query.all()

    def list_l3_agents_hosting_router(self, context, router_id):
        """Return {'agents': [...]} hosting the given router."""
        with context.session.begin(subtransactions=True):
            bindings = self._get_l3_bindings_hosting_routers(
                context, [router_id])
            results = []
            for binding in bindings:
                l3_agent_dict = self._make_agent_dict(binding.l3_agent)
                results.append(l3_agent_dict)
            if results:
                return {'agents': results}
            else:
                return {'agents': []}

    def get_l3_agents(self, context, active=None, filters=None):
        """Return all l3 agents, optionally filtered by admin state, by
        arbitrary column filters, and by heartbeat-based eligibility."""
        query = context.session.query(agents_db.Agent)
        query = query.filter(
            agents_db.Agent.agent_type == constants.AGENT_TYPE_L3)
        if active is not None:
            query = (query.filter(agents_db.Agent.admin_state_up == active))
        if filters:
            for key, value in filters.iteritems():
                column = getattr(agents_db.Agent, key, None)
                if column:
                    query = query.filter(column.in_(value))
        return [l3_agent
                for l3_agent in query
                if AgentSchedulerDbMixin.is_eligible_agent(active, l3_agent)]

    def get_l3_agent_candidates(self, sync_router, l3_agents):
        """Get the valid l3 agents for the router from a list of l3_agents."""
        candidates = []
        for l3_agent in l3_agents:
            if not l3_agent.admin_state_up:
                continue
            agent_conf = self.get_configuration_dict(l3_agent)
            router_id = agent_conf.get('router_id', None)
            use_namespaces = agent_conf.get('use_namespaces', True)
            handle_internal_only_routers = agent_conf.get(
                'handle_internal_only_routers', True)
            gateway_external_network_id = agent_conf.get(
                'gateway_external_network_id', None)
            # an agent without namespaces can only host its single configured
            # router
            if not use_namespaces and router_id != sync_router['id']:
                continue
            ex_net_id = (sync_router['external_gateway_info'] or {}).get(
                'network_id')
            # skip agents that don't handle internal-only routers, and agents
            # pinned to a different external network
            if ((not ex_net_id and not handle_internal_only_routers) or
                (ex_net_id and gateway_external_network_id and
                 ex_net_id != gateway_external_network_id)):
                continue
            candidates.append(l3_agent)
        return candidates

    def auto_schedule_routers(self, context, host, router_ids):
        """Delegate scheduling of |router_ids| onto |host| to the driver."""
        if self.router_scheduler:
            return self.router_scheduler.auto_schedule_routers(
                self, context, host, router_ids)

    def schedule_router(self, context, router):
        """Delegate scheduling of a single router to the driver."""
        if self.router_scheduler:
            return self.router_scheduler.schedule(
                self, context, router)

    def schedule_routers(self, context, routers):
        """Schedule the routers to l3 agents."""
        for router in routers:
            self.schedule_router(context, router)
class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
                                .DhcpAgentSchedulerPluginBase,
                                AgentSchedulerDbMixin):
    """Mixin class to add DHCP agent scheduler extension to db_plugin_base_v2.
    """

    # Scheduler driver instance; set by the plugin that uses this mixin.
    network_scheduler = None

    def get_dhcp_agents_hosting_networks(
            self, context, network_ids, active=None):
        """Return the DHCP agents hosting any of |network_ids|.

        :param active: if not None, only agents with a recent heartbeat are
                       returned (see AgentSchedulerDbMixin.is_eligible_agent).
        """
        if not network_ids:
            return []
        query = context.session.query(NetworkDhcpAgentBinding)
        query = query.options(joinedload('dhcp_agent'))
        if len(network_ids) == 1:
            query = query.filter(
                NetworkDhcpAgentBinding.network_id == network_ids[0])
        elif network_ids:
            # Bug fix: the original used the Python 'in' operator directly on
            # the column ('column in network_ids'), which does not produce a
            # SQL IN clause; in_() generates the intended filter.
            query = query.filter(
                NetworkDhcpAgentBinding.network_id.in_(network_ids))
        if active is not None:
            # NOTE(review): filter references the agents table without an
            # explicit join -- confirm the emitted SQL joins as intended.
            query = (query.filter(agents_db.Agent.admin_state_up == active))
        return [binding.dhcp_agent
                for binding in query
                if AgentSchedulerDbMixin.is_eligible_agent(active,
                                                           binding.dhcp_agent)]

    def add_network_to_dhcp_agent(self, context, id, network_id):
        """Bind |network_id| to the DHCP agent |id| and notify the agent."""
        self._get_network(context, network_id)
        with context.session.begin(subtransactions=True):
            # reject non-DHCP and administratively down agents
            agent_db = self._get_agent(context, id)
            if (agent_db['agent_type'] != constants.AGENT_TYPE_DHCP or
                not agent_db['admin_state_up']):
                raise dhcpagentscheduler.InvalidDHCPAgent(id=id)
            dhcp_agents = self.get_dhcp_agents_hosting_networks(
                context, [network_id])
            for dhcp_agent in dhcp_agents:
                if id == dhcp_agent.id:
                    raise dhcpagentscheduler.NetworkHostedByDHCPAgent(
                        network_id=network_id, agent_id=id)
            binding = NetworkDhcpAgentBinding()
            binding.dhcp_agent_id = id
            binding.network_id = network_id
            context.session.add(binding)
        # notify the agent outside the transaction
        dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
        if dhcp_notifier:
            dhcp_notifier.network_added_to_agent(
                context, network_id, agent_db.host)

    def remove_network_from_dhcp_agent(self, context, id, network_id):
        """Unbind |network_id| from the DHCP agent |id| and notify the agent.

        Raises NetworkNotHostedByDhcpAgent when no such binding exists.
        """
        agent = self._get_agent(context, id)
        with context.session.begin(subtransactions=True):
            try:
                query = context.session.query(NetworkDhcpAgentBinding)
                binding = query.filter(
                    NetworkDhcpAgentBinding.network_id == network_id,
                    NetworkDhcpAgentBinding.dhcp_agent_id == id).one()
            except exc.NoResultFound:
                raise dhcpagentscheduler.NetworkNotHostedByDhcpAgent(
                    network_id=network_id, agent_id=id)
            context.session.delete(binding)
        # notify the agent outside the transaction
        dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
        if dhcp_notifier:
            dhcp_notifier.network_removed_from_agent(
                context, network_id, agent.host)

    def list_networks_on_dhcp_agent(self, context, id):
        """Return {'networks': [...]} hosted by the given DHCP agent."""
        query = context.session.query(NetworkDhcpAgentBinding.network_id)
        query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == id)
        net_ids = [item[0] for item in query]
        if net_ids:
            return {'networks':
                    self.get_networks(context, filters={'id': net_ids})}
        else:
            return {'networks': []}

    def list_active_networks_on_active_dhcp_agent(self, context, host):
        """Return the admin-up networks bound to the DHCP agent on |host|.

        An administratively down agent yields an empty list.
        """
        agent = self._get_agent_by_type_and_host(
            context, constants.AGENT_TYPE_DHCP, host)
        if not agent.admin_state_up:
            return []
        query = context.session.query(NetworkDhcpAgentBinding.network_id)
        query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == agent.id)
        net_ids = [item[0] for item in query]
        if net_ids:
            return self.get_networks(
                context,
                filters={'id': net_ids, 'admin_state_up': [True]}
            )
        else:
            return []

    def list_dhcp_agents_hosting_network(self, context, network_id):
        """Return {'agents': [...]} hosting the given network."""
        dhcp_agents = self.get_dhcp_agents_hosting_networks(
            context, [network_id])
        agent_ids = [dhcp_agent.id for dhcp_agent in dhcp_agents]
        if agent_ids:
            return {
                'agents': self.get_agents(context, filters={'id': agent_ids})}
        else:
            return {'agents': []}

    def schedule_network(self, context, created_network):
        """Delegate scheduling of a newly created network to the driver."""
        if self.network_scheduler:
            chosen_agent = self.network_scheduler.schedule(
                self, context, created_network)
            if not chosen_agent:
                LOG.warn(_('Fail scheduling network %s'), created_network)
            return chosen_agent

    def auto_schedule_networks(self, context, host):
        """Delegate auto-scheduling of networks onto |host| to the driver."""
        if self.network_scheduler:
            self.network_scheduler.auto_schedule_networks(self, context, host)
# ----------------------------------------------------------------------------
#!/usr/bin/python
from __future__ import print_function
import os
from os.path import *
import subprocess
import pygst
import gst
# Local imports
from musicservice import ServiceException
'''
playback.py
Python class that controls playback
Uses GStreamer w/ Python Bindings (gstreamer.freedesktop.org)
@author: Schuyler Martin <schuylermartin45@gmail.com>
'''
__author__ = "Schuyler Martin"
# constant volume values for audio player
VOLUME_DEFAULT = 1.0
# the speech files are way quieter than the rest of the music
VOLUME_SPEECH = 7.0
def mkTextSpeech(text, fileName):
    '''
    Writes a text-to-speech file
    :param: text Text to synthesize
    :param: fileName File name to write-out to
    '''
    # see espeak man page for more info for args
    # Each option and its value are passed as separate argv entries so the
    # output file name is never fused onto the "-w" flag.
    subprocess.call(["espeak",
                     "-s", "120",     # speaking rate (words per minute)
                     "-a", "20",      # amplitude
                     "-w", fileName,  # write a WAV file instead of playing
                     text])
class Playback:
    '''
    Class that represents the playback system
    '''

    @staticmethod
    def constructPlayer():
        '''
        Builds a single instance of the media player; to be shared by all
        playback instances
        :return: GST Player object
        '''
        # music player object for the stream
        player = gst.element_factory_make("playbin2", "player")
        # disable any attempts at playing video content
        # Flags: video | audio | subtitles | software volume
        player.set_property("flags", 2)
        # set playback device to bluetooth
        alsa_card = gst.element_factory_make("alsasink", "bluetooth")
        # Notes to self about bluetooth:
        # - config for alsa is set in /etc/asound.conf
        # - config for bluetooth audio is /etc/bluetooth/audio.conf
        # - bluetooth daemon: /etc/init.d/bluetooth
        # - also make sure that there are no other connections to the speaker
        alsa_card.set_property("device", "bluetooth")
        player.set_property("audio-sink", alsa_card)
        return player

    def __init__(self, player, service, cachePath):
        '''
        Constructor
        :param: player Reference to the music playback device
        :param: service Reference to service "interface" that will resolve
            service-specific concerns. A service may be a streaming system,
            music service, or other playback object type
        :param: cachePath Path to caching information
        '''
        self.service = service
        self.cachePath = cachePath
        # tracks if we are playing the TTS playlist file now
        self.pl_TTS = False
        # tracks if we are playing the TTS shuffle commands now
        self.shuffle_TTS = False
        # dictionary of playlists; playlist id from service is the key
        self.playlists = self.service.getPlaylists()
        # initialize/use cache info
        self.init_cache()
        # if no playlists available, we have a problem to report up
        if (len(self.playlists.keys()) < 1):
            raise ServiceException("No Playlists Found")
        # ptr to current playlist (pick the first one by default)
        self.cur_id = 0
        self.cur = self.playlists[self.playlists.keys()[self.cur_id]]
        self.player = player

    def init_cache(self):
        '''
        Initialize and use cache info. There is a cache for each service
        that provides the following:
            - Tracks/builds playlist text-to-speech information
        '''
        # in the cache path, check to see if the shuffle sounds are there
        self.shuffle_Files = {}
        self.shuffle_Files[True] = self.cachePath + "shuffleOn.wav"
        self.shuffle_Files[False] = self.cachePath + "shuffleOff.wav"
        if not(os.path.exists(self.shuffle_Files[True])):
            mkTextSpeech("Setting shuffle on ", self.shuffle_Files[True])
        if not(os.path.exists(self.shuffle_Files[False])):
            mkTextSpeech("Setting shuffle off", self.shuffle_Files[False])
        # for each service
        srvPath = self.cachePath + self.service.strType + "/"
        if not(os.path.exists(srvPath)):
            os.makedirs(srvPath)
        # generate any missing playlist text-to-speech data
        for ids, pl in self.playlists.iteritems():
            speakFile = srvPath + pl.name + ".wav"
            if not(os.path.exists(speakFile)):
                # write file to cache
                mkTextSpeech("Playing " + self.service.plTypeTTS + " "
                    + pl.name + ".", speakFile)
            pl.ttsFile = "file://" + speakFile

    def play(self):
        '''
        Play the current song and return the stream location
        :return: Stream uri
        '''
        pl = self.playlists[self.playlists.keys()[self.cur_id]]
        # special case for playing the text-to-speech message
        # NOTE(review): pl_TTS is not reset here after the TTS clip is
        # queued -- confirm a caller resets it, otherwise play() keeps
        # replaying the announcement.
        if ((self.pl_TTS) and (pl.ttsFile != None)):
            self.player.set_property("volume", VOLUME_SPEECH)
            mp3Stream = pl.ttsFile
        else:
            self.player.set_property("volume", VOLUME_DEFAULT)
            # get location of the stream from the current playlist
            mp3Stream = self.service.getStream(self.cur)
        # set the stream location and begin playing music
        self.player.set_property("uri", mp3Stream)
        self.player.set_state(gst.STATE_PLAYING)
        return mp3Stream

    def pause(self):
        '''
        Pause the current song and return the unique id of the song playing
        :return: Stream uri
        '''
        self.player.set_state(gst.STATE_PAUSED)
        return self.service.getStream(self.cur)

    def stop(self):
        '''
        Kills the current stream
        :return: Stream uri
        '''
        self.player.set_state(gst.STATE_NULL)
        return self.service.getStream(self.cur)

    def playPause(self):
        '''
        Plays/Pauses the song based on the current player state
        :return: Results of play() or pause()
        '''
        if (self.player.get_state()[1] == gst.STATE_PLAYING):
            return self.pause()
        return self.play()

    def prev(self):
        '''
        Moves to the previous song (wraps-around) and returns that song
        :return: Results of play() function
        '''
        # halt/remove the current song
        self.player.set_state(gst.STATE_NULL)
        # change song in playlist
        self.cur.prev()
        return self.play()

    def next(self):
        '''
        Moves to the next song (wraps-around) and returns that song
        :return: Results of play() function
        '''
        # perform similar actions as with prev()
        self.player.set_state(gst.STATE_NULL)
        self.cur.next()
        return self.play()

    def shuffle(self):
        '''
        Shuffles/deshuffles every playlist (keeps a consistent state across all
        '''
        # BUGFIX: the original assigned a *local* variable ("shuffle_TTS =
        # True"), leaving the instance flag set in __init__ forever False.
        self.shuffle_TTS = True
        # play appropriate sound notification
        ttsFile = "file://" + self.shuffle_Files[not(self.cur.isShuffle)]
        self.player.set_property("volume", VOLUME_SPEECH)
        # all playlists should have the same shuffle state
        for ids, pl in self.playlists.iteritems():
            pl.shuffle()
        # play audio clip messaging
        self.player.set_state(gst.STATE_NULL)
        self.player.set_property("uri", ttsFile)
        self.player.set_state(gst.STATE_PLAYING)

    def prevPl(self):
        '''
        Moves to the previous Playlist (wraps-around) and returns that song
        :return: Results of play() function
        '''
        # attempt to play the identifying playlist name
        self.pl_TTS = True
        # halt/remove the current song
        self.player.set_state(gst.STATE_NULL)
        # change playlist
        if (self.cur_id == 0):
            self.cur_id = len(self.playlists.keys()) - 1
        else:
            self.cur_id -= 1
        self.cur = self.playlists[self.playlists.keys()[self.cur_id]]
        return self.play()

    def nextPl(self):
        '''
        Moves to the next Playlist (wraps-around) and returns that song
        :return: Results of play() function
        '''
        # perform similar actions as with prevPl()
        self.pl_TTS = True
        self.player.set_state(gst.STATE_NULL)
        if (self.cur_id == (len(self.playlists.keys()) - 1 )):
            self.cur_id = 0
        else:
            self.cur_id += 1
        self.cur = self.playlists[self.playlists.keys()[self.cur_id]]
        return self.play()
def main():
    '''
    Main execution point for testing
    '''
    # placeholder entry point -- no test logic yet
    pass

if __name__ == '__main__':
    main()
# ----------------------------------------------------------------------------
import numpy as np
from numpy import sin, cos, tan, degrees, radians, arctan, arcsin
# Slip projections
## To/From offset
# Conversions between the net (true) offset and the other slip components.
# All angles are in degrees; rake defaults of 90. correspond to pure
# dip-slip, defaults of 0. to pure strike-slip.
def offset_from_vert_sep(vert_sep, dip, rake=90.):
    '''Net offset from vertical separation, via the dip-slip component.'''
    dip_slip = dip_slip_from_vert_sep(vert_sep, dip, rake)
    return offset_from_dip_slip(dip_slip, dip, rake)
def vert_sep_from_offset(offset, dip, rake=90.):
    '''Vertical separation from net offset, via the dip-slip component.'''
    dip_slip = dip_slip_from_offset(offset, dip, rake)
    return vert_sep_from_dip_slip(dip_slip, dip, rake)
def offset_from_hor_sep(hor_sep, dip, rake=90.):
    '''Net offset from horizontal separation, via the dip-slip component.'''
    dip_slip = dip_slip_from_hor_sep(hor_sep, dip, rake)
    return offset_from_dip_slip(dip_slip, dip, rake)
def hor_sep_from_offset(offset, dip, rake=90.):
    '''Horizontal separation from net offset, via the dip-slip component.'''
    dip_slip = dip_slip_from_offset(offset, dip, rake)
    return hor_sep_from_dip_slip(dip_slip, dip, rake)
def offset_from_strike_slip(strike_slip, dip, rake=0.):
    '''Net offset from strike slip. Singular at rake=90 (cos(rake)=0).'''
    return strike_slip / cos( radians(rake))
def strike_slip_from_offset(offset, dip, rake=0.):
    '''Strike-slip component of the net offset.'''
    return offset * cos( radians(rake))
def offset_from_dip_slip(dip_slip, dip, rake=90.):
    '''Net offset from dip slip. Singular at rake=0 (sin(rake)=0).'''
    return dip_slip / sin( radians(rake))
def dip_slip_from_offset(offset, dip, rake=90.):
    '''Dip-slip component of the net offset.'''
    return offset * sin( radians(rake))
def heave_from_offset(offset, dip, rake=90.):
    '''Heave (horizontal displacement) from net offset, via apparent dip.'''
    apparent_dip = apparent_dip_from_dip_rake(dip, rake)
    return offset * cos( radians(apparent_dip))
def offset_from_heave(heave, dip, rake=90.):
    '''Net offset from heave, via apparent dip.'''
    apparent_dip = apparent_dip_from_dip_rake(dip, rake)
    return heave / cos( radians(apparent_dip))
## Others
def beta_from_dip_rake(dip, rake):
    '''
    Returns beta, the angle (in degrees) between the strike and the
    trend of apparent dip.
    '''
    dip_rad = radians(dip)
    rake_rad = radians(rake)
    return degrees(arctan(tan(rake_rad) * cos(dip_rad)))
def apparent_dip_from_dip_rake(dip, rake):
    '''Returns the apparent dip (in degrees) in the direction of slip.'''
    dip_rad = radians(dip)
    rake_rad = radians(rake)
    return degrees(arcsin(sin(dip_rad) * sin(rake_rad)))
# Remaining pairwise conversions. Most route through the net offset so the
# whole module stays consistent with the core offset formulas above.
def hor_sep_from_vert_sep(vert_sep, dip, rake=90.):
    offset = offset_from_vert_sep(vert_sep, dip, rake)
    return hor_sep_from_offset(offset, dip, rake)
def vert_sep_from_hor_sep(hor_sep, dip, rake=90.):
    offset = offset_from_hor_sep(hor_sep, dip, rake)
    return vert_sep_from_offset(offset, dip, rake)
def dip_slip_from_vert_sep(vert_sep, dip, rake=90.):
    # Singular at dip=0 (horizontal fault has no vertical separation).
    return vert_sep / sin(radians(dip))
def vert_sep_from_dip_slip(dip_slip, dip, rake=90.):
    return dip_slip * sin(radians(dip))
def strike_slip_from_vert_sep(vert_sep, dip, rake=0.):
    # NOTE(review): with the default rake=0 this divides by sin(0) inside
    # offset_from_dip_slip (pure strike-slip has no vertical separation) --
    # callers should pass an oblique rake; confirm intended behaviour.
    offset = offset_from_vert_sep(vert_sep, dip, rake)
    return strike_slip_from_offset(offset, dip, rake)
def vert_sep_from_strike_slip(strike_slip, dip, rake=0.):
    offset = offset_from_strike_slip(strike_slip, dip, rake)
    return vert_sep_from_offset(offset, dip, rake)
def heave_from_vert_sep(vert_sep, dip, rake=90.):
    offset = offset_from_vert_sep(vert_sep, dip, rake)
    return heave_from_offset(offset, dip, rake)
def vert_sep_from_heave(heave, dip, rake=90.):
    offset = offset_from_heave(heave, dip, rake)
    return vert_sep_from_offset(offset, dip, rake)
def hor_sep_from_dip_slip(dip_slip, dip, rake=90.):
    return dip_slip * cos(radians(dip))
def dip_slip_from_hor_sep(hor_sep, dip, rake=90.):
    # Singular at dip=90 (vertical fault has no horizontal separation).
    return hor_sep / cos(radians(dip))
def hor_sep_from_strike_slip(strike_slip, dip, rake=0.):
    offset = offset_from_strike_slip(strike_slip, dip, rake)
    return hor_sep_from_offset(offset, dip, rake)
def strike_slip_from_hor_sep(hor_sep, dip, rake=0.):
    offset = offset_from_hor_sep(hor_sep, dip, rake)
    return strike_slip_from_offset(offset, dip, rake)
def hor_sep_from_heave(heave, dip, rake=90.):
    offset = offset_from_heave(heave, dip, rake)
    return hor_sep_from_offset(offset, dip, rake)
def heave_from_hor_sep(hor_sep, dip, rake=90.):
    offset = offset_from_hor_sep(hor_sep, dip, rake)
    return heave_from_offset(offset, dip, rake)
def dip_slip_from_heave(heave, dip, rake=90.):
    offset = offset_from_heave(heave, dip, rake)
    return dip_slip_from_offset(offset, dip, rake)
def heave_from_dip_slip(dip_slip, dip, rake=90.):
    offset = offset_from_dip_slip(dip_slip, dip, rake)
    return heave_from_offset(offset, dip, rake)
def dip_slip_from_strike_slip(strike_slip, dip, rake):
    offset = offset_from_strike_slip(strike_slip, dip, rake)
    return dip_slip_from_offset(offset, dip, rake)
def strike_slip_from_dip_slip(dip_slip, dip, rake):
    offset = offset_from_dip_slip(dip_slip, dip, rake)
    return strike_slip_from_offset(offset, dip, rake)
def heave_from_strike_slip(strike_slip, dip, rake=0.):
    # Pythagorean form: heave is the resultant of the two horizontal
    # components (strike slip and horizontal separation).
    hs = hor_sep_from_strike_slip(strike_slip, dip, rake)
    return np.sqrt(strike_slip**2 + hs**2)
def strike_slip_from_heave(heave, dip, rake=0.):
    offset = offset_from_heave(heave, dip, rake)
    return strike_slip_from_offset(offset, dip, rake)
# aggregator functions
def slip_components_from_offset(offset, dip, rake):
    '''Return all slip components as a dict, given the net offset.'''
    return {
        'offset': offset,
        'hor_sep': hor_sep_from_offset(offset, dip, rake),
        'vert_sep': vert_sep_from_offset(offset, dip, rake),
        'dip_slip': dip_slip_from_offset(offset, dip, rake),
        'strike_slip': strike_slip_from_offset(offset, dip, rake),
        'heave': heave_from_offset(offset, dip, rake),
    }
def slip_components_from_hor_sep(hor_sep, dip, rake):
    '''Return all slip components as a dict, given horizontal separation.'''
    return {
        'hor_sep': hor_sep,
        'offset': offset_from_hor_sep(hor_sep, dip, rake),
        'vert_sep': vert_sep_from_hor_sep(hor_sep, dip, rake),
        'dip_slip': dip_slip_from_hor_sep(hor_sep, dip, rake),
        'strike_slip': strike_slip_from_hor_sep(hor_sep, dip, rake),
        'heave': heave_from_hor_sep(hor_sep, dip, rake),
    }
def slip_components_from_vert_sep(vert_sep, dip, rake):
    '''Return all slip components as a dict, given vertical separation.'''
    return {
        'vert_sep': vert_sep,
        'hor_sep': hor_sep_from_vert_sep(vert_sep, dip, rake),
        'offset': offset_from_vert_sep(vert_sep, dip, rake),
        'dip_slip': dip_slip_from_vert_sep(vert_sep, dip, rake),
        'strike_slip': strike_slip_from_vert_sep(vert_sep, dip, rake),
        'heave': heave_from_vert_sep(vert_sep, dip, rake),
    }
def slip_components_from_dip_slip(dip_slip, dip, rake):
    '''Return all slip components as a dict, given the dip slip.'''
    return {
        'dip_slip': dip_slip,
        'hor_sep': hor_sep_from_dip_slip(dip_slip, dip, rake),
        'vert_sep': vert_sep_from_dip_slip(dip_slip, dip, rake),
        'offset': offset_from_dip_slip(dip_slip, dip, rake),
        'strike_slip': strike_slip_from_dip_slip(dip_slip, dip, rake),
        'heave': heave_from_dip_slip(dip_slip, dip, rake),
    }
def slip_components_from_strike_slip(strike_slip, dip, rake):
    '''Return all slip components as a dict, given the strike slip.'''
    return {
        'strike_slip': strike_slip,
        'hor_sep': hor_sep_from_strike_slip(strike_slip, dip, rake),
        'vert_sep': vert_sep_from_strike_slip(strike_slip, dip, rake),
        'dip_slip': dip_slip_from_strike_slip(strike_slip, dip, rake),
        'offset': offset_from_strike_slip(strike_slip, dip, rake),
        'heave': heave_from_strike_slip(strike_slip, dip, rake),
    }
def slip_components_from_heave(heave, dip, rake):
    '''Return all slip components as a dict, given the heave.'''
    return {
        'heave': heave,
        'hor_sep': hor_sep_from_heave(heave, dip, rake),
        'vert_sep': vert_sep_from_heave(heave, dip, rake),
        'dip_slip': dip_slip_from_heave(heave, dip, rake),
        'strike_slip': strike_slip_from_heave(heave, dip, rake),
        'offset': offset_from_heave(heave, dip, rake),
    }
# ----------------------------------------------------------------------------
# -*- test-case-name: wokkel.test.test_pubsub -*-
#
# Copyright (c) 2003-2009 Ralph Meijer
# See LICENSE for details.
"""
XMPP publish-subscribe protocol.
This protocol is specified in
U{XEP-0060<http://www.xmpp.org/extensions/xep-0060.html>}.
"""
from zope.interface import implements
from twisted.internet import defer
from twisted.python import log
from twisted.words.protocols.jabber import jid, error
from twisted.words.xish import domish
from wokkel import disco, data_form, generic, shim
from wokkel.compat import IQ
from wokkel.subprotocols import IQHandlerMixin, XMPPHandler
from wokkel.iwokkel import IPubSubClient, IPubSubService, IPubSubResource
# Iq get and set XPath queries
IQ_GET = '/iq[@type="get"]'
IQ_SET = '/iq[@type="set"]'
# Publish-subscribe namespaces
NS_PUBSUB = 'http://jabber.org/protocol/pubsub'
NS_PUBSUB_EVENT = NS_PUBSUB + '#event'
NS_PUBSUB_ERRORS = NS_PUBSUB + '#errors'
NS_PUBSUB_OWNER = NS_PUBSUB + "#owner"
NS_PUBSUB_NODE_CONFIG = NS_PUBSUB + "#node_config"
NS_PUBSUB_META_DATA = NS_PUBSUB + "#meta-data"
NS_PUBSUB_SUBSCRIBE_OPTIONS = NS_PUBSUB + "#subscribe_options"
# XPath to match pubsub requests
PUBSUB_REQUEST = '/iq[@type="get" or @type="set"]/' + \
'pubsub[@xmlns="' + NS_PUBSUB + '" or ' + \
'@xmlns="' + NS_PUBSUB_OWNER + '"]'
class SubscriptionPending(Exception):
    """
    The requested subscription is awaiting acceptance.
    """


class SubscriptionUnconfigured(Exception):
    """
    The requested subscription must be configured before it becomes active.
    """
class PubSubError(error.StanzaError):
    """
    Exception with publish-subscribe specific condition.
    """
    def __init__(self, condition, pubsubCondition, feature=None, text=None):
        pubsubElement = domish.Element((NS_PUBSUB_ERRORS, pubsubCondition))
        if feature:
            pubsubElement['feature'] = feature
        error.StanzaError.__init__(self, condition, text=text,
                                   appCondition=pubsubElement)


class BadRequest(error.StanzaError):
    """
    Bad request stanza error.
    """
    def __init__(self, pubsubCondition=None, text=None):
        # Only attach a pubsub application condition when one was given.
        appCondition = (domish.Element((NS_PUBSUB_ERRORS, pubsubCondition))
                        if pubsubCondition else None)
        error.StanzaError.__init__(self, 'bad-request', text=text,
                                   appCondition=appCondition)


class Unsupported(PubSubError):
    """
    Error raised for an unsupported publish-subscribe feature.
    """
    def __init__(self, feature, text=None):
        self.feature = feature
        PubSubError.__init__(self, 'feature-not-implemented', 'unsupported',
                             feature, text)

    def __str__(self):
        return '%s, feature %r' % (PubSubError.__str__(self), self.feature)
class Subscription(object):
    """
    A subscription to a node.

    @ivar nodeIdentifier: The identifier of the node subscribed to. The root
        node is denoted by C{None}.
    @ivar subscriber: The subscribing entity.
    @ivar state: The subscription state. One of C{'subscribed'},
        C{'pending'}, C{'unconfigured'}.
    @ivar options: Optional list of subscription options.
    @type options: C{dict}.
    """
    def __init__(self, nodeIdentifier, subscriber, state, options=None):
        self.state = state
        self.subscriber = subscriber
        self.nodeIdentifier = nodeIdentifier
        # A falsy options value (None, {}) becomes a fresh empty dict.
        self.options = options or {}
class Item(domish.Element):
    """
    Publish subscribe item.

    This behaves like an object providing L{domish.IElement}.

    Item payload can be added using C{addChild} or C{addRawXml}, or using the
    C{payload} keyword argument to C{__init__}.
    """
    def __init__(self, id=None, payload=None):
        """
        @param id: optional item identifier
        @type id: L{unicode}
        @param payload: optional item payload. Either as a domish element, or
                        as serialized XML.
        @type payload: object providing L{domish.IElement} or L{unicode}.
        """
        domish.Element.__init__(self, (NS_PUBSUB, 'item'))
        if id is not None:
            self['id'] = id
        if payload is not None:
            # Strings are treated as already-serialized XML; anything else
            # is assumed to be a domish element. (Python 2: basestring.)
            if isinstance(payload, basestring):
                self.addRawXml(payload)
            else:
                self.addChild(payload)
class PubSubRequest(generic.Stanza):
"""
A publish-subscribe request.
The set of instance variables used depends on the type of request. If
a variable is not applicable or not passed in the request, its value is
C{None}.
@ivar verb: The type of publish-subscribe request. See L{_requestVerbMap}.
@type verb: C{str}.
@ivar affiliations: Affiliations to be modified.
@type affiliations: C{set}
@ivar items: The items to be published, as L{domish.Element}s.
@type items: C{list}
@ivar itemIdentifiers: Identifiers of the items to be retrieved or
retracted.
@type itemIdentifiers: C{set}
@ivar maxItems: Maximum number of items to retrieve.
@type maxItems: C{int}.
@ivar nodeIdentifier: Identifier of the node the request is about.
@type nodeIdentifier: C{unicode}
@ivar nodeType: The type of node that should be created, or for which the
configuration is retrieved. C{'leaf'} or C{'collection'}.
@type nodeType: C{str}
@ivar options: Configurations options for nodes, subscriptions and publish
requests.
@type options: L{data_form.Form}
@ivar subscriber: The subscribing entity.
@type subscriber: L{JID}
@ivar subscriptionIdentifier: Identifier for a specific subscription.
@type subscriptionIdentifier: C{unicode}
@ivar subscriptions: Subscriptions to be modified, as a set of
L{Subscription}.
@type subscriptions: C{set}
"""
verb = None
affiliations = None
items = None
itemIdentifiers = None
maxItems = None
nodeIdentifier = None
nodeType = None
options = None
subscriber = None
subscriptionIdentifier = None
subscriptions = None
# Map request iq type and subelement name to request verb
_requestVerbMap = {
('set', NS_PUBSUB, 'publish'): 'publish',
('set', NS_PUBSUB, 'subscribe'): 'subscribe',
('set', NS_PUBSUB, 'unsubscribe'): 'unsubscribe',
('get', NS_PUBSUB, 'options'): 'optionsGet',
('set', NS_PUBSUB, 'options'): 'optionsSet',
('get', NS_PUBSUB, 'subscriptions'): 'subscriptions',
('get', NS_PUBSUB, 'affiliations'): 'affiliations',
('set', NS_PUBSUB, 'create'): 'create',
('get', NS_PUBSUB_OWNER, 'default'): 'default',
('get', NS_PUBSUB_OWNER, 'configure'): 'configureGet',
('set', NS_PUBSUB_OWNER, 'configure'): 'configureSet',
('get', NS_PUBSUB, 'items'): 'items',
('set', NS_PUBSUB, 'retract'): 'retract',
('set', NS_PUBSUB_OWNER, 'purge'): 'purge',
('set', NS_PUBSUB_OWNER, 'delete'): 'delete',
('get', NS_PUBSUB_OWNER, 'affiliations'): 'affiliationsGet',
('set', NS_PUBSUB_OWNER, 'affiliations'): 'affiliationsSet',
('get', NS_PUBSUB_OWNER, 'subscriptions'): 'subscriptionsGet',
('set', NS_PUBSUB_OWNER, 'subscriptions'): 'subscriptionsSet',
}
# Map request verb to request iq type and subelement name
_verbRequestMap = dict(((v, k) for k, v in _requestVerbMap.iteritems()))
# Map request verb to parameter handler names
_parameters = {
'publish': ['node', 'items'],
'subscribe': ['nodeOrEmpty', 'jid'],
'unsubscribe': ['nodeOrEmpty', 'jid'],
'optionsGet': ['nodeOrEmpty', 'jid'],
'optionsSet': ['nodeOrEmpty', 'jid', 'options'],
'subscriptions': [],
'affiliations': [],
'create': ['nodeOrNone', 'configure'],
'default': ['default'],
'configureGet': ['nodeOrEmpty'],
'configureSet': ['nodeOrEmpty', 'configure'],
'items': ['node', 'maxItems', 'itemIdentifiers'],
'retract': ['node', 'itemIdentifiers'],
'purge': ['node'],
'delete': ['node'],
'affiliationsGet': ['nodeOrEmpty'],
'affiliationsSet': [],
'subscriptionsGet': ['nodeOrEmpty'],
'subscriptionsSet': [],
}
def __init__(self, verb=None):
self.verb = verb
@staticmethod
def _findForm(element, formNamespace):
"""
Find a Data Form.
Look for an element that represents a Data Form with the specified
form namespace as a child element of the given element.
"""
if not element:
return None
form = None
for child in element.elements():
try:
form = data_form.Form.fromElement(child)
except data_form.Error:
continue
if form.formNamespace != NS_PUBSUB_NODE_CONFIG:
continue
return form
    def _parse_node(self, verbElement):
        """
        Parse the required node identifier out of the verbElement.
        """
        try:
            self.nodeIdentifier = verbElement["node"]
        except KeyError:
            # XEP-0060 error condition for a missing mandatory node id.
            raise BadRequest('nodeid-required')
    def _render_node(self, verbElement):
        """
        Render the required node identifier on the verbElement.
        """
        if not self.nodeIdentifier:
            raise Exception("Node identifier is required")
        verbElement['node'] = self.nodeIdentifier
    def _parse_nodeOrEmpty(self, verbElement):
        """
        Parse the node identifier out of the verbElement. May be empty.
        """
        self.nodeIdentifier = verbElement.getAttribute("node", '')
    def _render_nodeOrEmpty(self, verbElement):
        """
        Render the node identifier on the verbElement. May be empty.
        """
        if self.nodeIdentifier:
            verbElement['node'] = self.nodeIdentifier
    def _parse_nodeOrNone(self, verbElement):
        """
        Parse the optional node identifier out of the verbElement.
        """
        self.nodeIdentifier = verbElement.getAttribute("node")
    def _render_nodeOrNone(self, verbElement):
        """
        Render the optional node identifier on the verbElement.
        """
        if self.nodeIdentifier:
            verbElement['node'] = self.nodeIdentifier
    def _parse_items(self, verbElement):
        """
        Parse items out of the verbElement for publish requests.
        """
        # Only direct <item/> children in the pubsub namespace count.
        self.items = []
        for element in verbElement.elements():
            if element.uri == NS_PUBSUB and element.name == 'item':
                self.items.append(element)
    def _render_items(self, verbElement):
        """
        Render items into the verbElement for publish requests.
        """
        if self.items:
            for item in self.items:
                verbElement.addChild(item)
    def _parse_jid(self, verbElement):
        """
        Parse subscriber out of the verbElement for un-/subscribe requests.
        """
        try:
            self.subscriber = jid.internJID(verbElement["jid"])
        except KeyError:
            raise BadRequest('jid-required')
    def _render_jid(self, verbElement):
        """
        Render subscriber into the verbElement for un-/subscribe requests.
        """
        verbElement['jid'] = self.subscriber.full()
    def _parse_default(self, verbElement):
        """
        Parse node type out of a request for the default node configuration.
        """
        form = PubSubRequest._findForm(verbElement, NS_PUBSUB_NODE_CONFIG)
        if form and form.formType == 'submit':
            values = form.getValues()
            self.nodeType = values.get('pubsub#node_type', 'leaf')
        else:
            # No (submitted) form present: fall back to a leaf node.
            self.nodeType = 'leaf'
    def _parse_configure(self, verbElement):
        """
        Parse options out of a request for setting the node configuration.
        """
        form = PubSubRequest._findForm(verbElement, NS_PUBSUB_NODE_CONFIG)
        if form:
            if form.formType == 'submit':
                self.options = form.getValues()
            elif form.formType == 'cancel':
                self.options = {}
            else:
                raise BadRequest(text="Unexpected form type %r" % form.formType)
        else:
            raise BadRequest(text="Missing configuration form")
    def _parse_itemIdentifiers(self, verbElement):
        """
        Parse item identifiers out of items and retract requests.
        """
        self.itemIdentifiers = []
        for element in verbElement.elements():
            if element.uri == NS_PUBSUB and element.name == 'item':
                try:
                    self.itemIdentifiers.append(element["id"])
                except KeyError:
                    # An <item/> without an id is malformed in this context.
                    raise BadRequest()
    def _render_itemIdentifiers(self, verbElement):
        """
        Render item identifiers into items and retract requests.
        """
        if self.itemIdentifiers:
            for itemIdentifier in self.itemIdentifiers:
                item = verbElement.addElement('item')
                item['id'] = itemIdentifier
    def _parse_maxItems(self, verbElement):
        """
        Parse maximum items out of an items request.
        """
        # NOTE(review): only non-integer values are rejected; a negative
        # integer passes despite the error text -- confirm intended.
        value = verbElement.getAttribute('max_items')
        if value:
            try:
                self.maxItems = int(value)
            except ValueError:
                raise BadRequest(text="Field max_items requires a positive " +
                                      "integer value")
    def _render_maxItems(self, verbElement):
        """
        Render maximum items into an items request.
        """
        if self.maxItems:
            verbElement['max_items'] = unicode(self.maxItems)
    def _render_configure(self, verbElement):
        """
        Render the configuration form into a configure request, if present.
        """
        if self.options:
            verbElement.addChild(self.options.toElement())
    def _parse_options(self, verbElement):
        """
        Parse subscription options out of an options request.
        """
        form = PubSubRequest._findForm(verbElement, NS_PUBSUB_SUBSCRIBE_OPTIONS)
        if form:
            if form.formType == 'submit':
                self.options = form.getValues()
            elif form.formType == 'cancel':
                self.options = {}
            else:
                raise BadRequest(text="Unexpected form type %r" % form.formType)
        else:
            raise BadRequest(text="Missing options form")
    def parseElement(self, element):
        """
        Parse the publish-subscribe verb and parameters out of a request.
        """
        generic.Stanza.parseElement(self, element)
        for child in element.pubsub.elements():
            key = (self.stanzaType, child.uri, child.name)
            try:
                verb = self._requestVerbMap[key]
            except KeyError:
                continue
            else:
                self.verb = verb
                break
        if not self.verb:
            raise NotImplementedError()
        # Relies on `verb` and `child` still being bound to the values from
        # the loop iteration that hit `break` above.
        for parameter in self._parameters[verb]:
            getattr(self, '_parse_%s' % parameter)(child)
    def send(self, xs):
        """
        Send this request to its recipient.
        This renders all of the relevant parameters for this specific
        requests into an L{IQ}, and invoke its C{send} method.
        This returns a deferred that fires upon reception of a response. See
        L{IQ} for details.
        @param xs: The XML stream to send the request on.
        @type xs: L{xmlstream.XmlStream}
        @rtype: L{defer.Deferred}.
        """
        try:
            (self.stanzaType,
             childURI,
             childName) = self._verbRequestMap[self.verb]
        except KeyError:
            raise NotImplementedError("Unhandled verb: " + str(self.verb))
        iq = IQ(xs, self.stanzaType)
        iq.addElement((childURI, 'pubsub'))
        verbElement = iq.pubsub.addElement(childName)
        if self.sender:
            iq['from'] = self.sender.full()
        if self.recipient:
            iq['to'] = self.recipient.full()
        # Dispatch to the _render_* helpers registered for this verb.
        for parameter in self._parameters[self.verb]:
            getattr(self, '_render_%s' % parameter)(verbElement)
        return iq.send()
class PubSubEvent(object):
    """
    Base class for publish-subscribe event notifications.

    @param sender: The entity from which the notification was received.
    @type sender: L{jid.JID}
    @param recipient: The entity to which the notification was sent.
    @type recipient: L{wokkel.pubsub.ItemsEvent}
    @param nodeIdentifier: Identifier of the node the event pertains to.
    @type nodeIdentifier: C{unicode}
    @param headers: SHIM headers, see L{wokkel.shim.extractHeaders}.
    @type headers: L{dict}
    """
    def __init__(self, sender, recipient, nodeIdentifier, headers):
        self.headers = headers
        self.nodeIdentifier = nodeIdentifier
        self.recipient = recipient
        self.sender = sender
class ItemsEvent(PubSubEvent):
    """
    A publish-subscribe event that signifies new, updated and retracted items.

    @param items: List of received items as domish elements.
    @type items: C{list} of L{domish.Element}
    """
    def __init__(self, sender, recipient, nodeIdentifier, items, headers):
        PubSubEvent.__init__(self, sender, recipient, nodeIdentifier, headers)
        self.items = items
class DeleteEvent(PubSubEvent):
    """
    A publish-subscribe event that signifies the deletion of a node.
    """
    # Optional URI of the replacement node; set by the event dispatcher when
    # the notification carries a <redirect/> element.
    redirectURI = None
class PurgeEvent(PubSubEvent):
    """
    A publish-subscribe event that signifies the purging of a node.
    """
class PubSubClient(XMPPHandler):
"""
Publish subscribe client protocol.
"""
implements(IPubSubClient)
    def connectionInitialized(self):
        """Register the pubsub event observer once the stream is ready."""
        self.xmlstream.addObserver('/message/event[@xmlns="%s"]' %
                                   NS_PUBSUB_EVENT, self._onEvent)
    def _onEvent(self, message):
        """Dispatch an incoming event message to a C{_onEvent_*} handler."""
        # Both addresses are required; silently ignore malformed messages.
        try:
            sender = jid.JID(message["from"])
            recipient = jid.JID(message["to"])
        except KeyError:
            return
        # Pick the (last) child of <event/> in the pubsub#event namespace.
        actionElement = None
        for element in message.event.elements():
            if element.uri == NS_PUBSUB_EVENT:
                actionElement = element
        if not actionElement:
            return
        # Dispatch by element name (items, delete, purge, ...); unknown
        # actions leave the message unhandled.
        eventHandler = getattr(self, "_onEvent_%s" % actionElement.name, None)
        if eventHandler:
            headers = shim.extractHeaders(message)
            eventHandler(sender, recipient, actionElement, headers)
            message.handled = True
def _onEvent_items(self, sender, recipient, action, headers):
nodeIdentifier = action["node"]
items = [element for element in action.elements()
if element.name in ('item', 'retract')]
event = ItemsEvent(sender, recipient, nodeIdentifier, items, headers)
self.itemsReceived(event)
def _onEvent_delete(self, sender, recipient, action, headers):
nodeIdentifier = action["node"]
event = DeleteEvent(sender, recipient, nodeIdentifier, headers)
if action.redirect:
event.redirectURI = action.redirect.getAttribute('uri')
self.deleteReceived(event)
def _onEvent_purge(self, sender, recipient, action, headers):
nodeIdentifier = action["node"]
event = PurgeEvent(sender, recipient, nodeIdentifier, headers)
self.purgeReceived(event)
    def itemsReceived(self, event):
        """Hook for received item notifications; override in subclasses."""
        pass
    def deleteReceived(self, event):
        """Hook for node deletion notifications; override in subclasses."""
        pass
    def purgeReceived(self, event):
        """Hook for node purge notifications; override in subclasses."""
        pass
    def _addOptionsFromDict(self, request, conf):
        """Attach a node-configuration submit form built from C{conf}."""
        form = data_form.Form(formType="submit",
                              formNamespace=NS_PUBSUB_NODE_CONFIG)
        for k,v in conf.iteritems():
            # Iterables (but not strings) become text-multi fields.
            if getattr(v, '__iter__', False) and not isinstance(v, basestring):
                form.addField(data_form.Field(fieldType='text-multi',
                    var=k, values=[str(x) for x in v]))
            else:
                # NOTE(review): str(v) renders booleans as 'True'/'False';
                # XEP-0060 boolean fields expect 'true'/'false'/'1'/'0' --
                # verify data_form normalizes this before relying on bools.
                form.addField(data_form.Field(var=k, value=str(v)))
        request.options = form
def createNode(self, service, nodeIdentifier=None, sender=None, conf={}):
"""
Create a publish subscribe node.
@param service: The publish subscribe service to create the node at.
@type service: L{JID}
@param nodeIdentifier: Optional suggestion for the id of the node.
@type nodeIdentifier: C{unicode}
"""
request = PubSubRequest('create')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
request.sender = sender
if conf:
self._addOptionsFromDict(request, conf)
def cb(iq):
try:
new_node = iq.pubsub.create["node"]
except AttributeError:
# the suggested node identifier was accepted
new_node = nodeIdentifier
return new_node
d = request.send(self.xmlstream)
d.addCallback(cb)
return d
def configureNode(self, service, nodeIdentifier, conf={}, sender=None):
"""
Apply a configuration to a node.
@param service: The pubsub service where the node exists
@type service: L{JID}
@param conf: form values to configure
@type conf: dict
@param nodeIdentifier: Identifier of the node to configure
@type nodeIdentifier: C{unicode}
@param sender: The entity from which the notification should be sent
@type sender: L{JID}
"""
request = PubSubRequest('configureSet')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
request.sender = sender
self._addOptionsFromDict(request, conf)
return request.send(self.xmlstream)
def getNodeConfiguration(self, service, nodeIdentifier, sender=None):
    """
    Retrieve the current configuration of a node.

    (The previous docstring said "Apply a configuration" — that was a
    copy-paste from C{configureNode}; this method sends a 'configureGet'
    request and only reads.)

    @param service: The pubsub service where the node exists
    @type service: L{JID}
    @param nodeIdentifier: Identifier of the node to inspect
    @type nodeIdentifier: C{unicode}
    @param sender: The entity from which the request should be sent
    @type sender: L{JID}
    """
    request = PubSubRequest('configureGet')
    request.recipient = service
    request.nodeIdentifier = nodeIdentifier
    request.sender = sender
    return request.send(self.xmlstream)
def deleteNode(self, service, nodeIdentifier, sender=None):
    """
    Delete a publish subscribe node.

    @param service: The publish subscribe service to delete the node from.
    @type service: L{JID}
    @param nodeIdentifier: The identifier of the node.
    @type nodeIdentifier: C{unicode}
    @param sender: Optional entity the request is sent from.
    @type sender: L{JID}
    """
    request = PubSubRequest('delete')
    request.recipient = service
    request.nodeIdentifier = nodeIdentifier
    request.sender = sender
    return request.send(self.xmlstream)
def subscribe(self, service, nodeIdentifier, subscriber, sender=None):
    """
    Subscribe to a publish subscribe node.

    @param service: The publish subscribe service that keeps the node.
    @type service: L{JID}
    @param nodeIdentifier: The identifier of the node.
    @type nodeIdentifier: C{unicode}
    @param subscriber: The entity to subscribe to the node. This entity
                       will get notifications of new published items.
    @type subscriber: L{JID}
    @param sender: Optional entity the request is sent from.
    @type sender: L{JID}
    @raise SubscriptionPending: (via the returned deferred) when the
        service reports the subscription still awaits approval.
    @raise SubscriptionUnconfigured: (via the returned deferred) when the
        service requires configuration before the subscription is active.
    """
    request = PubSubRequest('subscribe')
    request.recipient = service
    request.nodeIdentifier = nodeIdentifier
    request.subscriber = subscriber
    request.sender = sender

    def cb(iq):
        # inspect the subscription state reported in the response
        subscription = iq.pubsub.subscription["subscription"]

        if subscription == 'pending':
            raise SubscriptionPending
        elif subscription == 'unconfigured':
            raise SubscriptionUnconfigured
        else:
            # we assume subscription == 'subscribed'
            # any other value would be invalid, but that should have
            # yielded a stanza error.
            return None

    d = request.send(self.xmlstream)
    d.addCallback(cb)
    return d
def unsubscribe(self, service, nodeIdentifier, subscriber, sender=None):
    """
    Unsubscribe from a publish subscribe node.

    @param service: The publish subscribe service that keeps the node.
    @type service: L{JID}
    @param nodeIdentifier: The identifier of the node.
    @type nodeIdentifier: C{unicode}
    @param subscriber: The entity to unsubscribe from the node.
    @type subscriber: L{JID}
    @param sender: Optional entity the request is sent from.
    @type sender: L{JID}
    """
    request = PubSubRequest('unsubscribe')
    # populate the request attributes the wire protocol needs
    for attribute, value in (('recipient', service),
                             ('nodeIdentifier', nodeIdentifier),
                             ('subscriber', subscriber),
                             ('sender', sender)):
        setattr(request, attribute, value)
    return request.send(self.xmlstream)
def publish(self, service, nodeIdentifier, items=None, sender=None):
    """
    Publish to a publish subscribe node.

    @param service: The publish subscribe service that keeps the node.
    @type service: L{JID}
    @param nodeIdentifier: The identifier of the node.
    @type nodeIdentifier: C{unicode}
    @param items: Optional list of L{Item}s to publish.
    @type items: C{list}
    @param sender: Optional entity the request is sent from.
    @type sender: L{JID}
    """
    request = PubSubRequest('publish')
    # populate the request attributes the wire protocol needs
    for attribute, value in (('recipient', service),
                             ('nodeIdentifier', nodeIdentifier),
                             ('items', items),
                             ('sender', sender)):
        setattr(request, attribute, value)
    return request.send(self.xmlstream)
def items(self, service, nodeIdentifier, maxItems=None, sender=None):
    """
    Retrieve previously published items from a publish subscribe node.

    @param service: The publish subscribe service that keeps the node.
    @type service: L{JID}
    @param nodeIdentifier: The identifier of the node.
    @type nodeIdentifier: C{unicode}
    @param maxItems: Optional limit on the number of retrieved items.
    @type maxItems: C{int}
    @param sender: Optional entity the request is sent from.
    @type sender: L{JID}
    """
    request = PubSubRequest('items')
    request.recipient = service
    request.nodeIdentifier = nodeIdentifier
    if maxItems:
        # the attribute travels on the wire, so it must be a string;
        # int() also rejects non-numeric values early
        request.maxItems = str(int(maxItems))
    request.sender = sender

    def cb(iq):
        # keep only proper <item/> children in the pubsub namespace
        items = []
        for element in iq.pubsub.items.elements():
            if element.uri == NS_PUBSUB and element.name == 'item':
                items.append(element)
        return items

    d = request.send(self.xmlstream)
    d.addCallback(cb)
    return d
class PubSubService(XMPPHandler, IQHandlerMixin):
    """
    Protocol implementation for a XMPP Publish Subscribe Service.

    The word Service here is used as taken from the Publish Subscribe
    specification. It is the party responsible for keeping nodes and their
    subscriptions, and sending out notifications.

    Methods from the L{IPubSubService} interface that are called as
    a result of an XMPP request may raise exceptions. Alternatively the
    deferred returned by these methods may have their errback called. These are
    handled as follows:

     - If the exception is an instance of L{error.StanzaError}, an error
       response iq is returned.
     - Any other exception is reported using L{log.msg}. An error response
       with the condition C{internal-server-error} is returned.

    The default implementation of said methods raises an L{Unsupported}
    exception and are meant to be overridden.

    @ivar discoIdentity: Service discovery identity as a dictionary with
                         keys C{'category'}, C{'type'} and C{'name'}.
    @ivar pubSubFeatures: List of supported publish-subscribe features for
                          service discovery, as C{str}.
    @type pubSubFeatures: C{list} or C{None}
    """

    implements(IPubSubService)

    iqHandlers = {
        '/*': '_onPubSubRequest',
    }

    # Maps a request verb to the legacy handler method name and the names
    # of the request attributes passed to it as positional arguments.
    _legacyHandlers = {
        'publish': ('publish', ['sender', 'recipient',
                                'nodeIdentifier', 'items']),
        'subscribe': ('subscribe', ['sender', 'recipient',
                                    'nodeIdentifier', 'subscriber']),
        'unsubscribe': ('unsubscribe', ['sender', 'recipient',
                                        'nodeIdentifier', 'subscriber']),
        'subscriptions': ('subscriptions', ['sender', 'recipient']),
        'affiliations': ('affiliations', ['sender', 'recipient']),
        'create': ('create', ['sender', 'recipient', 'nodeIdentifier']),
        'getConfigurationOptions': ('getConfigurationOptions', []),
        'default': ('getDefaultConfiguration',
                    ['sender', 'recipient', 'nodeType']),
        'configureGet': ('getConfiguration', ['sender', 'recipient',
                                              'nodeIdentifier']),
        'configureSet': ('setConfiguration', ['sender', 'recipient',
                                              'nodeIdentifier', 'options']),
        'items': ('items', ['sender', 'recipient', 'nodeIdentifier',
                            'maxItems', 'itemIdentifiers']),
        'retract': ('retract', ['sender', 'recipient', 'nodeIdentifier',
                                'itemIdentifiers']),
        'purge': ('purge', ['sender', 'recipient', 'nodeIdentifier']),
        'delete': ('delete', ['sender', 'recipient', 'nodeIdentifier']),
    }

    # When True, service discovery does not expose the node list.
    hideNodes = False

    def __init__(self, resource=None):
        self.resource = resource
        self.discoIdentity = {'category': 'pubsub',
                              'type': 'generic',
                              'name': 'Generic Publish-Subscribe Service'}

        self.pubSubFeatures = []

    def connectionMade(self):
        self.xmlstream.addObserver(PUBSUB_REQUEST, self.handleRequest)

    def getDiscoInfo(self, requestor, target, nodeIdentifier):
        """Build the disco#info result, optionally including per-node
        metadata as a result data form."""
        def toInfo(nodeInfo, info):
            if not nodeInfo:
                return info

            (nodeType, metaData) = nodeInfo['type'], nodeInfo['meta-data']
            info.append(disco.DiscoIdentity('pubsub', nodeType))
            if metaData:
                form = data_form.Form(formType="result",
                                      formNamespace=NS_PUBSUB_META_DATA)
                form.addField(
                    data_form.Field(
                        var='pubsub#node_type',
                        value=nodeType,
                        label='The type of node (collection or leaf)'
                    )
                )

                for metaDatum in metaData:
                    form.addField(data_form.Field.fromDict(metaDatum))

                info.append(form)

            return info

        info = []

        request = PubSubRequest('discoInfo')

        if self.resource is not None:
            resource = self.resource.locateResource(request)
            identity = resource.discoIdentity
            features = resource.features
            getInfo = resource.getInfo
        else:
            # discoIdentity is a dict (see __init__), so read the values
            # by key. The previous tuple-unpack of the dict yielded its
            # *keys* in arbitrary order, producing a bogus identity.
            category = self.discoIdentity['category']
            idType = self.discoIdentity['type']
            name = self.discoIdentity['name']
            identity = disco.DiscoIdentity(category, idType, name)
            features = self.pubSubFeatures
            getInfo = self.getNodeInfo

        if not nodeIdentifier:
            info.append(identity)
            info.append(disco.DiscoFeature(disco.NS_DISCO_ITEMS))
            info.extend([disco.DiscoFeature("%s#%s" % (NS_PUBSUB, feature))
                         for feature in features])

        d = getInfo(requestor, target, nodeIdentifier or '')
        d.addCallback(toInfo, info)
        d.addErrback(log.err)
        return d

    def getDiscoItems(self, requestor, target, nodeIdentifier):
        """Build the disco#items result: one item per pubsub node."""
        if self.hideNodes:
            d = defer.succeed([])
        elif self.resource is not None:
            request = PubSubRequest('discoInfo')
            resource = self.resource.locateResource(request)
            d = resource.getNodes(requestor, target, nodeIdentifier)
        elif nodeIdentifier:
            # legacy getNodes() takes no node argument, so it only makes
            # sense at the service root; a specific node has no children
            # here. The previous code had these two branches swapped.
            d = defer.succeed([])
        else:
            d = self.getNodes(requestor, target)

        d.addCallback(lambda nodes: [disco.DiscoItem(target, node)
                                     for node in nodes])
        return d

    def _onPubSubRequest(self, iq):
        """Parse and dispatch an incoming pubsub iq request."""
        request = PubSubRequest.fromElement(iq)

        if self.resource is not None:
            resource = self.resource.locateResource(request)
        else:
            resource = self

        # Preprocess the request, knowing the handling resource
        try:
            preProcessor = getattr(self, '_preProcess_%s' % request.verb)
        except AttributeError:
            pass
        else:
            request = preProcessor(resource, request)
            if request is None:
                return defer.succeed(None)

        # Process the request itself
        if resource is not self:
            try:
                handler = getattr(resource, request.verb)
            except AttributeError:
                text = "Request verb: %s" % request.verb
                return defer.fail(Unsupported('', text))

            d = handler(request)
        else:
            # fall back to the legacy IPubSubService-style methods
            handlerName, argNames = self._legacyHandlers[request.verb]
            handler = getattr(self, handlerName)
            args = [getattr(request, arg) for arg in argNames]
            d = handler(*args)

        # If needed, translate the result into a response
        try:
            cb = getattr(self, '_toResponse_%s' % request.verb)
        except AttributeError:
            pass
        else:
            d.addCallback(cb, resource, request)

        return d

    def _toResponse_subscribe(self, result, resource, request):
        response = domish.Element((NS_PUBSUB, "pubsub"))
        subscription = response.addElement("subscription")
        if result.nodeIdentifier:
            subscription["node"] = result.nodeIdentifier
        subscription["jid"] = result.subscriber.full()
        subscription["subscription"] = result.state
        return response

    def _toResponse_subscriptions(self, result, resource, request):
        response = domish.Element((NS_PUBSUB, 'pubsub'))
        subscriptions = response.addElement('subscriptions')
        for subscription in result:
            item = subscriptions.addElement('subscription')
            item['node'] = subscription.nodeIdentifier
            item['jid'] = subscription.subscriber.full()
            item['subscription'] = subscription.state
        return response

    def _toResponse_affiliations(self, result, resource, request):
        response = domish.Element((NS_PUBSUB, 'pubsub'))
        affiliations = response.addElement('affiliations')

        for nodeIdentifier, affiliation in result:
            item = affiliations.addElement('affiliation')
            item['node'] = nodeIdentifier
            item['affiliation'] = affiliation

        return response

    def _toResponse_create(self, result, resource, request):
        # only echo the node id back when the service picked (or changed)
        # it; an accepted suggestion needs no payload
        if not request.nodeIdentifier or request.nodeIdentifier != result:
            response = domish.Element((NS_PUBSUB, 'pubsub'))
            create = response.addElement('create')
            create['node'] = result
            return response
        else:
            return None

    def _makeFields(self, options, values):
        """Turn {name: value} pairs into data form fields, keeping only
        names that appear in the supported options."""
        fields = []
        for name, value in values.iteritems():
            if name not in options:
                continue

            option = {'var': name}
            option.update(options[name])
            if isinstance(value, list):
                option['values'] = value
            else:
                option['value'] = value
            fields.append(data_form.Field.fromDict(option))
        return fields

    def _formFromConfiguration(self, resource, values):
        options = resource.getConfigurationOptions()
        fields = self._makeFields(options, values)
        form = data_form.Form(formType="form",
                              formNamespace=NS_PUBSUB_NODE_CONFIG,
                              fields=fields)
        return form

    def _checkConfiguration(self, resource, values):
        """Validate submitted configuration values against the resource's
        declared options; unknown keys are silently dropped."""
        options = resource.getConfigurationOptions()
        processedValues = {}

        for key, value in values.iteritems():
            if key not in options:
                continue

            option = {'var': key}
            option.update(options[key])
            field = data_form.Field.fromDict(option)
            if isinstance(value, list):
                field.values = value
            else:
                field.value = value
            # raises on type mismatch, aborting the configure request
            field.typeCheck()

            if isinstance(value, list):
                processedValues[key] = field.values
            else:
                processedValues[key] = field.value

        return processedValues

    def _preProcess_default(self, resource, request):
        if request.nodeType not in ('leaf', 'collection'):
            raise error.StanzaError('not-acceptable')
        else:
            return request

    def _toResponse_default(self, options, resource, request):
        response = domish.Element((NS_PUBSUB_OWNER, "pubsub"))
        default = response.addElement("default")
        form = self._formFromConfiguration(resource, options)
        default.addChild(form.toElement())
        return response

    def _toResponse_configureGet(self, options, resource, request):
        response = domish.Element((NS_PUBSUB_OWNER, "pubsub"))
        configure = response.addElement("configure")
        form = self._formFromConfiguration(resource, options)
        configure.addChild(form.toElement())

        if request.nodeIdentifier:
            configure["node"] = request.nodeIdentifier

        return response

    def _preProcess_configureSet(self, resource, request):
        if request.options:
            request.options = self._checkConfiguration(resource,
                                                       request.options)
            return request
        else:
            # an empty submit cancels the configure; nothing to process
            return None

    def _toResponse_items(self, result, resource, request):
        response = domish.Element((NS_PUBSUB, 'pubsub'))
        items = response.addElement('items')
        items["node"] = request.nodeIdentifier

        for item in result:
            items.addChild(item)

        return response

    def _createNotification(self, eventType, service, nodeIdentifier,
                            subscriber, subscriptions=None):
        """Build a notification <message/> stanza, adding SHIM Collection
        headers for subscriptions on other (collection) nodes."""
        headers = []

        if subscriptions:
            for subscription in subscriptions:
                if nodeIdentifier != subscription.nodeIdentifier:
                    headers.append(('Collection', subscription.nodeIdentifier))

        message = domish.Element((None, "message"))
        message["from"] = service.full()
        message["to"] = subscriber.full()
        event = message.addElement((NS_PUBSUB_EVENT, "event"))

        element = event.addElement(eventType)
        element["node"] = nodeIdentifier

        if headers:
            message.addChild(shim.Headers(headers))

        return message

    # public methods

    def notifyPublish(self, service, nodeIdentifier, notifications):
        for subscriber, subscriptions, items in notifications:
            message = self._createNotification('items', service,
                                               nodeIdentifier, subscriber,
                                               subscriptions)
            message.event.items.children = items
            self.send(message)

    def notifyDelete(self, service, nodeIdentifier, subscribers,
                     redirectURI=None):
        for subscriber in subscribers:
            message = self._createNotification('delete', service,
                                               nodeIdentifier,
                                               subscriber)
            if redirectURI:
                redirect = message.event.delete.addElement('redirect')
                redirect['uri'] = redirectURI
            self.send(message)

    # The methods below form the legacy overridable interface; each default
    # either returns an empty value or fails with Unsupported.

    def getNodeInfo(self, requestor, service, nodeIdentifier):
        return None

    def getNodes(self, requestor, service):
        return []

    def publish(self, requestor, service, nodeIdentifier, items):
        raise Unsupported('publish')

    def subscribe(self, requestor, service, nodeIdentifier, subscriber):
        raise Unsupported('subscribe')

    def unsubscribe(self, requestor, service, nodeIdentifier, subscriber):
        # NOTE(review): feature name 'subscribe' (not 'unsubscribe') looks
        # intentional (matches the subscribe feature) -- confirm before
        # changing.
        raise Unsupported('subscribe')

    def subscriptions(self, requestor, service):
        raise Unsupported('retrieve-subscriptions')

    def affiliations(self, requestor, service):
        raise Unsupported('retrieve-affiliations')

    def create(self, requestor, service, nodeIdentifier):
        raise Unsupported('create-nodes')

    def getConfigurationOptions(self):
        return {}

    def getDefaultConfiguration(self, requestor, service, nodeType):
        raise Unsupported('retrieve-default')

    def getConfiguration(self, requestor, service, nodeIdentifier):
        raise Unsupported('config-node')

    def setConfiguration(self, requestor, service, nodeIdentifier, options):
        raise Unsupported('config-node')

    def items(self, requestor, service, nodeIdentifier, maxItems,
              itemIdentifiers):
        raise Unsupported('retrieve-items')

    def retract(self, requestor, service, nodeIdentifier, itemIdentifiers):
        raise Unsupported('retract-items')

    def purge(self, requestor, service, nodeIdentifier):
        raise Unsupported('purge-nodes')

    def delete(self, requestor, service, nodeIdentifier):
        raise Unsupported('delete-nodes')
class PubSubResource(object):
    """
    Default L{IPubSubResource} implementation.

    A stub resource that advertises no optional features and fails every
    pubsub verb with an L{Unsupported} error. Subclass it and override the
    verbs your service actually supports.
    """

    implements(IPubSubResource)

    # no optional pubsub features advertised by default
    features = []

    # service discovery identity reported for this resource
    discoIdentity = disco.DiscoIdentity('pubsub',
                                        'service',
                                        'Publish-Subscribe Service')

    def locateResource(self, request):
        """Return the resource that handles C{request}; default is self."""
        return self

    def getInfo(self, requestor, service, nodeIdentifier):
        """Return a deferred firing node info; default has none."""
        return defer.succeed(None)

    def getNodes(self, requestor, service, nodeIdentifier):
        """Return a deferred firing the list of node ids; default empty."""
        return defer.succeed([])

    def getConfigurationOptions(self):
        """Return the supported node configuration options; default none."""
        return {}

    def publish(self, request):
        return defer.fail(Unsupported('publish'))

    def subscribe(self, request):
        return defer.fail(Unsupported('subscribe'))

    def unsubscribe(self, request):
        # NOTE(review): feature name 'subscribe' (not 'unsubscribe')
        # mirrors PubSubService.unsubscribe -- appears intentional, confirm.
        return defer.fail(Unsupported('subscribe'))

    def subscriptions(self, request):
        return defer.fail(Unsupported('retrieve-subscriptions'))

    def affiliations(self, request):
        return defer.fail(Unsupported('retrieve-affiliations'))

    def create(self, request):
        return defer.fail(Unsupported('create-nodes'))

    def default(self, request):
        return defer.fail(Unsupported('retrieve-default'))

    def configureGet(self, request):
        return defer.fail(Unsupported('config-node'))

    def configureSet(self, request):
        return defer.fail(Unsupported('config-node'))

    def items(self, request):
        return defer.fail(Unsupported('retrieve-items'))

    def retract(self, request):
        return defer.fail(Unsupported('retract-items'))

    def purge(self, request):
        return defer.fail(Unsupported('purge-nodes'))

    def delete(self, request):
        return defer.fail(Unsupported('delete-nodes'))

    def affiliationsGet(self, request):
        return defer.fail(Unsupported('modify-affiliations'))

    def affiliationsSet(self, request):
        return defer.fail(Unsupported('modify-affiliations'))

    def subscriptionsGet(self, request):
        return defer.fail(Unsupported('manage-subscriptions'))

    def subscriptionsSet(self, request):
        return defer.fail(Unsupported('manage-subscriptions'))
| |
from mock import Mock
from placidity.interpreter import Context, Commands, Interpreter
from py.test import raises
# TODO: convert execute asserts to assert_called_with and handle
# return with return_value. Note that it's possible to mock the
# signatures in Mock 0.7 (fix after release or include svn version in
# /lib)
class TestContext:
    """Tests for Context ownership claiming and releasing."""

    def setup_method(self, method):
        # a fresh Context for every test
        self.context = Context()

    def test_claim_for(self):
        ctx = self.context
        assert ctx.owner is None
        ctx.claim_for('foobar')
        assert ctx.owner == 'foobar'

    def test_release(self):
        ctx = self.context
        ctx.claim_for('barfoo')
        assert ctx.owner == 'barfoo'
        ctx.release()
        assert ctx.owner is None
class TestCommands:
    """Tests for Commands lookup by name and by priority.

    The local command classes (Foo, Bar, Help) keep their names since
    lookup by name presumably derives from the class name.
    """

    def test_find_single(self):
        class Foo:
            pass
        foo_command = Foo()
        assert Commands(foo_command).find(name='foo') == foo_command

    def test_find_nothing(self):
        assert Commands().find(name='foo') is None

    def test_find_based_on_priority(self):
        class Bar:
            priority = 'low'

        class Foo:
            priority = 'normal'

        class Help:
            priority = 'normal'

        low_command = Bar()
        normal_command = Foo()
        help_command = Help()
        commands = Commands((low_command, normal_command, help_command))

        # a unique priority yields a single-element list
        assert commands.find(priority='low') == [low_command, ]

        # a shared priority yields all matching commands
        normals = commands.find(priority='normal')
        assert normal_command in normals
        assert help_command in normals
class TestInterpreter:
    """Tests for Interpreter dispatch: error handling, command priorities,
    context ownership and the execute-method parameter injection."""

    def test_exception(self):
        # an unknown expression resolves to the literal 'null'
        interpreter = Interpreter()
        assert interpreter.interpret('foobar') == 'null'

    def test_none(self):
        # None input is passed through unchanged
        interpreter = Interpreter()
        assert interpreter.interpret(None) == None

    def test_system_exit(self):
        # SystemExit raised by a command must propagate to the caller
        def quit():
            raise SystemExit
        command = self.create_command('quit', execute_method=quit)
        interpreter = Interpreter(command)
        raises(SystemExit, interpreter.interpret, 'quit')

    def test_context_owner_set(self):
        # when a command owns the context, it gets first shot at every
        # expression; returning None defers to normal dispatch
        def execute_1():
            return 'foo'
        command1 = self.create_command('foobar', execute_method=execute_1)

        def execute_2(expression):
            if expression == 'foobar':
                return None
            return 'bar'
        command2 = self.create_command('bar', execute_method=execute_2)

        interpreter = Interpreter([command1, command2])
        interpreter.context.claim_for(command2)
        assert interpreter.interpret('foobar') == None
        assert interpreter.interpret('bar') == 'bar'

    def test_no_return(self):
        # a command without an explicit return yields None
        def execute():
            pass
        command = self.create_command('foo', execute_method=execute)
        interpreter = Interpreter(command)
        assert interpreter.interpret('foo') == None

    def test_priority(self):
        # the highest-priority matching command wins
        def execute_1():
            return 'foo'
        command1 = self.create_command('bar', 'high', execute_1)

        def execute_2():
            return 'BYE'
        command2 = self.create_command('bar', 'normal', execute_2)

        def execute_3():
            return 'BYEBYE'
        command3 = self.create_command('bar', 'low', execute_3)

        interpreter = Interpreter([command1, command2, command3])
        assert interpreter.interpret('bar') == 'foo'

    def test_execute_parameters(self):
        # the interpreter injects arguments into execute() based on the
        # parameter names it declares
        def no_parameters():
            return 'executed command'

        def with_context(context):
            assert context.owner == None
            return 'executed command'

        def with_commands(commands):
            assert commands == [command, ]
            return 'executed command'

        def with_expression(expression):
            assert expression == 'command'
            return 'executed command'

        def with_variables(variables):
            assert variables == {}
            return 'executed command'

        def with_multiple_parameters(expression, commands,
                                     variables, context):
            assert context.owner == None
            assert commands == [command, ]
            assert expression == 'command'
            assert variables == {}
            return 'executed command'

        execute_methods = (no_parameters, with_context, with_commands,
                           with_expression, with_variables, with_multiple_parameters, )
        command = self.create_command('command')
        interpreter = Interpreter(command)

        for execute_method in execute_methods:
            command.execute = execute_method
            assert interpreter.interpret('command') == 'executed command', \
                execute_method.__name__ + ' failed!'
            command.matches.assert_called_with('command')

    def test_execute_command(self):
        def execute():
            return 'executed command'
        command1 = self.create_command('foo', execute_method=execute)
        command2 = self.create_command('bar', execute_method=execute)
        interpreter = Interpreter([command1, command2, ])
        assert interpreter.interpret('foo') == 'executed command'

    def create_command(self, name, priority='normal', execute_method=None):
        # Build a Mock carrying the attributes the interpreter inspects:
        # aliases, matches(), execute() and priority.
        command = Mock()
        command.aliases = name
        command.matches = Mock()
        command.matches.return_value = True
        if execute_method:
            command.execute = execute_method
        command.priority = priority
        return command
| |
'''
read & write (:mod:`calour.io`)
===============================
.. currentmodule:: calour.io
Functions
^^^^^^^^^
.. autosummary::
:toctree: generated
read
read_amplicon
read_qiime2
read_ms
save
save_fasta
save_biom
'''
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Calour development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from logging import getLogger
import zipfile
import tempfile
import pandas as pd
import numpy as np
import biom
from . import Experiment, AmpliconExperiment, MS1Experiment
from .util import get_file_md5, get_data_md5, _get_taxonomy_string
from ._doc import ds
from .database import _get_database_class
logger = getLogger(__name__)
def _file_from_zip(tempdir, data_file, internal_data):
'''extract the data file from a regular/qza filename into the tempdir, and return the filename
Parameters
----------
tmpdir: str
name of the directory to extract the zip into
data_file: str
original name of the file (could be '.qza' or not)
internale_data: str
the internal qiime2 qza file name (i.e. 'data/feature-table.biom' for biom table etc.)
Returns
-------
str: name of data file inside the tempdir
'''
if not zipfile.is_zipfile(data_file):
raise ValueError('the qiime2 file %s is not a valid zip file. Is it a qiime2 artifact (.qza) file?' % data_file)
with zipfile.ZipFile(data_file) as fl:
internal_name = None
for fname in fl.namelist():
if fname.endswith(internal_data):
internal_name = fname
data_file = fl.extract(internal_name, tempdir)
return data_file
raise ValueError('No data file (%s) in qza file %s. is it the appropriate qiime2 file?' % (internal_data, data_file))
def _read_qiime2_zip(fp, transpose=True):
    '''Read in a qiime2 qza biom table.

    NOTE: this reads the qiime2 qza artifact via unzip rather than relying
    on qiime2, so artifacts can be read without activating the qiime2
    environment.

    Parameters
    ----------
    fp : str
        file path to the qiime2 (.qza) biom table
    transpose : bool (True by default)
        Transpose the table or not. The biom table has samples in
        column while sklearn and other packages require samples in
        row. So you should transpose the data table.

    Returns
    -------
    sid : list of str
        the sample ids
    fid : list of str
        the feature ids
    data : numpy array (2d) of float
        the table
    feature_md : pandas.DataFrame
        the feature metadata (if available in table)
    '''
    logger.debug('loading qiime2 feature table %s' % fp)
    with tempfile.TemporaryDirectory() as tempdir:
        # pull the biom table out of the qza (zip) archive and parse it
        biom_name = _file_from_zip(tempdir, fp, internal_data='data/feature-table.biom')
        result = _read_biom(biom_name, transpose=transpose)
    return result
def read_qiime2(fp, sample_metadata_file=None, rep_seq_file=None, taxonomy_file=None, **kwargs):
    '''Read a qiime2 artifact files into an amplicon experiment.

    .. note:: It converts feature metadata index (sequences) to
       upper case.

    Parameters
    ----------
    fp: str
        file path of the qiime2 feature table .qza artifact file
    sample_metadata_file : None or str, optional
        None (default) to just use sample names (no additional metadata).
        if not None, file path to the sample metadata (aka mapping file in QIIME).
    rep_seq_file: None or str, optional
        None (default) to use the feature ids in the feature table
        if not None, file path to the qiime2 representative sequences artifact file (defined by the qiime2 --o-representative-sequences parameter)
    taxonomy_file: None or str, optional
        if not None, add taxonomy for each feature using the qiime2 taxonomy artifact file (output of the qiime2 feature-classifier command)

    Keyword Arguments
    -----------------
    %(io.read.parameters)s

    Returns
    -------
    the experiment loaded by read_amplicon, with feature metadata
    augmented from the optional rep-seq and taxonomy artifacts.
    '''
    newexp = read_amplicon(fp, sample_metadata_file=sample_metadata_file, data_file_type='qiime2', **kwargs)
    with tempfile.TemporaryDirectory() as tempdir:
        # if rep-seqs file is supplied, translate hashes to sequences
        if rep_seq_file is not None:
            logger.debug('loading rep_seqs file %s' % rep_seq_file)
            rs_name = _file_from_zip(tempdir, rep_seq_file, internal_data='data/dna-sequences.fasta')
            rseqs = []
            rids = []
            for chead, cseq in _iter_fasta(rs_name):
                # sequences are uppercased so they compare consistently
                rseqs.append(cseq.upper())
                rids.append(chead)
            rep_seqs = pd.Series(data=rseqs, index=rids, name='_feature_id')
            # test if all hashes are identical to the rep_seqs file supplied
            if not newexp.feature_metadata.index.equals(rep_seqs.index):
                logger.info('Rep seqs hashes and table hashes are not equal. Using table hashes.')
            # switch the columns so now _feature_id (and the index) is the sequence and not the hash. The hash is copied to '_hash'
            newexp.feature_metadata.rename(columns={'_feature_id': '_hash'}, inplace=True)
            newexp.feature_metadata = newexp.feature_metadata.join(other=rep_seqs, on='_hash', how='left')
            newexp.feature_metadata.set_index('_feature_id', inplace=True, drop=False)
        # if taxonomy file is supplied, load it into the feature metadata
        if taxonomy_file is not None:
            logger.debug('loading taxonomy file %s' % taxonomy_file)
            tax_name = _file_from_zip(tempdir, taxonomy_file, internal_data='data/taxonomy.tsv')
            taxonomy_df = pd.read_table(tax_name)
            taxonomy_df.set_index('Feature ID', inplace=True)
            newexp.feature_metadata = newexp.feature_metadata.join(other=taxonomy_df, how='left')
            # if nothing joined by sequence, fall back to joining by hash
            if len(newexp.feature_metadata.index.intersection(taxonomy_df.index)) == 0:
                logger.info('No matching sequences in taxonomy file.')
                if '_hash' in newexp.feature_metadata.columns:
                    logger.info('Trying to use hashes for taxonomy')
                    newexp.feature_metadata = newexp.feature_metadata.drop(taxonomy_df.columns, axis=1)
                    newexp.feature_metadata = newexp.feature_metadata.join(other=taxonomy_df, on='_hash', how='left')
    return newexp
def _read_biom(fp, transpose=True):
    '''Read in a biom table file.

    Parameters
    ----------
    fp : str or file object
        file path to the biom table
    transpose : bool (True by default)
        Transpose the table or not. The biom table has samples in
        column while sklearn and other packages require samples in
        row. So you should transpose the data table.

    Returns
    -------
    sid : list of str
        the sample ids
    fid : list of str
        the feature ids
    data : numpy array (2d) of float
        the table
    feature_md : pandas.DataFrame
        the feature metadata (if available in table)
    '''
    logger.debug('loading biom table %s' % fp)
    # a file-like object is parsed directly; otherwise treat fp as a path
    table = biom.parse_table(fp) if hasattr(fp, 'read') else biom.load_table(fp)
    sid = table.ids(axis='sample')
    fid = table.ids(axis='observation')
    logger.info('loaded %d samples, %d features' % (len(sid), len(fid)))
    feature_md = _get_md_from_biom(table)
    data = table.matrix_data
    if transpose:
        data = data.transpose()
    return sid, fid, data, feature_md
def _get_md_from_biom(table):
    '''Get the per-feature (observation) metadata of the biom table.

    Return
    ------
    pandas.DataFrame or None
        one row per feature, indexed by feature id, or None when the
        table carries no observation metadata
    '''
    feature_ids = table.ids(axis='observation')
    metadata = table.metadata(axis='observation')
    if metadata is None:
        logger.debug('No metadata associated with features in biom table')
        return None
    return pd.DataFrame([dict(tmd) for tmd in metadata], index=feature_ids)
def _read_csv(fp, sample_in_row=False, sep=','):
    '''Read a csv file.

    Parameters
    ----------
    fp : str
        file path to the table
    sample_in_row : bool, optional
        True if csv datafile has samples as rows, features as columns
        False (default) if columns are samples (rows are features)
    sep : str, optional
        The separator between entries in the table

    Returns
    -------
    sid : list of str
        the sample ids
    fid : list of str
        the feature ids
    data : numpy array (2d) of float
        the table
    '''
    logger.debug('loading csv table %s' % fp)
    # the default (c) engine throws an error here -- a known pandas bug
    # (see #11166) -- so use the python engine
    df = pd.read_csv(fp, header=0, engine='python', sep=sep)
    # an extra sep at the end of each line makes pandas create an empty
    # trailing column, which can break normalization; drop such columns
    df.dropna(axis='columns', how='all', inplace=True)
    df.set_index(df.columns[0], drop=True, inplace=True)
    if sample_in_row:
        df = df.transpose()
        logger.debug('transposed table')
    data = df.values.astype(float).transpose()
    logger.info('loaded %d samples, %d features' % data.shape)
    return df.columns, df.index, data
def _read_metadata(ids, f, kwargs):
    '''read metadata table

    Parameters
    ----------
    ids : list like of str
        ids from data table
    f : str
        file path of metadata
    kwargs : dict
        keyword argument passed to :func:`pandas.read_csv`

    Returns
    -------
    pandas.DataFrame of metadata, reindexed to match ``ids`` (rows with
    metadata but no data are dropped; rows with data but no metadata get
    all-NaN metadata)

    Raises
    ------
    ValueError
        if the metadata index column contains duplicate ids
    '''
    # load the sample/feature metadata file
    if f is None:
        metadata = pd.DataFrame(index=ids)
    else:
        if kwargs is None:
            kwargs = {}
        # the following code force the read the index_col as string
        # by reading it as a normal column and specify its dtype and then
        # setting it as index. There seems no better solution
        if 'index_col' in kwargs:
            index_col = kwargs.pop('index_col')
        else:
            index_col = 0
        if 'dtype' in kwargs:
            kwargs['dtype'][index_col] = str
        else:
            kwargs['dtype'] = {index_col: str}
        try:
            metadata = pd.read_csv(f, sep='\t', **kwargs)
        except Exception as err:
            logger.error('Error reading metadata file %r\nError: %s' % (f, err))
            raise err
        metadata.set_index(metadata.columns[index_col], inplace=True)
    # report the mismatch between the data ids and the metadata ids
    mid, ids2 = set(metadata.index), set(ids)
    diff = mid - ids2
    if diff:
        logger.warning('These have metadata but do not have data - dropped (%d): %r' % (len(diff), diff))
    diff = ids2 - mid
    if diff:
        logger.warning('These have data but do not have metadata: %r' % diff)
    # reorder the id in metadata to align with biom
    # metadata = metadata.loc[ids, ]
    # check if we have duplicate index ids (which will raise)
    dup = metadata.index.duplicated(keep=False)
    if dup.sum() > 0:
        # prefer the index column's name in the error message, falling
        # back to its ordinal position
        column_str = '%d' % index_col
        if metadata.index.name is not None:
            if len(metadata.index.name) > 0:
                column_str = metadata.index.name
        errmsg = '%d duplicate id values encountered in index column %s of mapping file %r:' % (dup.sum(), column_str, f)
        errmsg += '\n%s' % set(metadata.index[dup].values)
        raise ValueError(errmsg)
    # align metadata row order with the data table's ids
    metadata = metadata.reindex(ids)
    return metadata
@ds.get_sectionsf('io.read')
def read(data_file, sample_metadata_file=None, feature_metadata_file=None,
         description='', sparse=True,
         data_file_type='biom', data_file_sep=',', sample_in_row=False,
         sample_id_proc=None, feature_id_proc=None,
         sample_metadata_kwargs=None, feature_metadata_kwargs=None,
         cls=Experiment,
         *, normalize):
    '''Read in an experiment.

    .. note:: The order in the sample and feature metadata tables are changed
       to align with data table.

    Parameters
    ----------
    data_file : str or file-like object
        file path to the data table containing abundance values.
    sample_metadata_file : str, default=None
        File path to sample metadata (aka mapping file in
        QIIME). Default to just use sample names (no additional
        metadata).
    feature_metadata_file : str, default=None
        File path to feature metadata.
    description : str
        description of the experiment
    sparse : bool
        read the biom table into sparse or dense array
    data_file_type : str, default='biom'
        the data_file format. options:

        * 'biom': a biom table (biom-format.org)
        * 'csv': a comma-, tab-, or other delimiter- (as indicated by the
          `data_file_sep` parameter) separated table. By default,
          samples are in columns and features are in rows (see `sample_in_row` parameter).
        * 'tsv': same as 'csv' but always tab-delimited.
        * 'qiime2': a qiime2 biom table artifact
    data_file_sep : str, default=','
        column delimiter for the data table if `data_file_type` is 'csv'.
    sample_in_row : bool, optional
        False if data table columns are sample, True if rows are samples
    sample_id_proc, feature_id_proc : callable, default=None
        If not `None`, modify each sample/feature id in the data table using
        the callable function. The callable accepts a list of str and
        returns a list of str (sample or feature ids after processing).
        Useful in metabolomics experiments, where the sample IDs in the
        data table contain additional information compared to the
        mapping file (using a '_' separator), and this needs to be
        removed in order to sync the sample IDs between data table and
        metadata file.
    sample_metadata_kwargs, feature_metadata_kwargs : dict, default=None
        keyword arguments passing to :func:`pandas.read_csv` when
        reading sample metadata or feature metadata. For example, you
        can set ``sample_metadata_kwargs={'dtype': {'pH': int},
        'encoding': 'latin-1'}`` to read the pH column in the sample
        metadata as int and parse the file as latin-1 instead of
        utf-8. By default, it assumes the first column in the metadata
        files is sample/feature IDs and is read in as row index. To
        avoid this, please provide {'index_col': False}.
    cls : ``class``, optional
        what class object to read the data into (:class:`.Experiment` by default)
    normalize : int or None
        normalize each sample to the specified read count. `None` to not normalize

    Returns
    -------
    Experiment
        the new object created
    '''
    # store the function parameters for the call history; must run before
    # any other assignment so locals() contains only the arguments
    fparams = locals()
    logger.debug('Reading experiment (%s, %s, %s)' % (
        data_file, sample_metadata_file, feature_metadata_file))
    # load the data table; the biom/qiime2 readers may also return
    # per-feature metadata embedded in the table (fmd)
    fmd = None
    if data_file_type == 'biom':
        sid, fid, data, fmd = _read_biom(data_file)
    elif data_file_type == 'csv':
        sid, fid, data = _read_csv(data_file, sample_in_row=sample_in_row, sep=data_file_sep)
    elif data_file_type == 'qiime2':
        sid, fid, data, fmd = _read_qiime2_zip(data_file)
    elif data_file_type == 'tsv':
        sid, fid, data = _read_csv(data_file, sample_in_row=sample_in_row, sep='\t')
    else:
        raise ValueError('unknown data_file_type %s' % data_file_type)

    sid = [str(x) for x in sid]
    # if we need to process the table sample/feature IDs (e.g. to make them
    # match the ids in the metadata files)
    if sample_id_proc is not None:
        sid = sample_id_proc(sid)
    if feature_id_proc is not None:
        fid = feature_id_proc(fid)

    sample_metadata = _read_metadata(sid, sample_metadata_file, sample_metadata_kwargs)
    feature_metadata = _read_metadata(fid, feature_metadata_file, feature_metadata_kwargs)

    # store the sample and feature ids also as a column (for sorting, etc.)
    sample_metadata['_sample_id'] = sample_metadata.index.values
    feature_metadata['_feature_id'] = feature_metadata.index.values

    # store the per-sample abundance before any processing (normalization
    # below would otherwise lose it)
    sample_metadata['_calour_original_abundance'] = data.sum(axis=1)

    if fmd is not None:
        # rename biom-embedded metadata columns that clash with columns from
        # the feature metadata file (the file keeps the original name)
        renames = {}
        for ccol in fmd.columns:
            if ccol in feature_metadata.columns:
                renames[ccol] = ccol + '_biom'
        if renames:
            fmd.rename(columns=renames, inplace=True)
        # combine it with the feature metadata
        feature_metadata = pd.concat([feature_metadata, fmd], axis=1)

    # record file provenance (paths and md5 checksums) in the experiment info
    info = {'data_file': data_file,
            'data_md5': get_data_md5(data),
            'sample_metadata_file': sample_metadata_file,
            'sample_metadata_md5': get_file_md5(sample_metadata_file),
            'feature_metadata_file': feature_metadata_file,
            'feature_metadata_md5': get_file_md5(feature_metadata_file)}

    exp = cls(data, sample_metadata, feature_metadata,
              info=info, description=description, sparse=sparse)
    if normalize is not None:
        exp.normalize(total=normalize, inplace=True)

    # initialize the call history (fixed: previously recorded the wrong
    # function name 'read_amplicon')
    param = ['{0!s}={1!r}'.format(k, v) for k, v in fparams.items()]
    exp._call_history = ['{0}({1})'.format('read', ','.join(param))]
    return exp
@ds.with_indent(4)
def read_amplicon(data_file, sample_metadata_file=None,
                  *, min_reads, normalize, **kwargs):
    '''Read in an amplicon experiment.

    This wraps :func:`read`, then optionally drops low-coverage samples
    and normalizes the remaining ones.

    Parameters
    ----------
    min_reads : int or None
        remove all samples with less than `min_reads`. `None` to keep all samples
    normalize : int or None
        normalize each sample to this total after filtering. `None` to skip.

    Keyword Arguments
    -----------------
    %(io.read.parameters)s

    Returns
    -------
    AmpliconExperiment
        after removing low read sampls and normalization

    See Also
    --------
    read
    '''
    # capture the arguments for the call history before any locals are added
    call_params = locals()
    # read without normalizing - normalization must happen only after the
    # min_reads filtering below
    exp = read(data_file, sample_metadata_file, cls=AmpliconExperiment,
               normalize=None, **kwargs)
    if 'taxonomy' in exp.feature_metadata.columns:
        exp.feature_metadata['taxonomy'] = _get_taxonomy_string(exp, remove_underscore=False)
    if min_reads is not None:
        exp.filter_by_data('abundance', axis=0, cutoff=min_reads, mean_or_sum='sum', inplace=True)
    if normalize is not None:
        exp.normalize(total=normalize, axis=0, inplace=True)
    # initialize the call history
    arg_strs = ('{0!s}={1!r}'.format(k, v) for k, v in call_params.items())
    exp._call_history = ['{0}({1})'.format('read_amplicon', ','.join(arg_strs))]
    return exp
@ds.with_indent(4)
def read_ms(data_file, sample_metadata_file=None, feature_metadata_file=None, gnps_file=None,
            data_file_type='mzmine2', sample_in_row=None, direct_ids=None, get_mz_rt_from_feature_id=None,
            use_gnps_id_from_AllFiles=True, cut_sample_id_sep=None,
            mz_rt_sep=None, mz_thresh=0.02, rt_thresh=15,
            description=None, sparse=False, *, normalize, **kwargs):
    '''Read a mass-spec experiment.

    Calour supports various ms table formats, with several preset formats (specified by the data_file_type='XXX' parameter),
    as well as able to read user specified formats.
    With the installation of the gnps-calour database interface, Calour can integrate MS2 information from GNPS into the analysis:
    If the data table and the gnps file share the same IDs (preferred), GNPS annotations use the uniqueID of the features. Otherwise, calour
    matches the features to the gnps file using an MZ and RT threshold window (specified by the mz_thresh=XXX, rt_thresh=XXX parameters).

    Supported formats for ms analysis (as specified by the data_file_type='XXX' parameter) include:

    * 'mzmine2': using the csv output file of mzmine2. MZ and RT are obtained via the 2nd and 3rd column in the file.
    * 'biom': using a biom table for the metabolite table. featureIDs in the table (first column) can be either MZ_RT (concatenated with a separator), or a unique ID matching the gnps_file ids.
    * 'openms': using a csv data table with MZ_RT or unique ID as featureID (first column). samples can be columns (default) or rows (using the sample_in_row=True parameter)
    * 'gnps-ms2': a tsv file exported from gnps, with gnps ids as featureIDs.

    Parameters
    ----------
    data_file : str
        The name of the data table (mzmine2 output/bucket table/biom table) containing the per-metabolite abundances.
    sample_metadata_file : str or None (optional)
        None (default) to not load metadata per sample
        str to specify name of sample mapping file (tsv).
        Note: sample names in the bucket table and sample_metadata file must match. In case bucket table sample names contains additional
        information, you can split them at the separator character (usually '_'), keeping only the first part, using the cut_sample_id_sep='_' parameter
        (see below)
    gnps_file : str or None (optional)
        name of the gnps clusterinfosummarygroup_attributes_withIDs_arbitraryattributes/XXX.tsv file, for use with the 'gnps' database.
        This enables identification of the metabolites with known MS2 (for the interactive heatmap and sorting/filtering etc), as well as linking
        to the gnps page for each metabolite (from the interactive heatmap - by double clicking on the metabolite database information).
        Note: requires gnps-calour database interface module (see Calour installation instructions for details).
    feature_metadata_file : str or None (optional)
        Name of table containing additional metadata about each feature
        None (default) to not load
    data_file_type : str, optional
        the data file format. options include:

        'mzmine2': load the mzmine2 output csv file.
            MZ and RT are obtained from this file.
            GNPS linking is direct via the unique id column.
            table is csv, columns are samples.
        'biom': load a biom table for the features
            MZ and RT are obtained via the featureID (first column), which is assumed to be MZ_RT.
            GNPS linking is indirect via the mz and rt threshold windows.
            table is a tsv/json/hdf5 biom table, columns are samples.
        'openms': load an openms output table
            MZ and RT are obtained via the featureID (first column), which is assumed to be MZ_RT.
            GNPS linking is indirect via the mz and rt threshold windows.
            table is a csv table, columns are samples.
        'gnps-ms2': load a gnps exported biom table
            MZ and RT are obtained via the gnps_file if available, otherwise are NA
            GNPS linking is direct via the first column (featureID).
            table is a tsv/json/hdf5 biom table, columns are samples.
    sample_in_row : bool or None, optional
        False indicates rows in the data table file are features, True indicates rows are samples.
        None to use default value according to data_file_type
    direct_ids : bool or None, optional
        True indicates the feature ids in the data table file are the same ids used in the gnps_file.
        False indicates feature ids are not the same as in the gnps_file (such as when the ids are the MZ_RT)
        None to use default value according to data_file_type
    get_mz_rt_from_feature_id : bool or None, optional
        True indicates the data table file feature ids contain the MZ/RT of the feature.
        False to not obtain MZ/RT from the feature id
        None to use default value according to data_file_type
    use_gnps_id_from_AllFiles : bool, optional
        True (default) to link the data table file gnps ids to the AllFiles column in the gnps_file.
        False to link the data table file gnps ids to the 'cluster index' column in the gnps_file.
    cut_sample_id_sep : str or None, optional
        str (typically '_') to split the sampleID in the data table file, keeping only the first part.
        Useful when the sampleIDs in the data table contain additional information compared to the
        mapping file (using a '_' separator), and this needs to be removed in order to sync the sampleIDs between table and mapping file.
        None (default) to not change the data table file sampleID
    mz_rt_sep : str or None, optional
        The separator character between the MZ and RT parts of the featureID (if it contains them) (usually '_').
        If not supplied, autodetect the separator.
        Note this is used only if get_mz_rt_from_feature_id=True
    mz_thresh : float, optional
        The tolerance for M/Z to match features to the gnps_file. Used only if parameter direct_ids=False.
    rt_thresh : float, optional
        The tolerance for retention time to match features to the gnps_file. Used only if parameter direct_ids=False.
    description : str or None (optional)
        Name of the experiment (for display purposes).
        None (default) to assign file name
    sparse : bool (optional)
        False (default) to store data as dense matrix (faster but more memory)
        True to store as sparse (CSR)
    normalize : int or None
        normalize each sample to the specified reads. `None` to not normalize

    Keyword Arguments
    -----------------
    %(io.read.parameters)s

    Returns
    -------
    MS1Experiment

    See Also
    --------
    read
    '''
    # store the function parameters for the call history; must run before
    # any other assignment so locals() contains only the arguments
    fparams = locals()
    # per-format defaults: sample orientation, whether table feature ids
    # match the gnps ids directly, whether MZ/RT is encoded in the feature
    # id, and which low-level reader ('ctype') loads the table
    default_params = {'mzmine2': {'sample_in_row': False, 'direct_ids': True, 'get_mz_rt_from_feature_id': False, 'ctype': 'csv'},
                      'biom': {'sample_in_row': False, 'direct_ids': False, 'get_mz_rt_from_feature_id': True, 'ctype': 'biom'},
                      'openms': {'sample_in_row': False, 'direct_ids': False, 'get_mz_rt_from_feature_id': True, 'ctype': 'csv'},
                      'gnps-ms2': {'sample_in_row': False, 'direct_ids': True, 'get_mz_rt_from_feature_id': False, 'ctype': 'biom'}}
    if data_file_type not in default_params:
        raise ValueError('data_file_type %s not recognized. valid options are: %s' % (data_file_type, default_params.keys()))
    # set the default params according to file type, if not specified by user
    if sample_in_row is None:
        sample_in_row = default_params[data_file_type]['sample_in_row']
    if direct_ids is None:
        direct_ids = default_params[data_file_type]['direct_ids']
    if get_mz_rt_from_feature_id is None:
        get_mz_rt_from_feature_id = default_params[data_file_type]['get_mz_rt_from_feature_id']
    logger.debug('Reading MS data (data table %s, map file %s, data_file_type %s)' % (data_file, sample_metadata_file, data_file_type))
    # NOTE(review): `description` is accepted and documented ("None to assign
    # file name") but is currently not forwarded to read() - confirm intent.
    exp = read(data_file, sample_metadata_file, feature_metadata_file,
               data_file_type=default_params[data_file_type]['ctype'], sparse=sparse,
               normalize=None, cls=MS1Experiment,
               sample_id_proc=lambda x: _split_sample_ids(x, split_char=cut_sample_id_sep),
               sample_in_row=sample_in_row, **kwargs)
    # get the MZ/RT data
    if data_file_type == 'mzmine2':
        # in the mzmine2 csv the 2nd/3rd columns hold MZ/RT; after the
        # transposed read they appear as entries in the sample metadata index
        if 'row m/z' not in exp.sample_metadata.index:
            raise ValueError('Table file %s does not contain "row m/z" column. Is it an mzmine2 data table?' % data_file)
        mzpos = exp.sample_metadata.index.get_loc('row m/z')
        if 'row retention time' not in exp.sample_metadata.index:
            raise ValueError('Table file %s does not contain "row retention time" column. Is it an mzmine2 data table?' % data_file)
        rtpos = exp.sample_metadata.index.get_loc('row retention time')
        # get the MZ and RT
        exp.feature_metadata['MZ'] = exp.data[mzpos, :]
        exp.feature_metadata['RT'] = exp.data[rtpos, :]
        # drop the two columns which are not samples
        sample_pos = np.arange(len(exp.sample_metadata))
        sample_pos = list(set(sample_pos).difference([mzpos, rtpos]))
        exp = exp.reorder(sample_pos)
    if get_mz_rt_from_feature_id:
        logger.debug('getting MZ and RT from featureIDs')
        if direct_ids:
            raise ValueError('Cannot get mz/rt from feature ids if direct_ids=True.')
        # if needed, autodetect the mz/rt separator from the first feature id
        if mz_rt_sep is None:
            logger.debug('autodetecting mz/rt separator')
            tmp = exp.feature_metadata['_feature_id'].iloc[0].split('_')
            if len(tmp) == 2:
                logger.debug('Autodetcted "_" as mz/rt separator')
                mz_rt_sep = '_'
            else:
                # None makes str.split() split on any whitespace
                tmp = exp.feature_metadata['_feature_id'].iloc[0].split()
                if len(tmp) == 2:
                    logger.debug('Autodetcted " " as mz/rt separator')
                    mz_rt_sep = None
                else:
                    raise ValueError('No separator detected for mz/rt separation in feature ids. please specify separator in mz_rt_sep parameter')
        # get the MZ/RT
        try:
            exp.feature_metadata[['MZ', 'RT']] = exp.feature_metadata['_feature_id'].str.split(mz_rt_sep, expand=True)
        except ValueError:
            raise ValueError('Failed to obtain MZ/RT from feature ids. Maybe use get_mz_rt_from_feature_id=False?')
        # mz and rt are numbers
        exp.feature_metadata['MZ'] = exp.feature_metadata['MZ'].astype(float)
        exp.feature_metadata['RT'] = exp.feature_metadata['RT'].astype(float)
    if normalize is not None:
        # record the original total read count into sample metadata
        exp.normalize(total=normalize, inplace=True)
    # Create the combined field for easy sorting/plotting
    if 'MZ' in exp.feature_metadata and 'RT' in exp.feature_metadata:
        exp.feature_metadata['mz_rt'] = ['%08.4f_%05.2f' % (x[1]['MZ'], x[1]['RT']) for x in exp.feature_metadata.iterrows()]
    if gnps_file:
        # load the gnps table
        gnps_data = pd.read_csv(gnps_file, sep='\t')
        exp.info['_calour_metabolomics_gnps_table'] = gnps_data
        # use the gnpscalour database interface to get metabolite info from the gnps file
        gnps_db = _get_database_class('gnps', exp=exp)
        # link each feature to the gnps ids based on MZ/RT or direct_id
        # NOTE(review): rt_thresh is documented but not passed here - confirm
        # whether _prepare_gnps_ids should receive it.
        gnps_db._prepare_gnps_ids(direct_ids=direct_ids, mz_thresh=mz_thresh, use_gnps_id_from_AllFiles=use_gnps_id_from_AllFiles)
        # add gnps names and cluster to the features as feature_metadata fields (gnps_name and gnps_cluster)
        gnps_db._prepare_gnps_names()
    # initialize the call history (fixed: previously recorded the wrong
    # function name 'read_amplicon')
    param = ['{0!s}={1!r}'.format(k, v) for k, v in fparams.items()]
    exp._call_history = ['{0}({1})'.format('read_ms', ','.join(param))]
    return exp
def _split_sample_ids(sid, split_char=None, ignore_split=('row m/z', 'row retention time')):
'''Split the sample id in data table using the split_char returning the first split str.
Used in the read_ms() function, as a callable for the read() function
Parameters
----------
sid : list of str
the list of sample ids to process
split_char: str or None, optional
None to not split the sampleids
str to split sample id using this string
Returns
-------
list of str: the split sample ids
'''
if split_char is None:
return sid
logger.info('splitting table sample ids using separator %s. use "data_table_params={\'cut_sample_id_sep\'=None}" to disable cutting.' % split_char)
return [x.split(split_char)[0] if x not in ignore_split else x for x in sid]
def save(exp: Experiment, prefix, fmt='hdf5'):
    '''Write the experiment to disk as three files sharing one prefix.

    The data table goes to ``<prefix>.biom``, the sample metadata to
    ``<prefix>_sample.txt`` and the feature metadata to
    ``<prefix>_feature.txt``.

    Parameters
    ----------
    prefix : str
        file path (suffixes auto added for the 3 files) to save to.
    fmt : str
        format for the data table. could be 'hdf5', 'txt', or 'json'.
    '''
    exp.save_biom('%s.biom' % prefix, fmt=fmt)
    # both metadata tables are always written as tab-separated text
    for table, name_fmt in ((exp.sample_metadata, '%s_sample.txt'),
                            (exp.feature_metadata, '%s_feature.txt')):
        table.to_csv(name_fmt % prefix, sep='\t')
def save_biom(exp: Experiment, f, fmt='hdf5', add_metadata='taxonomy'):
    '''Save experiment to biom format

    Parameters
    ----------
    f : str
        the file to save to
    fmt : str, optional
        the output biom table format. options are:
        'hdf5' (default) save to hdf5 biom table.
        'json' save to json biom table.
        'txt' save to text (tsv) biom table (cannot store metadata).
    add_metadata : str or None, optional
        add metadata column from `Experiment.feature_metadata` to biom table.
        Don't add if it is `None`.
    '''
    logger.debug('save biom table to file %s format %s' % (f, fmt))
    # use a distinct handle name so `f` keeps holding the file path; the
    # original code shadowed `f` with the (closed) file object, making the
    # final debug message log the handle repr instead of the path
    if fmt == 'hdf5':
        tab = _create_biom_table_from_exp(exp, add_metadata, to_list=True)
        with biom.util.biom_open(f, 'w') as fl:
            tab.to_hdf5(fl, "calour")
    elif fmt == 'json':
        tab = _create_biom_table_from_exp(exp, add_metadata)
        with open(f, 'w') as fl:
            tab.to_json("calour", fl)
    elif fmt == 'txt':
        tab = _create_biom_table_from_exp(exp, add_metadata)
        if add_metadata:
            logger.warning('.txt format does not support taxonomy information in save. Saving without taxonomy.')
        s = tab.to_tsv()
        with open(f, 'w') as fl:
            fl.write(s)
    else:
        raise ValueError('Unknown file format %s for save' % fmt)
    logger.debug('biom table saved to file %s' % f)
def save_fasta(exp: Experiment, f, seqs=None, header='seq'):
    '''Write feature sequences to a fasta file.

    Parameters
    ----------
    f : str
        the filename to save to
    seqs : list of str sequences ('ACGT') or None, optional
        None (default) to save all sequences in exp, or list of sequences to only save these sequences.
        Note: sequences not in exp will not be saved
    header : str, optional
        The format for the per-sequence header in the output fasta file. options are:
        'seq' (default): use the actual sequence as header
        'num': use the 'Seq_i' as header (where i is the enumerated index).
    '''
    logger.debug('Save seq to fasta file %s' % f)
    if seqs is None:
        logger.debug('no sequences supplied - saving all sequences')
        seqs = exp.feature_metadata.index.values
    num_skipped = 0
    known_seqs = exp.feature_metadata.index
    with open(f, 'w') as fasta_file:
        for pos, cur_seq in enumerate(seqs):
            # silently skip sequences that are not features of the experiment
            if cur_seq not in known_seqs:
                num_skipped += 1
                continue
            if header == 'seq':
                cur_header = cur_seq
            elif header == 'num':
                cur_header = 'Seq_%d' % pos
            else:
                raise ValueError('header %s not supported' % header)
            fasta_file.write('>%s\n%s\n' % (cur_header, cur_seq))
    logger.debug('wrote fasta file with %d sequences. %d sequences skipped' % (len(seqs) - num_skipped, num_skipped))
def _create_biom_table_from_exp(exp, add_metadata='taxonomy', to_list=False):
    '''Build a biom table from the experiment data and metadata.

    Parameters
    ----------
    exp : Experiment
    add_metadata : str or None, optional
        add metadata column from `Experiment.feature_metadata` to biom table.
        Don't add if it is `None`.
    to_list : bool, optional
        True to convert the metadata field to list (for hdf5)

    Returns
    -------
    biom_table
        the biom table representation of the experiment
    '''
    features = exp.feature_metadata.index
    samples = exp.sample_metadata.index
    # biom tables are observation (feature) x sample, so transpose our data
    table = biom.table.Table(exp.data.transpose(), features, samples, type="OTU table")
    if add_metadata is None:
        return table
    if add_metadata not in exp.feature_metadata.columns:
        logger.info('Metadata field %s not found. Saving biom table without metadata' % add_metadata)
        return table
    # biom expects a dict of dicts, so convert from a single-column
    # DataFrame (a Series would not give the nested shape)
    md = exp.feature_metadata.loc[:, [add_metadata]].to_dict('index')
    if to_list:
        # hdf5 saving needs taxonomy as a list of levels rather than a
        # ';'-delimited string
        for cmd in md.values():
            cmd[add_metadata] = cmd[add_metadata].split(';')
    table.add_metadata(md, axis='observation')
    return table
def _iter_fasta(fp):
'''Read fasta file into iterator.
Fasta file must contain header line (starting with ">") and one or more sequence lines.
Parameters
----------
fp: str
name of the fasta file
Yields
------
header: str
the header line (without ">")
sequence: str
the sequence ('ACGT'). Both header and sequence are whitespace stripped.
'''
# skip non-header lines at beginning of file
with open(fp, 'r') as fl:
for cline in fl:
if cline[0] == ">":
title = cline[1:].rstrip()
break
logger.warning('Fasta file %s has no headers' % fp)
return
lines = []
for cline in fl:
if cline[0] == ">":
yield title, ''.join(lines)
lines = []
title = cline[1:].strip()
continue
lines.append(cline.strip())
yield title, "".join(lines)
| |
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Removes code coverage flags from invocations of the Clang C/C++ compiler.
If the GN arg `use_clang_coverage=true`, this script will be invoked by default.
GN will add coverage instrumentation flags to almost all source files.
This script is used to remove instrumentation flags from a subset of the source
files. By default, it will not remove flags from any files. If the option
--files-to-instrument is passed, this script will remove flags from all files
except the ones listed in --files-to-instrument.
This script also contains hard-coded exclusion lists of files to never
instrument, indexed by target operating system. Files in these lists have their
flags removed in both modes. The OS can be selected with --target-os.
This script also contains hard-coded force lists of files to always instrument,
indexed by target operating system. Files in these lists never have their flags
removed in either mode. The OS can be selected with --target-os.
The order of precedence is: force list, exclusion list, --files-to-instrument.
The path to the coverage instrumentation input file should be relative to the
root build directory, and the file consists of multiple lines where each line
represents a path to a source file, and the specified paths must be relative to
the root build directory. e.g. ../../base/task/post_task.cc for build
directory 'out/Release'. The paths should be written using OS-native path
separators for the current platform.
One caveat with this compiler wrapper is that it may introduce unexpected
behaviors in incremental builds when the file path to the coverage
instrumentation input file changes between consecutive runs, so callers of this
script are strongly advised to always use the same path such as
"${root_build_dir}/coverage_instrumentation_input.txt".
It's worth noting on try job builders, if the contents of the instrumentation
file changes so that a file doesn't need to be instrumented any longer, it will
be recompiled automatically because if try job B runs after try job A, the files
that were instrumented in A will be updated (i.e., reverted to the checked in
version) in B, and so they'll be considered out of date by ninja and recompiled.
Example usage:
clang_code_coverage_wrapper.py \\
--files-to-instrument=coverage_instrumentation_input.txt
"""
from __future__ import print_function
import argparse
import os
import subprocess
import sys
# Flags used to enable coverage instrumentation.
# Flags should be listed in the same order that they are added in
# build/config/coverage/BUILD.gn
_COVERAGE_FLAGS = [
    '-fprofile-instr-generate',
    '-fcoverage-mapping',
    # Following experimental flags remove unused header functions from the
    # coverage mapping data embedded in the test binaries, and the reduction
    # of binary size enables building Chrome's large unit test targets on
    # MacOS. Please refer to crbug.com/796290 for more details.
    '-mllvm',
    '-limited-coverage-experimental=true',
]

# Files that should not be built with coverage flags by default.
# NOTE: entries are compared against the os.path.normpath()-ed source path
# extracted from the compile command in main().
_DEFAULT_COVERAGE_EXCLUSION_LIST = [
    # TODO(crbug.com/1051561): angle_unittests affected by coverage.
    '../../base/message_loop/message_pump_default.cc',
    '../../base/message_loop/message_pump_libevent.cc',
    '../../base/message_loop/message_pump_win.cc',
    '../../base/task/sequence_manager/thread_controller_with_message_pump_impl.cc', #pylint: disable=line-too-long
]

# Map of exclusion lists indexed by target OS.
# If no target OS is defined, or one is defined that doesn't have a specific
# entry, use _DEFAULT_COVERAGE_EXCLUSION_LIST.
_COVERAGE_EXCLUSION_LIST_MAP = {
    'android': [
        # This file caused webview native library failed on arm64.
        '../../device/gamepad/dualshock4_controller.cc',
    ],
    'fuchsia': [
        # TODO(crbug.com/1174725): These files caused clang to crash while
        # compiling them.
        '../../base/allocator/partition_allocator/pcscan.cc',
        '../../third_party/skia/src/core/SkOpts.cpp',
        '../../third_party/skia/src/opts/SkOpts_hsw.cpp',
        '../../third_party/skia/third_party/skcms/skcms.cc',
    ],
    'linux': [
        # These files caused a static initializer to be generated, which
        # shouldn't.
        # TODO(crbug.com/990948): Remove when the bug is fixed.
        '../../chrome/browser/media/router/providers/cast/cast_internal_message_util.cc', #pylint: disable=line-too-long
        '../../components/cast_channel/cast_channel_enum.cc',
        '../../components/cast_channel/cast_message_util.cc',
        '../../components/media_router/common/providers/cast/cast_media_source.cc', #pylint: disable=line-too-long
        '../../ui/events/keycodes/dom/keycode_converter.cc',
        # TODO(crbug.com/1051561): angle_unittests affected by coverage.
        '../../base/message_loop/message_pump_default.cc',
        '../../base/message_loop/message_pump_libevent.cc',
        '../../base/message_loop/message_pump_win.cc',
        '../../base/task/sequence_manager/thread_controller_with_message_pump_impl.cc', #pylint: disable=line-too-long
    ],
    'chromeos': [
        # These files caused clang to crash while compiling them. They are
        # excluded pending an investigation into the underlying compiler bug.
        '../../third_party/webrtc/p2p/base/p2p_transport_channel.cc',
        '../../third_party/icu/source/common/uts46.cpp',
        '../../third_party/icu/source/common/ucnvmbcs.cpp',
        '../../base/android/android_image_reader_compat.cc',
        # TODO(crbug.com/1051561): angle_unittests affected by coverage.
        '../../base/message_loop/message_pump_default.cc',
        '../../base/message_loop/message_pump_libevent.cc',
        '../../base/message_loop/message_pump_win.cc',
        '../../base/task/sequence_manager/thread_controller_with_message_pump_impl.cc', #pylint: disable=line-too-long
    ],
    'win': [
        # TODO(crbug.com/1051561): angle_unittests affected by coverage.
        '../../base/message_loop/message_pump_default.cc',
        '../../base/message_loop/message_pump_libevent.cc',
        '../../base/message_loop/message_pump_win.cc',
        '../../base/task/sequence_manager/thread_controller_with_message_pump_impl.cc', #pylint: disable=line-too-long
    ],
}

# Map of force lists indexed by target OS. Files listed here are always
# instrumented, taking precedence over the exclusion lists above.
_COVERAGE_FORCE_LIST_MAP = {
    # clang_profiling.cc refers to the symbol `__llvm_profile_dump` from the
    # profiling runtime. In a partial coverage build, it is possible for a
    # binary to include clang_profiling.cc but have no instrumented files, thus
    # causing an unresolved symbol error because the profiling runtime will not
    # be linked in. Therefore we force coverage for this file to ensure that
    # any target that includes it will also get the profiling runtime.
    'win': [r'..\..\base\test\clang_profiling.cc'],
    # TODO(crbug.com/1141727) We're seeing runtime LLVM errors in mac-rel when
    # no files are changed, so we suspect that this is similar to the other
    # problem with clang_profiling.cc on Windows. The TODO here is to force
    # coverage for this specific file on ALL platforms, if it turns out to fix
    # this issue on Mac as well. It's the only file that directly calls
    # `__llvm_profile_dump` so it warrants some special treatment.
    'mac': ['../../base/test/clang_profiling.cc'],
}
def _remove_flags_from_command(command):
  """Strip every exact occurrence of the _COVERAGE_FLAGS sequence in place.

  We need to remove the coverage flags for this file, but we only want to
  remove them if we see the exact sequence defined in _COVERAGE_FLAGS.
  That ensures that we only remove the flags added by GN when
  "use_clang_coverage" is true. Otherwise, we would remove flags set by
  other parts of the build system.
  """
  first_flag = _COVERAGE_FLAGS[0]
  flag_count = len(_COVERAGE_FLAGS)
  search_from = 0
  while True:
    try:
      pos = command.index(first_flag, search_from)
    except ValueError:
      # No further occurrence of the first flag - nothing left to remove.
      return
    if command[pos:pos + flag_count] == _COVERAGE_FLAGS:
      del command[pos:pos + flag_count]
      # There can be multiple sets of _COVERAGE_FLAGS, possibly back to
      # back; re-scan from the same position.
      search_from = pos
    else:
      search_from = pos + 1
def main():
  """Parse arguments, optionally strip coverage flags, and run the compile."""
  parser = argparse.ArgumentParser()
  parser.usage = __doc__
  parser.add_argument(
      '--files-to-instrument',
      type=str,
      help='Path to a file that contains a list of file names to instrument.')
  parser.add_argument(
      '--target-os', required=False, help='The OS to compile for.')
  parser.add_argument('args', nargs=argparse.REMAINDER)
  options = parser.parse_args()

  instrument_file = options.files_to_instrument
  if instrument_file and not os.path.isfile(instrument_file):
    raise Exception('Path to the coverage instrumentation file: "%s" doesn\'t '
                    'exist.' % instrument_file)

  command = options.args
  # Non-clang invocations are passed through untouched.
  if not any('clang' in part for part in command):
    return subprocess.call(command)

  target_os = options.target_os

  # The command is assumed to use Clang as the compiler, and the path to the
  # source file follows the -c argument (/c for clang-cl.exe on Windows) and
  # is relative to the root build directory.
  source_flag = '/c' if target_os == 'win' else '-c'
  try:
    flag_pos = command.index(source_flag)
  except ValueError:
    print('%s argument is not found in the compile command.' % source_flag)
    raise
  if flag_pos + 1 >= len(command):
    raise Exception('Source file to be compiled is missing from the command.')

  # On Windows, filesystem paths should use '\', but GN creates build commands
  # that use '/'. os.path.normpath converts to the separator of the current
  # platform so comparisons against the hard-coded lists work.
  source_file = os.path.normpath(command[flag_pos + 1])
  if os.path.splitext(source_file)[1] not in ('.c', '.cc', '.cpp', '.cxx',
                                              '.m', '.mm', '.S'):
    raise Exception('Invalid source file %s found' % source_file)

  exclusion_list = _COVERAGE_EXCLUSION_LIST_MAP.get(
      target_os, _DEFAULT_COVERAGE_EXCLUSION_LIST)
  force_list = _COVERAGE_FORCE_LIST_MAP.get(target_os, [])

  # Precedence: force list, then exclusion list, then --files-to-instrument.
  remove_flags = False
  if source_file not in force_list:
    if source_file in exclusion_list:
      remove_flags = True
    elif instrument_file:
      with open(instrument_file) as f:
        if source_file not in f.read():
          remove_flags = True

  if remove_flags:
    _remove_flags_from_command(command)
  return subprocess.call(command)


if __name__ == '__main__':
  sys.exit(main())
| |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 22 14:44:00 2016
@author: belinkov
"""
from matplotlib import pyplot as plt
import numpy as np
### effect of representation ###
# ar-he, uni, 2lstm500, after layer 2
# accuracies: unseen, seen, all
# X-axis group labels and legend entries for the representation-type plots.
labels = ['Unseen', 'Seen', 'All']
groups = ['POS', 'Morphology']
sets = ['Word', 'Char']
# Accuracies (%) in [unseen, seen, all] order for each representation/task.
word_pos_accs = np.array([37.93, 81.69, 78.20])
char_pos_accs = np.array([75.51, 93.95, 92.48])
word_morph_accs = np.array([17.21, 69.36, 65.20])
char_morph_accs = np.array([49.89, 82.24, 79.66])
def autolabel(rects):
    """Write each bar's height, rounded to one decimal, just above the bar."""
    for bar in rects:
        h = bar.get_height()
        x_center = bar.get_x() + bar.get_width() / 2.
        plt.text(x_center, 1.01 * h,
                 '{}'.format(np.round(h, 1)),
                 ha='center', va='bottom')
def plot_bars_two_sets(accs1, accs2, sets, labels, title, fignum, filename, auto_label=True):
    """Grouped bar chart comparing two result sets side by side.

    accs1/accs2 hold one value per entry of `labels`; `sets` gives the two
    legend names. The figure is written to `filename`.
    """
    assert len(accs1) == len(accs2) and len(accs2) == len(labels), 'incompatible arguments in plot_bars_two_sets'
    plt.figure(fignum)
    positions = np.arange(len(labels))
    bar_width = 0.35
    bars_a = plt.bar(positions, accs1, bar_width, color='r', hatch='/', label=sets[0])
    bars_b = plt.bar(positions + bar_width, accs2, bar_width, color='y', hatch='\\', label=sets[1])
    plt.ylabel('Accuracy', size='large', fontweight='demibold')
    plt.title(title, fontweight='demibold')
    plt.xticks(positions + bar_width, labels, size='large', fontweight='demibold')
    plt.legend(loc='upper left', prop={'size':12})
    if auto_label:
        # Annotate each bar with its numeric height.
        autolabel(bars_a)
        autolabel(bars_b)
    plt.savefig(filename)
def plot_bars_two_sets_stacked(word_pos_accs, char_pos_accs, word_morph_accs, char_morph_accs, sets, labels, title, fignum, filename):
    """Stacked bar chart: char accuracy drawn as an increment on top of word
    accuracy, with POS as the left bar of each pair and morphology as the
    right bar. Saves the figure to `filename`.
    """
    assert len(word_pos_accs) == len(char_pos_accs) and len(char_pos_accs) == len(word_morph_accs) and len(word_morph_accs) == len(char_morph_accs), 'incompatible arguments in plot_bars_two_sets'
    plt.figure(fignum)
    xs = np.arange(len(labels))
    w = 0.35
    # POS pair: word baseline plus the char-over-word increment stacked on top.
    plt.bar(xs, word_pos_accs, w, color='r', hatch='/', label=sets[0])
    plt.bar(xs, char_pos_accs - word_pos_accs, w, bottom=word_pos_accs, color='y', hatch='/', label=sets[1])
    # Morphology pair, offset by one bar width.
    plt.bar(xs + w, word_morph_accs, w, color='r', hatch='\\', label=sets[2])
    plt.bar(xs + w, char_morph_accs - word_morph_accs, w, bottom=word_morph_accs, color='y', hatch='\\', label=sets[3])
    plt.ylabel('Accuracy', size='large', fontweight='demibold')
    plt.title(title, fontweight='demibold')
    plt.xticks(xs + w, labels, size='large', fontweight='demibold')
    plt.legend(loc='upper left', prop={'size':12})
    plt.savefig(filename)
def plot_bars_two_sets_ratios(word_pos_accs, char_pos_accs, word_morph_accs, char_morph_accs, sets, labels, title, fignum, filename):
    """Bar chart of the absolute accuracy improvement of char over word
    representations, plotted for POS and for morphology side by side.

    The original version also computed relative error-reduction arrays that
    were never plotted; that dead computation has been removed.
    Saves the figure to `filename`.
    """
    assert len(word_pos_accs) == len(char_pos_accs) and len(char_pos_accs) == len(word_morph_accs) and len(word_morph_accs) == len(char_morph_accs), 'incompatible arguments in plot_bars_two_sets'
    plt.figure(fignum)
    ind = np.arange(len(labels))
    width = 0.35
    # Absolute accuracy gain of the char-based model over the word-based one.
    pos_gain = np.array(char_pos_accs, dtype='float') - np.array(word_pos_accs, dtype='float')
    morph_gain = np.array(char_morph_accs, dtype='float') - np.array(word_morph_accs, dtype='float')
    plt.bar(ind, pos_gain, width, color='g', hatch='/', label=sets[0])
    plt.bar(ind + width, morph_gain, width, color='c', hatch='\\', label=sets[1])
    plt.ylabel('Improvement in Accuracy', size='large', fontweight='demibold')
    plt.title(title, fontweight='demibold')
    plt.xticks(ind + width/2, labels, size='large', fontweight='demibold')
    plt.legend(loc='upper right', prop={'size':14})
    plt.tight_layout()
    plt.savefig(filename)
#plot_bars_two_sets(word_pos_accs, char_pos_accs, sets, labels, 'POS Accuracy by Representation Type', 1, 'pos-acc-repr-type.png', auto_label=False)
#plot_bars_two_sets(word_morph_accs, char_morph_accs, sets, labels, 'Morphology Accuracy by Representation Type', 2, 'morph-acc-repr-type.png', auto_label=False)
sets_stacked = ['Word POS', 'Char POS', 'Word Morph', 'Char Morph']
#plot_bars_two_sets_stacked(word_pos_accs, char_pos_accs, word_morph_accs, char_morph_accs, sets_stacked, labels, 'Accuracy by Representation Type', 1, 'acc-repr-type-stacked.png')
#plot_bars_two_sets_ratios(word_pos_accs, char_pos_accs, word_morph_accs, char_morph_accs, ['POS', 'Morph'], labels, 'Improvement in POS and Morphology Accuracy', 111, 'acc-repr-type-diff.png')
### effect of layer depth ###
# ar-he, uni, 2lstm500
# Per-layer accuracies (%) for each representation/task combination.
word_pos_layer2_acc = 78.20
word_pos_layer1_acc = 79.4
word_pos_layer0_acc = 77.25
word_morph_layer2_acc = 65.20
word_morph_layer1_acc = 67.03
word_morph_layer0_acc = 63.75
char_pos_layer2_acc = 92.48
char_pos_layer1_acc = 94.05
char_pos_layer0_acc = 93.27
char_morph_layer2_acc = 79.66
char_morph_layer1_acc = 81.06
char_morph_layer0_acc = 76.86
word_layer2_bleu = 9.91
word_layer1_bleu = 8.80
char_layer2_bleu = 10.65
char_layer1_bleu = 10.09
# layer2 - layer1
word_pos_diff = word_pos_layer2_acc - word_pos_layer1_acc
word_morph_diff = word_morph_layer2_acc - word_morph_layer1_acc
word_bleu_diff = word_layer2_bleu - word_layer1_bleu
char_pos_diff = char_pos_layer2_acc - char_pos_layer1_acc
char_morph_diff = char_morph_layer2_acc - char_morph_layer1_acc
char_bleu_diff = char_layer2_bleu - char_layer1_bleu
word_diffs = [word_pos_diff, word_morph_diff, word_bleu_diff]
char_diffs = [char_pos_diff, char_morph_diff, char_bleu_diff]
# NOTE(review): the rebinding below shadows the np.array versions of
# word_pos_accs etc. defined near the top of the file.
word_pos_accs = [word_pos_layer0_acc, word_pos_layer1_acc, word_pos_layer2_acc]
word_morph_accs = [word_morph_layer0_acc, word_morph_layer1_acc, word_morph_layer2_acc]
char_pos_accs = [char_pos_layer0_acc, char_pos_layer1_acc, char_pos_layer2_acc]
char_morph_accs = [char_morph_layer0_acc, char_morph_layer1_acc, char_morph_layer2_acc]
layer_accs = [word_pos_accs, word_morph_accs, char_pos_accs, char_morph_accs]
layer_labels = ['Word-POS', 'Word-Morph', 'Char-POS', 'Char-Morph']
layer_colors = ['r', 'y', 'g', 'c']
layer_markers = ['o', 's', 'P', '*']
# effect of layer depth in different languages
ar_en_word_layer0, ar_en_word_layer1, ar_en_word_layer2 = 77.09, 81.07, 80.31
ar_he_word_layer0, ar_he_word_layer1, ar_he_word_layer2 = 77.25, 79.40, 78.20
de_en_word_layer0, de_en_word_layer1, de_en_word_layer2 = 91.14, 93.57, 93.54
fr_en_word_layer0, fr_en_word_layer1, fr_en_word_layer2 = 92.08, 95.06, 94.61
cz_en_word_layer0, cz_en_word_layer1, cz_en_word_layer2 = 76.26, 76.98, 75.71
word_layer0_all_langs = [ar_en_word_layer0, ar_he_word_layer0, de_en_word_layer0, fr_en_word_layer0, cz_en_word_layer0]
word_layer1_all_langs = [ar_en_word_layer1, ar_he_word_layer1, de_en_word_layer1, fr_en_word_layer1, cz_en_word_layer1]
word_layer2_all_langs = [ar_en_word_layer2, ar_he_word_layer2, de_en_word_layer2, fr_en_word_layer2, cz_en_word_layer2]
ar_en_word_layers = [ar_en_word_layer0, ar_en_word_layer1, ar_en_word_layer2]
ar_he_word_layers = [ar_he_word_layer0, ar_he_word_layer1, ar_he_word_layer2]
de_en_word_layers = [de_en_word_layer0, de_en_word_layer1, de_en_word_layer2]
fr_en_word_layers = [fr_en_word_layer0, fr_en_word_layer1, fr_en_word_layer2]
cz_en_word_layers = [cz_en_word_layer0, cz_en_word_layer1, cz_en_word_layer2]
# NOTE(review): layer_labels is redefined here, overwriting the per-series
# labels assigned above; downstream calls use this per-layer version.
layer_labels = ['Layer 0', 'Layer 1', 'Layer 2']
layer_sets = ['Ar-En', 'Ar-He', 'De-En', 'Fr-En', 'Cz-En']
def plot_bars_layer_all_langs(accs1, accs2, accs3, sets, labels, title, fignum, filename, indices=None, legend_loc=None, opacity=1.0):
    """Three grouped bars per label (one per layer), one group per language pair.

    accs1..accs3: layer-0..2 accuracies, one value per entry of `labels`.
    indices: optional explicit x positions (defaults to 0..len(labels)-1).
    legend_loc: matplotlib legend location (default 'upper left').
    Saves the figure to `filename`.
    """
    assert len(accs1) == len(accs2) and len(accs2) == len(accs3) and len(accs3) == len(labels), 'incompatible arguments in plot_bars_layer_all_langs'
    plt.figure(fignum)
    ind = indices
    # Identity test: `ind == None` is an elementwise comparison when `indices`
    # is an ndarray, which makes the `if` raise a ValueError.
    if ind is None:
        ind = np.arange(len(labels))
    width = 0.25
    plt.bar(ind, accs1, width, color='y', hatch='/', label=sets[0], alpha=opacity)
    plt.bar(ind + width, accs2, width, color='r', hatch='+', label=sets[1], alpha=opacity)
    plt.bar(ind + 2 * width, accs3, width, color='c', hatch='\\', label=sets[2], alpha=opacity)
    plt.ylabel('Accuracy', size='large', fontweight='demibold')
    plt.title(title, fontweight='demibold')
    plt.xticks(ind + width, labels, size='large', fontweight='demibold')
    loc = legend_loc if legend_loc is not None else 'upper left'
    plt.legend(loc=loc, prop={'size':14})
    plt.ylim([60,100])
    plt.tight_layout()
    plt.savefig(filename)
# Generate the cross-language layer-depth comparison figure at import time.
plot_bars_layer_all_langs(
    word_layer0_all_langs, word_layer1_all_langs, word_layer2_all_langs,
    layer_labels, layer_sets,
    'POS Accuracy by Representation Layer', 3333, 'rep-layer-acc-all-langs.png')
def plot_bars_two_groups(group1, group2, groups, labels, title, fignum, filename):
    """Two clusters of bars (one cluster per group), separated by a gap, with
    a horizontal zero line for signed differences. Saves to `filename`."""
    assert len(group1) == len(group2) and len(group2) == len(labels), 'incompatible arguments in plot_bars_two_groups'
    plt.figure(fignum)
    n = len(labels)
    xs = np.arange(n)
    w = 1.0
    plt.bar(xs, group1, w, color='r', hatch='/', label=groups[0])
    # Second cluster starts after the first one plus a one-bar gap.
    plt.bar(xs + n + w, group2, w, color='y', hatch='\\', label=groups[1])
    plt.ylabel('Change in Accuracy or BLEU', size='large', fontweight='demibold')
    plt.title(title, fontweight='demibold')
    tick_positions = np.concatenate((xs + w/2, n + xs + w + w/2))
    plt.xticks(tick_positions, labels + labels, size='large')
    plt.axhline(color='black')
    plt.legend()
    plt.tight_layout()
    plt.savefig(filename)
#plot_bars_two_groups(word_diffs, char_diffs, ['Word', 'Char'], ['POS', 'Morph', 'BLEU'], 'Effect of Representation Layer', 3, 'layer-effect.png')
def plot_lines(xs, accs, labels, colors, markers, title, fignum, filename):
    """Dashed line plot of accuracy vs. layer for several series.

    accs, labels, colors, markers are parallel sequences, one entry per
    series. Saves the figure to `filename`.
    """
    plt.figure(fignum)
    # zip over the parallel lists instead of the Python-2-only
    # `for i in xrange(len(accs))` index loop.
    for series, label, color, marker in zip(accs, labels, colors, markers):
        plt.plot(xs, series, '--' + marker, color=color, label=label, lw=2, markersize=10)
    plt.title(title, fontweight='demibold')
    plt.xlabel('Layer', size='large', fontweight='demibold')
    plt.ylabel('Accuracy', size='large', fontweight='demibold')
    plt.xticks([0, 1, 2])
    plt.ylim(60,100)
    plt.legend(loc=(0.02,0.5), prop={'weight':'medium'})
    plt.tight_layout()
    plt.savefig(filename)
#plot_lines([0, 1, 2], layer_accs, layer_labels, layer_colors, layer_markers, 'Accuracy by Representation Layer', 33, 'rep-layer-acc-lines.png')
def plot_bars_five_sets(accs1, accs2, accs3, accs4, accs5, sets, labels, title, fignum, filename, indices=None, legend_loc=None, opacity=1.0):
    """Grouped bar chart comparing five sets of results.

    accs1..accs5: per-label values, one sequence per entry of `sets`.
    indices: optional explicit x positions (defaults to 0..len(labels)-1).
    legend_loc: matplotlib legend location (default 'upper left').
    Saves the figure to `filename`.
    """
    assert len(accs1) == len(accs2) and len(accs2) == len(accs3) and len(accs3) == len(accs4) and len(accs4) == len(accs5) and len(accs5) == len(labels), 'incompatible arguments in plot_bars_five_sets'
    plt.figure(fignum)
    ind = indices
    # Identity test: `ind == None` is elementwise (and ambiguous in an `if`)
    # when `indices` is an ndarray.
    if ind is None:
        ind = np.arange(len(labels))
    width = 0.2
    plt.bar(ind, accs1, width, color='r', hatch='/', label=sets[0], alpha=opacity)
    plt.bar(ind + width, accs2, width, color='y', hatch='O', label=sets[1], alpha=opacity)
    plt.bar(ind + 2 * width, accs3, width, color='g', hatch='+', label=sets[2], alpha=opacity)
    plt.bar(ind + 3 * width, accs4, width, color='c', hatch='\\', label=sets[3], alpha=opacity)
    plt.bar(ind + 4 * width, accs5, width, color='b', hatch='*', label=sets[4], alpha=opacity)
    plt.ylabel('Accuracy', size='large', fontweight='demibold')
    plt.title(title, fontweight='demibold')
    plt.xticks(ind + 2 * width, labels, size='large', fontweight='demibold')
    loc = legend_loc if legend_loc is not None else 'upper left'
    plt.legend(loc=loc, prop={'size':12})
    plt.tight_layout()
    plt.savefig(filename)
#plot_bars_five_sets(ar_en_word_layers, ar_he_word_layers, de_en_word_layers, fr_en_word_layers, cz_en_word_layers, layer_sets, layer_labels, 'POS Accuracy by Representation Layer', 333, 'rep-layer-acc-all-langs.png')
### effect of target language ###
# uni, 2lstm500
# Accuracies (%) by target language (He/En/Ar/De) for each task.
word_pos_he, word_pos_en, word_pos_ar, word_pos_de = 78.13, 80.21, 67.21, 78.85
char_pos_he, char_pos_en, char_pos_ar, char_pos_de = 92.67, 93.63, 87.72, 93.05
word_morph_he, word_morph_en, word_morph_ar, word_morph_de = 64.87, 67.18, 55.63, 65.91
char_morph_he, char_morph_en, char_morph_ar, char_morph_de = 80.5, 81.49, 75.21, 80.61
bleu_word_he, bleu_char_he = 9.51, 11.15
# English BLEU is averaged over two runs.
bleu_word_en_1, bleu_word_en_2, bleu_char_en_1, bleu_char_en_2 = 24.72, 22.88, 29.46, 26.18
bleu_word_en, bleu_char_en = np.mean([bleu_word_en_1, bleu_word_en_2]), np.mean([bleu_char_en_1, bleu_char_en_2])
# NOTE(review): 80.43/75.48 look like accuracies, not BLEU scores — verify.
bleu_word_ar, bleu_char_ar = 80.43, 75.48
bleu_word_de, bleu_char_de = 11.49, 12.86
labels = ['POS', 'BLEU']
sets = ['Ar', 'He', 'En']
word_pos_accs = [word_pos_ar, word_pos_he, word_pos_en]
word_pos_accs_all = [word_pos_ar, word_pos_he, word_pos_en, word_pos_de]
word_bleus = [bleu_word_ar, bleu_word_he, bleu_word_en]
word_bleus_all = [bleu_word_ar, bleu_word_he, bleu_word_en, bleu_word_de]
ar_word_pos_bleu = [word_pos_ar, bleu_word_ar]
en_word_pos_bleu = [word_pos_en, bleu_word_en]
he_word_pos_bleu = [word_pos_he, bleu_word_he]
de_word_pos_bleu = [word_pos_de, bleu_word_de]
ar_pos_bleu = [word_pos_ar, bleu_word_ar, char_pos_ar, bleu_char_ar]
en_pos_bleu = [word_pos_en, bleu_word_en, char_pos_en, bleu_char_en]
he_pos_bleu = [word_pos_he, bleu_word_he, char_pos_he, bleu_char_he]
de_pos_bleu = [word_pos_de, bleu_word_de, char_pos_de, bleu_char_de]
labels2 = ['Word POS', 'Word BLEU', 'Char POS', 'Char BLEU']
ar_word_morph_bleu = [word_morph_ar, bleu_word_ar]
en_word_morph_bleu = [word_morph_en, bleu_word_en]
he_word_morph_bleu = [word_morph_he, bleu_word_he]
ar_morph_bleu = [word_morph_ar, bleu_word_ar, char_morph_ar, bleu_char_ar]
en_morph_bleu = [word_morph_en, bleu_word_en, char_morph_en, bleu_char_en]
he_morph_bleu = [word_morph_he, bleu_word_he, char_morph_he, bleu_char_he]
labels3 = ['Word Morph', 'Word BLEU', 'Char Morph', 'Char BLEU']
ar_word_pos_morph_bleu = [word_pos_ar, word_morph_ar, bleu_word_ar]
en_word_pos_morph_bleu = [word_pos_en, word_morph_en, bleu_word_en]
he_word_pos_morph_bleu = [word_pos_he, word_morph_he, bleu_word_he]
de_word_pos_morph_bleu = [word_pos_de, word_morph_de, bleu_word_de]
def plot_bars_three_sets(accs1, accs2, accs3, sets, labels, title, fignum, filename, indices=None, legend_loc=None, ylabel='Accuracy or BLEU'):
    """Grouped bar chart comparing three sets of results.

    accs1..accs3: per-label values, one sequence per entry of `sets`.
    indices: optional explicit x positions (defaults to 0..len(labels)-1).
    legend_loc: matplotlib legend location (default 'upper left').
    Saves the figure to `filename`.
    """
    assert len(accs1) == len(accs2) and len(accs2) == len(accs3) and len(accs3) == len(labels), 'incompatible arguments in plot_bars_three_sets'
    plt.figure(fignum)
    ind = indices
    # Identity test: `ind == None` does an elementwise comparison when
    # `indices` is an ndarray (as the commented-out call sites pass) and the
    # resulting array is ambiguous in boolean context. A stray debug
    # `print ind` was also removed.
    if ind is None:
        ind = np.arange(len(labels))
    width = 0.25
    plt.bar(ind, accs1, width, color='r', hatch='/', label=sets[0])
    plt.bar(ind + width, accs2, width, color='y', hatch='O', label=sets[1])
    plt.bar(ind + 2 * width, accs3, width, color='g', hatch='+', label=sets[2])
    plt.ylabel(ylabel, size='large', fontweight='demibold')
    plt.title(title, fontweight='demibold')
    plt.xticks(ind + width + width/2, labels, size='large', fontweight='demibold')
    loc = legend_loc if legend_loc is not None else 'upper left'
    plt.legend(loc=loc, prop={'size':12})
    plt.tight_layout()
    plt.savefig(filename)
#plot_bars_three_sets(ar_word_pos_bleu, he_word_pos_bleu, en_word_pos_bleu, sets, labels, 'Effect of Target Language on POS Accuracy', 4, 'pos-acc-target-lang.png')
#plot_bars_three_sets(ar_pos_bleu, he_pos_bleu, en_pos_bleu, sets, labels2, 'Effect of Target Language on POS Accuracy', 5, 'pos-acc-target-lang2.png', indices=np.array([0,1,2.5,3.5]), legend_loc=(0.33,0.75))
#plot_bars_three_sets(ar_morph_bleu, he_morph_bleu, en_morph_bleu, sets, labels3, 'Effect of Target Language on Morph Accuracy', 6, 'morph-acc-target-lang2.png', indices=np.array([0,1,2.5,3.5]), legend_loc=(0.33,0.75))
### effect of lstm unit ###
# Accuracies (%) when probing the LSTM hidden state vs. cell state.
word_pos_hidden_acc = 78.20
word_pos_cell_acc = 78.19
char_pos_hidden_acc = 92.48
char_pos_cell_acc = 90.27
word_morph_hidden_acc = 65.20
word_morph_cell_acc = 65.20 ### TODO verify
char_morph_hidden_acc = 79.66
char_morph_cell_acc = 78.15
### Semtags ###
# Semantic-tag accuracies in [unseen, seen, all] order.
semtags_word_layer2 = [49.26, 86.86, 86.29]
semtags_char_layer2 = [68.63, 89.16, 88.85]
semtags_word_layer1 = [48.34, 87.42, 86.83]
semtags_char_layer1 = [62.24, 89.51, 89.09]
sets = ['Word, layer 2', 'Char, layer 2', 'Word, layer 1', 'Char, layer 1']
labels = ['Unseen', 'Seen', 'All']
def plot_bars_four_sets(accs1, accs2, accs3, accs4, sets, labels, title, fignum, filename, indices=None, legend_loc=None, opacity=1.0):
    """Grouped bar chart comparing four sets of results.

    accs1..accs4: per-label values, one sequence per entry of `sets`.
    indices: optional explicit x positions (defaults to 0..len(labels)-1).
    legend_loc: matplotlib legend location (default 'upper left').
    Saves the figure to `filename`.
    """
    assert len(accs1) == len(accs2) and len(accs2) == len(accs3) and len(accs3) == len(accs4) and len(accs4) == len(labels), 'incompatible arguments in plot_bars_four_sets'
    plt.figure(fignum)
    ind = indices
    # Identity test: `ind == None` is elementwise (and ambiguous in an `if`)
    # when `indices` is an ndarray.
    if ind is None:
        ind = np.arange(len(labels))
    width = 0.2
    plt.bar(ind, accs1, width, color='r', hatch='/', label=sets[0], alpha=opacity)
    plt.bar(ind + width, accs2, width, color='y', hatch='O', label=sets[1], alpha=opacity)
    plt.bar(ind + 2 * width, accs3, width, color='g', hatch='+', label=sets[2], alpha=opacity)
    plt.bar(ind + 3 * width, accs4, width, color='c', hatch='\\', label=sets[3], alpha=opacity)
    plt.ylabel('Accuracy or BLEU', size='large', fontweight='demibold')
    plt.title(title, fontweight='demibold')
    plt.xticks(ind + width + width/2, labels, size='large', fontweight='demibold')
    loc = legend_loc if legend_loc is not None else 'upper left'
    plt.legend(loc=loc, prop={'size':14})
    plt.tight_layout()
    plt.savefig(filename)
#plot_bars_four_sets(semtags_word_layer2, semtags_char_layer2, semtags_word_layer1, semtags_char_layer1, sets, labels, 'Semtag Accuracy by Representation Type and Layer', 7, 'semtag-acc-type-layer.png', legend_loc='upper left')
# more on target language
labels = ['POS', 'BLEU']
sets_all = ['Ar', 'He', 'De', 'En']
#plot_bars_four_sets(ar_word_pos_bleu, he_word_pos_bleu, de_word_pos_bleu, en_word_pos_bleu, sets_all, labels, 'Effect of Target Language on POS Accuracy', 44, 'pos-acc-target-lang-all.png', legend_loc='upper right')
labels_pos_morph_bleu = ['POS', 'Morphology', 'BLEU']
# Executed at import time: writes pos-morph-acc-target-lang-all.png.
plot_bars_four_sets(ar_word_pos_morph_bleu, he_word_pos_morph_bleu, de_word_pos_morph_bleu, en_word_pos_morph_bleu, sets_all, labels_pos_morph_bleu, 'Effect of Target Language on POS/Morph Accuracy', 44, 'pos-morph-acc-target-lang-all.png', legend_loc='upper right')
# order is: word layer 1, char layer 1, word layer 2, char layer 2
semtags_unseen = [semtags_word_layer1[0], semtags_char_layer1[0], semtags_word_layer2[0], semtags_char_layer2[0]]
semtags_seen = [semtags_word_layer1[1], semtags_char_layer1[1], semtags_word_layer2[1], semtags_char_layer2[1]]
semtags_all = [semtags_word_layer1[2], semtags_char_layer1[2], semtags_word_layer2[2], semtags_char_layer2[2]]
sets = ['Word, L1', 'Char, L1', 'Word, L2', 'Char, L2']
labels = ['Unseen', 'Seen', 'All']
def plot_bars_subplots_four_sets_three_groups(accs1, accs2, accs3, sets, labels, title, fignum, filename, indices=None, legend_loc=None):
    """Three side-by-side subplots (one per entry of `labels`), each showing
    four bars (one per entry of `sets`).

    accs1..accs3: four values each, one per set, for the three groups.
    indices: optional explicit bar positions (defaults to 0, 0.5, 1, 1.5).
    A shared legend is attached below the middle subplot; the figure is
    saved to `filename`.
    """
    plt.figure(fignum)
    ind = indices
    # Identity test: `ind == None` would be an elementwise comparison for an
    # ndarray `indices`. A stray debug `print ind` was also removed.
    if ind is None:
        ind = np.arange(len(sets)) * 0.5
    width = 0.5
    bar_colors = ['r', 'y', 'g', 'c']
    bar_hatches = ['/', 'O', '+', '\\']
    def _subplot(plot_index, accs, group_label, ylim, ylabel=None):
        # One subplot: four labelled bars plus the group title and y-range.
        # Replaces three copies of the same drawing code in the original.
        ax = plt.subplot(1, 3, plot_index)
        for pos, val, color, hatch, set_label in zip(ind, accs, bar_colors, bar_hatches, sets):
            plt.bar(pos, val, width, color=color, hatch=hatch, label=set_label)
        if ylabel:
            plt.ylabel(ylabel, size='large', fontweight='demibold')
        plt.title(group_label, fontweight='demibold')
        plt.xticks([0,0.5,1,1.5], ['' for x in sets], size='large', fontweight='demibold')
        plt.ylim(ylim)
        return ax
    ax1 = _subplot(1, accs1, labels[0], [40,95], ylabel='Accuracy')
    ax2 = _subplot(2, accs2, labels[1], [80,95])
    ax3 = _subplot(3, accs3, labels[2], [80,95])
    # Shrink the axes upward to make room for the shared legend below.
    for ax in [ax1, ax2, ax3]:
        box = ax.get_position()
        ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])
    lgd = ax2.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),
                     fancybox=True, shadow=True, ncol=4, fontsize=10)
    sup = plt.suptitle('Semtag Accuracy', fontweight='demibold', size=16)
    plt.subplots_adjust(top=0.86)
    # Include the legend and suptitle in the saved bounding box.
    plt.savefig(filename, bbox_extra_artists=(lgd, sup), bbox_inches='tight')
#plot_bars_subplots_four_sets_three_groups(semtags_unseen, semtags_seen, semtags_all, sets, labels, 'Semtag Accuracy', 8, 'semtag-acc.png')
| |
"""Support for Voice mailboxes."""
from __future__ import annotations
import asyncio
from contextlib import suppress
from datetime import timedelta
from http import HTTPStatus
import logging
from aiohttp import web
from aiohttp.web_exceptions import HTTPNotFound
import async_timeout
from homeassistant.components import frontend
from homeassistant.components.http import HomeAssistantView
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.setup import async_prepare_setup_platform
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
# Integration domain and the event fired whenever a mailbox reports changes.
DOMAIN = "mailbox"
EVENT = "mailbox_updated"
# Content types a mailbox may declare for its media.
CONTENT_TYPE_MPEG = "audio/mpeg"
CONTENT_TYPE_NONE = "none"
# Passed to EntityComponent when mailbox entities are added.
SCAN_INTERVAL = timedelta(seconds=30)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Track states and offer events for mailboxes."""
    # Shared list: the HTTP views below hold a reference to it, so mailboxes
    # appended later by async_setup_platform become visible to the views.
    mailboxes: list[Mailbox] = []
    frontend.async_register_built_in_panel(hass, "mailbox", "mailbox", "mdi:mailbox")
    hass.http.register_view(MailboxPlatformsView(mailboxes))
    hass.http.register_view(MailboxMessageView(mailboxes))
    hass.http.register_view(MailboxMediaView(mailboxes))
    hass.http.register_view(MailboxDeleteView(mailboxes))
    async def async_setup_platform(
        p_type: str,
        p_config: ConfigType | None = None,
        discovery_info: DiscoveryInfoType | None = None,
    ) -> None:
        """Set up a mailbox platform."""
        if p_config is None:
            p_config = {}
        if discovery_info is None:
            discovery_info = {}
        platform = await async_prepare_setup_platform(hass, config, DOMAIN, p_type)
        if platform is None:
            _LOGGER.error("Unknown mailbox platform specified")
            return
        _LOGGER.info("Setting up %s.%s", DOMAIN, p_type)
        mailbox = None
        try:
            # Prefer the async handler factory; fall back to running the
            # blocking one in the executor.
            if hasattr(platform, "async_get_handler"):
                mailbox = await platform.async_get_handler(
                    hass, p_config, discovery_info
                )
            elif hasattr(platform, "get_handler"):
                mailbox = await hass.async_add_executor_job(
                    platform.get_handler, hass, p_config, discovery_info
                )
            else:
                raise HomeAssistantError("Invalid mailbox platform.")
            if mailbox is None:
                _LOGGER.error("Failed to initialize mailbox platform %s", p_type)
                return
        except Exception:  # pylint: disable=broad-except
            # A broken platform must not abort setup of the others.
            _LOGGER.exception("Error setting up platform %s", p_type)
            return
        mailboxes.append(mailbox)
        mailbox_entity = MailboxEntity(mailbox)
        # NOTE(review): a fresh EntityComponent is created per platform here;
        # confirm this is intentional rather than one shared component.
        component = EntityComponent(
            logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL
        )
        await component.async_add_entities([mailbox_entity])
    # Set up all statically configured platforms concurrently.
    setup_tasks = [
        asyncio.create_task(async_setup_platform(p_type, p_config))
        for p_type, p_config in config_per_platform(config, DOMAIN)
        if p_type is not None
    ]
    if setup_tasks:
        await asyncio.wait(setup_tasks)
    async def async_platform_discovered(platform, info):
        """Handle for discovered platform."""
        await async_setup_platform(platform, discovery_info=info)
    # Also set up platforms that announce themselves via discovery later on.
    discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)
    return True
class MailboxEntity(Entity):
    """Badge entity exposing the message count of one mailbox platform."""

    def __init__(self, mailbox: Mailbox) -> None:
        """Wrap the given mailbox; the count starts at zero until updated."""
        self.mailbox = mailbox
        self.message_count = 0

    async def async_added_to_hass(self):
        """Subscribe to mailbox-updated events and request a first refresh."""

        @callback
        def _on_mailbox_updated(event):
            self.async_schedule_update_ha_state(True)

        self.hass.bus.async_listen(EVENT, _on_mailbox_updated)
        self.async_schedule_update_ha_state(True)

    @property
    def name(self):
        """Return the name of the entity."""
        return self.mailbox.name

    @property
    def state(self):
        """Return the current message count as a string."""
        return str(self.message_count)

    async def async_update(self):
        """Refresh the message count from the mailbox platform."""
        self.message_count = len(await self.mailbox.async_get_messages())
class Mailbox:
    """Base class describing a mailbox device.

    Concrete platforms override the properties and async methods below.
    """

    def __init__(self, hass, name):
        """Store the Home Assistant instance and the mailbox name."""
        self.hass = hass
        self.name = name

    @callback
    def async_update(self):
        """Notify listeners that the mailbox contents changed."""
        self.hass.bus.async_fire(EVENT)

    @property
    def can_delete(self):
        """Whether messages can be deleted (default: no)."""
        return False

    @property
    def has_media(self):
        """Whether messages have attached media files (default: no)."""
        return False

    @property
    def media_type(self):
        """Content type of served media (e.g. audio/mpeg); must be overridden."""
        raise NotImplementedError()

    async def async_get_media(self, msgid):
        """Return the raw media blob for `msgid`; must be overridden."""
        raise NotImplementedError()

    async def async_get_messages(self):
        """Return the current list of messages; must be overridden."""
        raise NotImplementedError()

    async def async_delete(self, msgid):
        """Delete the specified message; must be overridden."""
        raise NotImplementedError()
class StreamError(Exception):
    """Media streaming exception."""
    # Raised by Mailbox.async_get_media implementations; the media view
    # converts it into an HTTP 500 response.
class MailboxView(HomeAssistantView):
    """Base mailbox view."""

    def __init__(self, mailboxes: list[Mailbox]) -> None:
        """Keep a reference to the shared mailbox list."""
        self.mailboxes = mailboxes

    def get_mailbox(self, platform):
        """Return the mailbox named `platform` or raise 404."""
        found = next((mb for mb in self.mailboxes if mb.name == platform), None)
        if found is None:
            raise HTTPNotFound
        return found
class MailboxPlatformsView(MailboxView):
    """View to return the list of mailbox platforms."""

    url = "/api/mailbox/platforms"
    name = "api:mailbox:platforms"

    async def get(self, request: web.Request) -> web.Response:
        """Retrieve the list of platforms with their capability flags."""
        # Build the payload with a comprehension instead of a manual append
        # loop (same output, clearer intent).
        platforms = [
            {
                "name": mailbox.name,
                "has_media": mailbox.has_media,
                "can_delete": mailbox.can_delete,
            }
            for mailbox in self.mailboxes
        ]
        return self.json(platforms)
class MailboxMessageView(MailboxView):
    """View to return the list of messages."""

    url = "/api/mailbox/messages/{platform}"
    name = "api:mailbox:messages"

    async def get(self, request, platform):
        """Return the platform's current messages as JSON (404 if unknown)."""
        messages = await self.get_mailbox(platform).async_get_messages()
        return self.json(messages)
class MailboxDeleteView(MailboxView):
    """View to delete selected messages."""

    url = "/api/mailbox/delete/{platform}/{msgid}"
    name = "api:mailbox:delete"

    async def delete(self, request, platform, msgid):
        """Delete message `msgid` on the given platform (404 if unknown)."""
        await self.get_mailbox(platform).async_delete(msgid)
class MailboxMediaView(MailboxView):
    """View to return a media file."""

    url = r"/api/mailbox/media/{platform}/{msgid}"
    # NOTE(review): the registered view name keeps a legacy 'asteriskmbox'
    # identifier; renaming it would change the public route name.
    name = "api:asteriskmbox:media"

    async def get(self, request, platform, msgid):
        """Retrieve media."""
        mailbox = self.get_mailbox(platform)
        # On timeout or cancellation, suppress() swallows the exception and
        # control falls through to the final 500 response below.
        with suppress(asyncio.CancelledError, asyncio.TimeoutError):
            async with async_timeout.timeout(10):
                try:
                    stream = await mailbox.async_get_media(msgid)
                except StreamError as err:
                    _LOGGER.error("Error getting media: %s", err)
                    return web.Response(status=HTTPStatus.INTERNAL_SERVER_ERROR)
            # Empty/None stream is also treated as an internal error.
            if stream:
                return web.Response(body=stream, content_type=mailbox.media_type)
        return web.Response(status=HTTPStatus.INTERNAL_SERVER_ERROR)
| |
"""
-----
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License. See http://scipy.org.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Author: Pearu Peterson <pearu@cens.ioc.ee>
Created: May 2006
-----
"""
__all__ = ['Statement','BeginStatement','EndStatement', 'Variable',
'AttributeHolder','ProgramBlock']
import re
import sys
import copy
import logging
from readfortran import Line, Comment
from numpy.distutils.misc_util import yellow_text, red_text
from utils import split_comma, specs_split_comma, is_int_literal_constant
from utils import classes
logger = logging.getLogger('fparser')
class AttributeHolder(object):
    # copied from symbolic.base module
    """
    Defines an object with predefined attributes. Only those attributes
    are allowed that are specified as keyword arguments of a constructor.
    When an argument is callable then the corresponding attribute will
    be read-only and set by the value the callable object returns.
    """
    def __init__(self, **kws):
        # Map of attribute name -> value (or a callable producing the value).
        self._attributes = {}
        # Names whose constructor values were callables; these become read-only.
        self._readonly = []
        for k, v in kws.items():
            self._attributes[k] = v
            if callable(v):
                self._readonly.append(k)
        return

    def __getattr__(self, name):
        """Look up `name`, lazily evaluating and caching callable values."""
        if name not in self._attributes:
            # Python-3-compatible raise syntax; the original used the
            # Python-2-only `raise E, msg` form.
            raise AttributeError('%s instance has no attribute %r, '
                                 'expected attributes: %s'
                                 % (self.__class__.__name__, name,
                                    ','.join(self._attributes.keys())))
        value = self._attributes[name]
        if callable(value):
            # Evaluate once and cache the result so later reads are plain values.
            value = value()
            self._attributes[name] = value
        return value

    def __setattr__(self, name, value):
        """Allow assignment only to declared, non-readonly attributes."""
        if name in ['_attributes', '_readonly']:
            # Internal bookkeeping attributes bypass the checks below.
            self.__dict__[name] = value
            return
        if name in self._readonly:
            raise AttributeError('%s instance attribute %r is readonly'
                                 % (self.__class__.__name__, name))
        if name not in self._attributes:
            raise AttributeError('%s instance has no attribute %r, '
                                 'expected attributes: %s'
                                 % (self.__class__.__name__, name,
                                    ','.join(self._attributes.keys())))
        self._attributes[name] = value

    def isempty(self):
        """Return True when every attribute value is falsy."""
        for k in self._attributes.keys():
            v = getattr(self, k)
            if v: return False
        return True

    def __repr__(self): return self.torepr()

    def torepr(self, depth=-1, tab = ''):
        """Return an indented, type-level summary of the held attributes."""
        if depth==0: return tab + self.__class__.__name__
        l = [self.__class__.__name__+':']
        ttab = tab + ' '
        for k in self._attributes.keys():
            v = getattr(self,k)
            if v:
                if isinstance(v,list):
                    l.append(ttab + '%s=<%s-list>' % (k,len(v)))
                elif isinstance(v,dict):
                    l.append(ttab + '%s=<dict with keys %s>' % (k,v.keys()))
                else:
                    l.append(ttab + '%s=<%s>' % (k,type(v)))
        return '\n'.join(l)

    def todict(self):
        """Return a plain dict of attribute values (evaluating callables)."""
        d = {}
        for k in self._attributes.keys():
            v = getattr(self, k)
            d[k] = v
        return d
def get_base_classes(cls):
    """Return a tuple of all (transitive) base classes of *cls*,
    depth-first, ending with *cls* itself.  Bases reachable through
    several paths appear several times."""
    collected = []
    for base in cls.__bases__:
        collected.extend(get_base_classes(base))
    collected.extend(cls.__bases__)
    collected.append(cls)
    return tuple(collected)
class Variable(object):
    """A variable declared in some Fortran scope.

    Attributes:
      name       - name of the variable
      typedecl   - type declaration statement (resolved lazily, see get_typedecl)
      dimension  - list of dimension specs from a DIMENSION attribute
      bounds     - list of bounds specs from the entity declaration
      length     - CHARACTER length spec
      attributes - upper-cased attribute keywords, see known_attributes
      intent     - upper-cased intent specs, see known_intent_specs
      bind       - BIND(...) specs
      check      - CHECK(...) expressions
      init       - initialization expression
      parent     - Statement instance defining the variable
      parents    - all Statement instances that mention the variable

    After calling analyze(), arrays additionally carry .rank and, for
    explicit-shape arrays, .shape.
    """
    __metaclass__ = classes
    def __init__(self, parent, name):
        self.parent = parent
        self.parents = [parent]
        self.name = name
        self.typedecl = None
        self.dimension = None
        self.bounds = None
        self.length = None
        self.attributes = []
        self.intent = None
        self.bind = []
        self.check = []
        self.init = None
        # after calling analyze the following additional attributes are set:
        # .is_array:
        #    rank
        #    shape
        return
    def __repr__(self):
        # Show only the attributes that are actually set.
        l = []
        for a in ['name','typedecl','dimension','bounds','length','attributes','intent','bind','check','init']:
            v = getattr(self,a)
            if v:
                l.append('%s=%r' % (a,v))
        return 'Variable: ' + ', '.join(l)
    def get_bit_size(self):
        """Return the (approximate) storage size of the variable in bits."""
        typesize = self.typedecl.get_bit_size()
        if self.is_pointer():
            # The size of pointer descriptor is compiler version dependent. Read:
            #  http://www.nersc.gov/vendor_docs/intel/f_ug1/pgwarray.htm
            #  https://www.cca-forum.org/pipermail/cca-fortran/2003-February/000123.html
            #  https://www.cca-forum.org/pipermail/cca-fortran/2003-February/000122.html
            # On sgi descriptor size may be 128+ bits!
            if self.is_array():
                wordsize = 4 # XXX: on a 64-bit system it is 8.
                rank = len(self.bounds or self.dimension)
                return 6 * wordsize + 12 * rank
            return typesize
        if self.is_array():
            # NOTE(review): bounds/dimension hold tuples of strings, so this
            # reduce only makes sense for integer-like 1-tuple specs --
            # confirm against upstream fparser.
            size = reduce(lambda x,y:x*y,self.bounds or self.dimension,1)
            if self.length:
                size *= self.length
            return size * typesize
        if self.length:
            return self.length * typesize
        return typesize
    def get_typedecl(self):
        """Return the type declaration, deriving it from the parent's
        implicit rules when none was set explicitly."""
        if self.typedecl is None:
            self.set_type(self.parent.get_type(self.name))
        return self.typedecl
    def add_parent(self, parent):
        """Record an additional defining Statement; the last one added
        becomes the current .parent."""
        if id(parent) not in map(id, self.parents):
            self.parents.append(parent)
        self.parent = parent
        return
    def set_type(self, typedecl):
        """Set the type declaration, warning when it changes an existing one."""
        if self.typedecl is not None:
            if not self.typedecl==typedecl:
                self.parent.warning(\
                    'variable %r already has type %s,'\
                    ' resetting to %s' \
                    % (self.name, self.typedecl.tostr(),typedecl.tostr()))
        assert typedecl is not None
        self.typedecl = typedecl
        return
    def set_init(self, expr):
        """Set the initialization expression, warning on redefinition."""
        if self.init is not None:
            if not self.init==expr:
                # BUG FIX: this warning previously interpolated self.expr,
                # an attribute that does not exist, so triggering the
                # warning raised AttributeError instead of warning.
                self.parent.warning(\
                    'variable %r already has initialization %r, '\
                    ' resetting to %r' % (self.name, self.init, expr))
        self.init = expr
        return
    def set_dimension(self, dims):
        """Set dimension specs from a list of 'lower:upper' strings."""
        dims = [tuple(dim.split(':')) for dim in dims]
        dims = [tuple(map(str.strip, dim)) for dim in dims]
        if self.dimension is not None:
            if not self.dimension==dims:
                self.parent.warning(\
                    'variable %r already has dimension %r, '\
                    ' resetting to %r' % (self.name, self.dimension, dims))
        self.dimension = dims
        return
    def set_bounds(self, bounds):
        """Set bounds specs, warning on redefinition."""
        if self.bounds is not None:
            if not self.bounds==bounds:
                self.parent.warning(\
                    'variable %r already has bounds %r, '\
                    ' resetting to %r' % (self.name, self.bounds, bounds))
        self.bounds = bounds
        return
    def set_length(self, length):
        """Set the CHARACTER length, warning on redefinition."""
        if self.length is not None:
            if not self.length==length:
                self.parent.warning(\
                    'variable %r already has length %r, '\
                    ' resetting to %r' % (self.name, self.length, length))
        self.length = length
        return
    # Recognized intent specs; the non-standard ones (CACHE, HIDE, ...)
    # are f2py/pyf extensions.
    known_intent_specs = ['IN','OUT','INOUT','CACHE','HIDE', 'COPY',
                          'OVERWRITE', 'CALLBACK', 'AUX', 'C', 'INPLACE',
                          'OUT=']
    def set_intent(self, intent):
        """Merge the given intent specs into self.intent (no duplicates)."""
        if self.intent is None:
            self.intent = []
        for i in intent:
            if i not in self.intent:
                if i not in self.known_intent_specs:
                    self.parent.warning('unknown intent-spec %r for %r'\
                                        % (i, self.name))
                self.intent.append(i)
        return
    # Recognized declaration attributes (REQUIRED is an f2py extension).
    known_attributes = ['PUBLIC', 'PRIVATE', 'ALLOCATABLE', 'ASYNCHRONOUS',
                        'EXTERNAL', 'INTRINSIC', 'OPTIONAL', 'PARAMETER',
                        'POINTER', 'PROTECTED', 'SAVE', 'TARGET', 'VALUE',
                        'VOLATILE', 'REQUIRED']
    def is_intent_in(self):
        """Default intent is IN; HIDE/INPLACE/OUT/INOUT/OUTIN disable it."""
        if not self.intent: return True
        if 'HIDE' in self.intent: return False
        if 'INPLACE' in self.intent: return False
        if 'IN' in self.intent: return True
        if 'OUT' in self.intent: return False
        if 'INOUT' in self.intent: return False
        if 'OUTIN' in self.intent: return False
        return True
    def is_intent_inout(self):
        if not self.intent: return False
        if 'INOUT' in self.intent:
            # INOUT combined with IN/HIDE/INPLACE is contradictory.
            if 'IN' in self.intent or 'HIDE' in self.intent or 'INPLACE' in self.intent:
                self.warning('INOUT ignored in INPUT(%s)' % (', '.join(self.intent)))
                return False
            return True
        return False
    def is_intent_hide(self):
        if not self.intent: return False
        if 'HIDE' in self.intent: return True
        if 'OUT' in self.intent:
            # pure OUT arguments are hidden from the call signature
            return 'IN' not in self.intent and 'INPLACE' not in self.intent and 'INOUT' not in self.intent
        return False
    def is_intent_inplace(self): return self.intent and 'INPLACE' in self.intent
    def is_intent_out(self): return self.intent and 'OUT' in self.intent
    def is_intent_c(self): return self.intent and 'C' in self.intent
    def is_intent_cache(self): return self.intent and 'CACHE' in self.intent
    def is_intent_copy(self): return self.intent and 'COPY' in self.intent
    def is_intent_overwrite(self): return self.intent and 'OVERWRITE' in self.intent
    def is_intent_callback(self): return self.intent and 'CALLBACK' in self.intent
    def is_intent_aux(self): return self.intent and 'AUX' in self.intent
    def is_private(self):
        if 'PUBLIC' in self.attributes: return False
        if 'PRIVATE' in self.attributes: return True
        # fall back to the enclosing scope's accessibility rules
        return self.parent.parent.check_private(self.name)
    def is_public(self): return not self.is_private()
    def is_allocatable(self): return 'ALLOCATABLE' in self.attributes
    def is_external(self): return 'EXTERNAL' in self.attributes
    def is_intrinsic(self): return 'INTRINSIC' in self.attributes
    def is_parameter(self): return 'PARAMETER' in self.attributes
    def is_optional(self): return 'OPTIONAL' in self.attributes and 'REQUIRED' not in self.attributes and not self.is_intent_hide()
    def is_required(self):
        # NOTE(review): this mirrors is_optional() instead of negating it,
        # which looks suspicious (a "required" argument would normally be
        # one that is NOT optional) -- confirm against upstream fparser
        # before relying on this predicate.
        return self.is_optional() and not self.is_intent_hide()
    def is_pointer(self): return 'POINTER' in self.attributes
    def is_array(self): return not not (self.bounds or self.dimension)
    def is_scalar(self): return not self.is_array()
    def update(self, *attrs):
        """Apply declaration attributes (DIMENSION(..), INTENT(..),
        BIND(..), CHECK(..), or plain keywords) to this variable."""
        attributes = self.attributes
        if len(attrs)==1 and isinstance(attrs[0],(tuple,list)):
            attrs = attrs[0]
        for attr in attrs:
            lattr = attr.lower()
            uattr = attr.upper()
            if lattr.startswith('dimension'):
                # BUG FIX (portability): assert messages used Python-2-only
                # backtick repr syntax; replaced with repr() calls.
                assert self.dimension is None, repr((self.dimension,attr))
                l = attr[9:].lstrip()
                assert l[0]+l[-1]=='()', repr(l)
                self.set_dimension(split_comma(l[1:-1].strip(), self.parent.item))
                continue
            if lattr.startswith('intent'):
                l = attr[6:].lstrip()
                assert l[0]+l[-1]=='()', repr(l)
                self.set_intent(specs_split_comma(l[1:-1].strip(),
                                                  self.parent.item, upper=True))
                continue
            if lattr.startswith('bind'):
                l = attr[4:].lstrip()
                assert l[0]+l[-1]=='()', repr(l)
                self.bind = specs_split_comma(l[1:-1].strip(), self.parent.item,
                                              upper = True)
                continue
            if lattr.startswith('check'):
                l = attr[5:].lstrip()
                assert l[0]+l[-1]=='()', repr(l)
                self.check.extend(split_comma(l[1:-1].strip(), self.parent.item))
                continue
            if uattr not in attributes:
                if uattr not in self.known_attributes:
                    self.parent.warning('unknown attribute %r' % (attr))
                attributes.append(uattr)
        return
    def __str__(self):
        """Render the variable back as a Fortran declaration."""
        s = ''
        typedecl = self.get_typedecl()
        if typedecl is not None:
            s += typedecl.tostr() + ' '
        a = self.attributes[:]
        if self.dimension is not None:
            a.append('DIMENSION(%s)' % (', '.join([':'.join(spec) for spec in self.dimension])))
        if self.intent is not None:
            a.append('INTENT(%s)' % (', '.join(self.intent)))
        if self.bind:
            a.append('BIND(%s)' % (', '.join(self.bind)))
        if self.check:
            a.append('CHECK(%s)' % (', '.join(self.check)))
        if a:
            s += ', ' + ', '.join(a) + ' :: '
        s += self.name
        if self.bounds:
            s += '(%s)' % (', '.join([':'.join(spec) for spec in self.bounds]))
        if self.length:
            # integer literal lengths need no parenthesization
            if is_int_literal_constant(self.length):
                s += '*%s' % (self.length)
            else:
                s += '*(%s)' % (self.length)
        if self.init:
            s += ' = ' + self.init
        return s
    def get_array_spec(self):
        """Return the effective array spec; bounds win over dimension."""
        assert self.is_array(),'array_spec is available only for arrays'
        if self.bounds:
            if self.dimension:
                self.parent.warning('both bounds=%r and dimension=%r are defined, ignoring dimension.' % (self.bounds, self.dimension))
            array_spec = self.bounds
        else:
            array_spec = self.dimension
        return array_spec
    def is_deferred_shape_array(self):
        if not self.is_array(): return False
        return self.is_allocatable() or self.is_pointer()
    def is_assumed_size_array(self):
        if not self.is_array(): return False
        return self.get_array_spec()[-1][-1]=='*'
    def is_assumed_shape_array(self):
        if not self.is_array(): return False
        if self.is_deferred_shape_array(): return False
        # an empty upper bound (e.g. 'a(n:)') marks an assumed-shape spec
        for spec in self.get_array_spec():
            if not spec[-1]: return True
        return False
    def is_explicit_shape_array(self):
        if not self.is_array(): return False
        if self.is_deferred_shape_array(): return False
        for spec in self.get_array_spec():
            if not spec[-1] or spec[-1] == '*': return False
        return True
    def is_allocatable_array(self):
        return self.is_array() and self.is_allocatable()
    def is_array_pointer(self):
        return self.is_array() and self.is_pointer()
    def analyze(self):
        """Compute .rank (and .shape for explicit-shape arrays)."""
        typedecl = self.get_typedecl()
        if self.is_array():
            array_spec = self.get_array_spec()
            self.rank = len(array_spec)
            if self.is_deferred_shape_array(): # a(:,:)
                pass
            elif self.is_explicit_shape_array():
                shape = []
                for spec in array_spec:
                    if len(spec)==1:
                        shape.append(spec[0])
                    else:
                        try:
                            # NOTE(review): a Fortran dimension l:u has
                            # u-l+1 elements; this computes u-l.  Possible
                            # off-by-one -- confirm against upstream
                            # fparser before changing.
                            n = int(spec[1]) - int(spec[0])
                        except ValueError:
                            n = '(%s)-(%s)' % (spec[1], spec[0])
                        shape.append(str(n))
                self.shape = shape
        return
    def error(self, message):
        return self.parent.error(message)
    def warning(self, message):
        return self.parent.warning(message)
    def info(self, message):
        return self.parent.info(message)
class ProgramBlock(object):
    # Marker base class: Statement.__init__ records the nearest enclosing
    # instance of a ProgramBlock subclass as .programblock.
    __metaclass__ = classes
class Statement(object):
    """Base class of all parsed Fortran statements.

    Statement instance has attributes:
      parent - Parent BeginStatement or FortranParser instance
      item - Line instance containing the statement line
      isvalid - boolean, when False, the Statement instance will be ignored
      ignore - boolean, when True, the statement is parsed but not kept
      a - AttributeHolder populated by analyze()
    """
    __metaclass__ = classes
    # reader modes in which this statement class may match
    modes = ['free','fix','f77','pyf']
    # attribute names torepr() should display; subclasses extend this
    _repr_attr_names = []
    def __init__(self, parent, item):
        self.parent = parent
        if item is not None:
            self.reader = item.reader
        else:
            self.reader = parent.reader
        self.top = getattr(parent,'top',None) # the top of statement tree
        self.item = item

        # Resolve the enclosing program block (subroutine/function/...):
        # the parent itself, self, or inherited from the parent.
        if isinstance(parent, ProgramBlock):
            self.programblock = parent
        elif isinstance(self, ProgramBlock):
            self.programblock = self
        elif hasattr(parent,'programblock'):
            self.programblock = parent.programblock
        else:
            #self.warning('%s.programblock attribute not set.' % (self.__class__.__name__))
            pass

        # when a statement instance is constructed by error, set isvalid to False
        self.isvalid = True
        # when a statement should be ignored, set ignore to True
        self.ignore = False

        # attribute a will hold analyze information.
        # Merge the 'a' templates of all base classes (deep-copied so each
        # instance gets independent analyze storage).
        a_dict = {}
        for cls in get_base_classes(self.__class__):
            if hasattr(cls,'a'):
                a_dict.update(copy.deepcopy(cls.a.todict()))
        self.a = AttributeHolder(**a_dict)
        if hasattr(self.__class__,'a'):
            assert self.a is not self.__class__.a

        # subclasses implement process_item() to parse self.item
        self.process_item()

        return
    def __repr__(self):
        return self.torepr()
    def torepr(self, depth=-1,incrtab=''):
        # Indented debug representation; depth limits recursion.
        tab = incrtab + self.get_indent_tab()
        clsname = self.__class__.__name__
        l = [tab + yellow_text(clsname)]
        if depth==0:
            return '\n'.join(l)
        ttab = tab + '  '
        for n in self._repr_attr_names:
            attr = getattr(self, n, None)
            if not attr: continue
            if hasattr(attr, 'torepr'):
                r = attr.torepr(depth-1,incrtab)
            else:
                r = repr(attr)
            l.append(ttab + '%s=%s' % (n, r))
        if self.item is not None: l.append(ttab + 'item=%r' % (self.item))
        if not self.isvalid: l.append(ttab + 'isvalid=%r' % (self.isvalid))
        if self.ignore: l.append(ttab + 'ignore=%r' % (self.ignore))
        if not self.a.isempty():
            l.append(ttab + 'a=' + self.a.torepr(depth-1,incrtab+'  ').lstrip())
        return '\n'.join(l)
    def get_indent_tab(self,deindent=False,isfix=None):
        """Return the indentation prefix for rendering this statement,
        accounting for fixed-form column-7 rules, nesting depth, and a
        statement label (which is embedded into the prefix)."""
        if isfix is None: isfix = self.reader.isfixed
        if isfix:
            tab = ' '*6
        else:
            tab = ''
        p = self.parent
        while isinstance(p, Statement):
            tab += '  '
            p = p.parent
        if deindent:
            tab = tab[:-2]
        label = getattr(self.item, 'label', None)
        if label is None:
            return tab
        # place the label at the start of the prefix, preserving width
        s = str(label)
        if isfix:
            s = ' '+s
        tab = tab[len(s):]
        if not tab: tab = ' '
        tab = s + tab
        return tab
    def __str__(self):
        return self.tofortran()
    def asfix(self):
        """Render as fixed-form Fortran, splitting long lines with
        continuation markers."""
        lines = []
        for line in self.tofortran(isfix=True).split('\n'):
            if len(line)>72 and line[0]==' ':
                lines.append(line[:72]+'&\n &')
                line = line[72:]
                while len(line)>66:
                    lines.append(line[:66]+'&\n &')
                    line = line[66:]
                lines.append(line+'\n')
            else: lines.append(line+'\n')
        return ''.join(lines).replace('\n &\n','\n')
    def format_message(self, kind, message):
        # Attach source location (line span) when we have a source item.
        if self.item is not None:
            message = self.reader.format_message(kind, message,
                                                 self.item.span[0], self.item.span[1])
        else:
            return message
        return message

    # def show_message(self, message, stream=sys.stderr):
    # print >> stream, message
    # stream.flush()
    # return

    def error(self, message):
        message = self.format_message('ERROR', red_text(message))
        logger.error(message)
        # self.show_message(message)
        return
    def warning(self, message):
        message = self.format_message('WARNING', yellow_text(message))
        logger.warning(message)
        # self.show_message(message)
        return
    def info(self, message):
        message = self.format_message('INFO', message)
        logger.info(message)
        # self.show_message(message)
        return
    def analyze(self):
        # Subclasses override; the base class just reports the omission.
        self.warning('nothing analyzed')
        return
    def get_variable(self, name):
        """ Return Variable instance of variable name.
        """
        # Delegate up the tree unless this statement defines its own lookup.
        mth = getattr(self,'get_variable_by_name', self.parent.get_variable)
        return mth(name)
    def get_type(self, name):
        """ Return type declaration using implicit rules
        for name.
        """
        mth = getattr(self,'get_type_by_name', self.parent.get_type)
        return mth(name)
    def get_type_decl(self, kind):
        mth = getattr(self,'get_type_decl_by_kind', self.parent.get_type_decl)
        return mth(kind)
    def get_provides(self):
        """ Returns a dictionary containing statements that the block provides, or None when N/A.
        """
        return
class BeginStatement(Statement):
    """[ construct_name : ] <blocktype> [ <name> ]

    BeginStatement instances have additional attributes:
      name
      blocktype

    Block instance has attributes:
      content - list of Line or Statement instances
      name - name of the block, unnamed blocks are named
        with the line label
      construct_name - name of a construct
      parent - Block or FortranParser instance
      item - Line instance containing the block start statement
      get_item, put_item - methods to retrieve/submit Line instances
        from/to Fortran reader.
      isvalid - boolean, when False, the Block instance will be ignored.
      stmt_cls, end_stmt_cls - subclasses set end_stmt_cls to the
        EndStatement subclass that terminates this block.
    """
    _repr_attr_names = ['blocktype','name','construct_name'] + Statement._repr_attr_names
    def __init__(self, parent, item=None):
        self.content = []
        self.get_item = parent.get_item # get line function
        self.put_item = parent.put_item # put line function
        if not hasattr(self, 'blocktype'):
            # default blocktype is the lowercased class name
            self.blocktype = self.__class__.__name__.lower()
        if not hasattr(self, 'name'):
            # process_item may change this
            self.name = '__'+self.blocktype.upper()+'__'
        self.construct_name = getattr(item,'name',None)
        Statement.__init__(self, parent, item)
        return
    def tostr(self):
        return self.blocktype.upper() + ' '+ self.name
    def tofortran(self, isfix=None):
        # Render the block header followed by all contained statements.
        construct_name = self.construct_name
        construct_name = construct_name + ': ' if construct_name else ''
        l=[self.get_indent_tab(isfix=isfix) + construct_name + self.tostr()]
        for c in self.content:
            l.append(c.tofortran(isfix=isfix))
        return '\n'.join(l)
    def torepr(self, depth=-1, incrtab=''):
        tab = incrtab + self.get_indent_tab()
        ttab = tab + '  '
        l=[Statement.torepr(self, depth=depth,incrtab=incrtab)]
        if depth==0 or not self.content:
            return '\n'.join(l)
        l.append(ttab+'content:')
        for c in self.content:
            # the END statement is shown at the block's own indent level
            if isinstance(c,EndStatement):
                l.append(c.torepr(depth-1,incrtab))
            else:
                l.append(c.torepr(depth-1,incrtab + '  '))
        return '\n'.join(l)
    def process_item(self):
        """ Process the line
        """
        item = self.item
        if item is None: return
        self.fill()
        return
    def fill(self, end_flag = False):
        """
        Fill the block's content by consuming reader items until the
        end-of-block statement is found.
        """
        mode = self.reader.mode
        # restrict candidate statement classes to the current reader mode
        class_list = self.get_classes()
        self.classes = [cls for cls in class_list if mode in cls.modes]
        self.pyf_classes = [cls for cls in class_list if 'pyf' in cls.modes]

        item = self.get_item()
        while item is not None:
            if isinstance(item, Line):
                if self.process_subitem(item):
                    end_flag = True
                    break
            elif isinstance(item, Comment):
                # TODO: FIX ME, Comment content is a string
                self.content.append(classes.Comment(self, item))
            else:
                raise NotImplementedError(`item`)
            item = self.get_item()

        if not end_flag:
            self.warning('failed to find the end of block')
        return
    def process_subitem(self, item):
        """
        Check if item is a block's start statement; if it is, read the block.
        Return True to stop adding items to the given block.
        """
        line = item.get_line()

        # First check for the end of block
        cls = self.end_stmt_cls
        if cls.match(line):
            stmt = cls(self, item)
            if stmt.isvalid:
                self.content.append(stmt)
                return True

        # f2py directive lines may only match pyf-mode statements
        if item.is_f2py_directive:
            classes = self.pyf_classes
        else:
            classes = self.classes

        # Look for statement match
        for cls in classes:
            if cls.match(line):
                stmt = cls(self, item)
                if stmt.isvalid:
                    if not stmt.ignore:
                        self.content.append(stmt)
                    return False
                # item may be cloned that changes the items line:
                line = item.get_line()

        # Check if f77 code contains inline comments or other f90
        # constructs that got undetected by get_source_info.
        if item.reader.isf77:
            i = line.find('!')
            if i != -1:
                message = item.reader.format_message(\
                        'WARNING',
                        'no parse pattern found for "%s" in %r block,'\
                        ' trying to remove inline comment (not in Fortran 77).'\
                        % (item.get_line(),self.__class__.__name__),
                        item.span[0], item.span[1])
                # .. but at the expense of loosing the comment.
                logger.warning(message)
                # self.show_message(message)
                if line[:i]:
                    # retry with the comment stripped off
                    newitem = item.copy(line[:i].rstrip())
                    return self.process_subitem(newitem)
                else:
                    return True

        # try fix statement classes: collect classes that would match in
        # f90 fixed-form mode but were excluded by strict f77 mode.
        f77_classes = self.classes
        classes = []
        for cls in self.get_classes():
            if 'f77' in cls.modes and cls not in f77_classes:
                classes.append(cls)
        if classes:
            message = item.reader.format_message(\
                        'WARNING',
                        'no parse pattern found for "%s" in %r block'\
                        ' maybe due to strict f77 mode.'\
                        ' Trying f90 fix mode patterns..'\
                        % (item.get_line(),self.__class__.__name__),
                        item.span[0], item.span[1])
            logger.warning(message)
            # self.show_message(message)
            item.reader.set_mode(False, False)
            self.classes = classes

            # recurse with the f90 fix mode classes
            r = BeginStatement.process_subitem(self, item)
            if r is None:
                # restore f77 fix mode
                self.classes = f77_classes
                item.reader.set_mode(False, True)
            else:
                message = item.reader.format_message(\
                        'INFORMATION',
                        'The f90 fix mode resolved the parse pattern issue.'\
                        ' Setting reader to f90 fix mode.',
                        item.span[0], item.span[1])
                logger.info(message)
                # self.show_message(message)
                # set f90 fix mode
                self.classes = f77_classes + classes
                self.reader.set_mode(False, False)
            return r

        self.handle_unknown_item(item)
        return
    def handle_unknown_item(self, item):
        # Last resort: warn and keep the raw Line in content unparsed.
        message = item.reader.format_message(\
                    'WARNING',
                    'no parse pattern found for "%s" in %r block.'\
                    % (item.get_line(),self.__class__.__name__),
                    item.span[0], item.span[1])
        logger.warning(message)
        # self.show_message(message)
        self.content.append(item)
        #sys.exit()
        return
    def analyze(self):
        # Analyze every contained statement in order.
        for stmt in self.content:
            stmt.analyze()
        return
class EndStatement(Statement):
    """
    END [<blocktype> [<name>]]

    EndStatement instances have additional attributes:
      name
      blocktype
    """
    _repr_attr_names = ['blocktype','name'] + Statement._repr_attr_names
    def __init__(self, parent, item):
        if not hasattr(self, 'blocktype'):
            # strip the 'end' prefix off the class name, e.g. EndModule -> module
            self.blocktype = self.__class__.__name__.lower()[3:]
        Statement.__init__(self, parent, item)
    def process_item(self):
        """Validate that this END statement matches the enclosing block's
        type and (optional) name; set isvalid=False otherwise."""
        item = self.item
        # drop all blanks and the leading 'end'
        line = item.get_line().replace(' ','')[3:]
        line = item.apply_map(line)
        blocktype = self.blocktype
        if line.lower().startswith(blocktype):
            line = line[len(blocktype):].strip()
        else:
            if line:
                # not the end of expected block
                line = ''
                self.isvalid = False
        # the expected trailing name is the construct name when present,
        # otherwise the block name
        if self.parent.construct_name:
            name = self.parent.construct_name
        else:
            name = self.parent.name
        if line:
            # line variable is already cast to lower case so would fail if any
            # upper case letters exist in the label. Also, fortran is case
            # insensitive anyway so we should assume labels may have a
            # different case and therefore cast both to the same case in our
            # equivalence test.
            if line.lower()!=name.lower():
                self.warning(\
                    'expected the end of %r block but got the end of %r, skipping.'\
                    % (name, line))
                self.isvalid = False
        self.name = name
    def analyze(self):
        # END statements carry no analyzable content.
        return
    def get_indent_tab(self,deindent=False,isfix=None):
        # END lines are rendered one level shallower than the block body.
        return Statement.get_indent_tab(self, deindent=True,isfix=isfix)
    def tofortran(self, isfix=None):
        return self.get_indent_tab(isfix=isfix) + 'END %s %s'\
               % (self.blocktype.upper(),self.name or '')
| |
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009-2010, James Vega
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
import time
import string
import supybot.conf as conf
import supybot.ircdb as ircdb
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
try:
import sqlite3
except ImportError:
from pysqlite2 import dbapi2 as sqlite3 # for python2.4
import re
from supybot.utils.seq import dameraulevenshtein
def getFactoid(irc, msg, args, state):
    """Converter: consume '<key words> <separator> <value words>' from args,
    appending the joined key and the joined value to state.args."""
    assert not state.channel
    callConverter('channel', irc, msg, args, state)
    separator = state.cb.registryValue('learnSeparator', state.channel)
    try:
        sep_index = args.index(separator)
    except ValueError:
        raise callbacks.ArgumentError
    args.pop(sep_index)
    # Consume every remaining word: those before the separator position
    # form the key, the rest form the value.
    key_words = []
    value_words = []
    position = 0
    while args:
        word = args.pop(0)
        if position < sep_index:
            key_words.append(word)
        else:
            value_words.append(word)
        position += 1
    if not key_words or not value_words:
        raise callbacks.ArgumentError
    state.args.append(' '.join(key_words))
    state.args.append(' '.join(value_words))
def getFactoidId(irc, msg, args, state):
    """Converter: parse a strictly positive integer as a 'key id'."""
    positive = lambda value: value > 0
    callConverter('int', irc, msg, args, state, 'key id', positive)
# Register the converters above so command signatures (wrap specs) can
# refer to them by name.
addConverter('factoid', getFactoid)
addConverter('factoidId', getFactoidId)
class Factoids(callbacks.Plugin, plugins.ChannelDBHandler):
    def __init__(self, irc):
        # Initialize both bases: the supybot plugin machinery and the
        # per-channel database handler mixin (which provides getDb()).
        callbacks.Plugin.__init__(self, irc)
        plugins.ChannelDBHandler.__init__(self)
def makeDb(self, filename):
if os.path.exists(filename):
db = sqlite3.connect(filename)
db.text_factory = str
return db
db = sqlite3.connect(filename)
db.text_factory = str
cursor = db.cursor()
cursor.execute("""CREATE TABLE keys (
id INTEGER PRIMARY KEY,
key TEXT UNIQUE ON CONFLICT REPLACE
)""")
cursor.execute("""CREATE TABLE factoids (
id INTEGER PRIMARY KEY,
added_by TEXT,
added_at TIMESTAMP,
fact TEXT UNIQUE ON CONFLICT REPLACE,
locked BOOLEAN
)""")
cursor.execute("""CREATE TABLE relations (
id INTEGER PRIMARY KEY,
key_id INTEGER,
fact_id INTEGER,
usage_count INTEGER
)""")
db.commit()
return db
    def getCommandHelp(self, command, simpleSyntax=None):
        # 'learn' carries no real docstring: its help text must embed the
        # channel's configured learnSeparator, so it is built here from
        # learn._fake__doc__.  All other commands use the default help.
        method = self.getCommandMethod(command)
        if method.im_func.func_name == 'learn':
            chan = None
            # NOTE(review): 'dynamic' appears to be supybot's dynamic-scope
            # accessor for the message currently being handled -- confirm.
            if dynamic.msg is not None:
                chan = dynamic.msg.args[0]
            s = self.registryValue('learnSeparator', chan)
            help = callbacks.getHelp
            if simpleSyntax is None:
                simpleSyntax = conf.get(conf.supybot.reply.showSimpleSyntax,
                                        chan)
            if simpleSyntax:
                help = callbacks.getSyntax
            # _fake__doc__ has two %s placeholders, both for the separator.
            return help(method,
                        doc=method._fake__doc__ % (s, s),
                        name=callbacks.formatCommand(command))
        return super(Factoids, self).getCommandHelp(command, simpleSyntax)
def _getKeyAndFactId(self, channel, key, factoid):
db = self.getDb(channel)
cursor = db.cursor()
cursor.execute("SELECT id FROM keys WHERE key=?", (key,))
keyresults = cursor.fetchall()
cursor.execute("SELECT id FROM factoids WHERE fact=?", (factoid,))
factresults = cursor.fetchall()
return (keyresults, factresults,)
    def learn(self, irc, msg, args, channel, key, factoid):
        # if neither key nor factoid exist, add them.
        # if key exists but factoid doesn't, add factoid, link it to existing key
        # if factoid exists but key doesn't, add key, link it to existing factoid
        # if both key and factoid already exist, and are linked, do nothing, print nice message
        db = self.getDb(channel)
        cursor = db.cursor()
        (keyid, factid) = self._getKeyAndFactId(channel, key, factoid)
        if len(keyid) == 0:
            cursor.execute("""INSERT INTO keys VALUES (NULL, ?)""", (key,))
            db.commit()
        if len(factid) == 0:
            # Attribute the factoid to the registered user if the hostmask
            # is recognized, otherwise to the speaker's nick.
            if ircdb.users.hasUser(msg.prefix):
                name = ircdb.users.getUser(msg.prefix).name
            else:
                name = msg.nick
            cursor.execute("""INSERT INTO factoids VALUES
                              (NULL, ?, ?, ?, ?)""",
                           (name, int(time.time()), factoid, 0))
            db.commit()
        # re-query so both ids are guaranteed to exist below
        (keyid, factid) = self._getKeyAndFactId(channel, key, factoid)
        cursor.execute("""SELECT id, key_id, fact_id from relations
                          WHERE key_id=? AND fact_id=?""",
                       (keyid[0][0], factid[0][0],))
        existingrelation = cursor.fetchall()
        if len(existingrelation) == 0:
            cursor.execute("""INSERT INTO relations VALUES (NULL, ?, ?, ?)""",
                           (keyid[0][0],factid[0][0],0,))
            db.commit()
            irc.replySuccess()
        else:
            irc.error("This key-factoid relationship already exists.")
    learn = wrap(learn, ['factoid'])
    # Template for getCommandHelp(); both %s slots receive the channel's
    # configured learnSeparator.  Kept off __doc__ on purpose.
    learn._fake__doc__ = """[<channel>] <key> %s <value>
    Associates <key> with <value>. <channel> is only
    necessary if the message isn't sent on the channel
    itself. The word '%s' is necessary to separate the
    key from the value. It can be changed to another word
    via the learnSeparator registry value.
    """
def _lookupFactoid(self, channel, key):
db = self.getDb(channel)
cursor = db.cursor()
cursor.execute("""SELECT factoids.fact, factoids.id, relations.id FROM factoids, keys, relations
WHERE keys.key LIKE ? AND relations.key_id=keys.id AND relations.fact_id=factoids.id
ORDER BY factoids.id
LIMIT 20""", (key,))
return cursor.fetchall()
def _searchFactoid(self, channel, key):
"""Try to typo-match input to possible factoids.
Assume first letter is correct, to reduce processing time.
First, try a simple wildcard search.
If that fails, use the Damerau-Levenshtein edit-distance metric.
"""
# if you made a typo in a two-character key, boo on you.
if len(key) < 3:
return []
db = self.getDb(channel)
cursor = db.cursor()
cursor.execute("""SELECT key FROM keys WHERE key LIKE ?""", ('%' + key + '%',))
wildcardkeys = cursor.fetchall()
if len(wildcardkeys) > 0:
return [line[0] for line in wildcardkeys]
cursor.execute("""SELECT key FROM keys WHERE key LIKE ?""", (key[0] + '%',))
flkeys = cursor.fetchall()
if len(flkeys) == 0:
return []
flkeys = [line[0] for line in flkeys]
dl_metrics = [dameraulevenshtein(key, sourcekey) for sourcekey in flkeys]
dict_metrics = dict(zip(flkeys, dl_metrics))
if min(dl_metrics) <= 2:
return [key for key,item in dict_metrics.iteritems() if item <= 2]
if min(dl_metrics) <= 3:
return [key for key,item in dict_metrics.iteritems() if item <= 3]
return []
def _updateRank(self, channel, factoids):
if self.registryValue('keepRankInfo', channel):
db = self.getDb(channel)
cursor = db.cursor()
for (fact,factid,relationid) in factoids:
cursor.execute("""SELECT relations.usage_count
FROM relations
WHERE relations.id=?""", (relationid,))
old_count = cursor.fetchall()[0][0]
cursor.execute("UPDATE relations SET usage_count=? WHERE id=?",
(old_count + 1, relationid,))
db.commit()
    def _replyFactoids(self, irc, msg, key, channel, factoids,
                       number=0, error=True, raw=False):
        # Reply with one factoid (number != 0, 1-based) or with all
        # factoids for the key.  raw=True suppresses $variable substitution
        # in the factoid text; rank counters are updated for whatever was
        # actually shown.
        def format_fact(text):
            if raw:
                return text
            else:
                return ircutils.standardSubstitute(irc, msg, text)
        if factoids:
            if number:
                try:
                    irc.reply(format_fact(factoids[number-1][0]))
                    self._updateRank(channel, [factoids[number-1]])
                except IndexError:
                    irc.error('That\'s not a valid number for that key.')
                    return
            else:
                # prefixer renders the channel's 'format' template with the
                # key and the (joined) value substituted into env.
                env = {'key': key}
                def prefixer(v):
                    env['value'] = v
                    formatter = self.registryValue('format', msg.args[0])
                    return ircutils.standardSubstitute(irc, msg,
                                                       formatter, env)
                if len(factoids) == 1:
                    irc.reply(format_fact(prefixer(factoids[0][0])))
                else:
                    # Number each alternative: "(#1) ..., or (#2) ..."
                    factoidsS = []
                    counter = 1
                    for factoid in factoids:
                        factoidsS.append(format('(#%i) %s', counter,
                                                format_fact(factoid[0])))
                        counter += 1
                    irc.replies(factoidsS, prefixer=prefixer,
                                joiner=', or ', onlyPrefixFirst=True)
                self._updateRank(channel, factoids)
        elif error:
            irc.error('No factoid matches that key.')
def _replyApproximateFactoids(self, irc, msg, channel, key, error=True):
if self.registryValue('replyApproximateSearchKeys'):
factoids = self._searchFactoid(channel, key)
if factoids:
keylist = ["'%s'" % (fact,) for fact in factoids]
keylist = ', '.join(keylist)
irc.reply("I do not know about '%s', but I do know about these similar topics: %s" % (key, keylist))
elif error:
irc.error('No factoid matches that key.')
def invalidCommand(self, irc, msg, tokens):
if irc.isChannel(msg.args[0]):
channel = msg.args[0]
if self.registryValue('replyWhenInvalidCommand', channel):
key = ' '.join(tokens)
factoids = self._lookupFactoid(channel, key)
if factoids:
self._replyFactoids(irc, msg, key, channel, factoids, error=False)
else:
self._replyApproximateFactoids(irc, msg, channel, key, error=False)
def whatis(self, irc, msg, args, channel, optlist, words):
"""[<channel>] [--raw] <key> [<number>]
Looks up the value of <key> in the factoid database. If given a
number, will return only that exact factoid. If '--raw' option is
given, no variable substitution will take place on the factoid.
<channel> is only necessary if the message isn't sent in the channel
itself.
"""
raw = False
for (option, arg) in optlist:
if option == 'raw':
raw = True
number = None
if len(words) > 1:
if words[-1].isdigit():
number = int(words.pop())
if number <= 0:
irc.errorInvalid('key id')
key = ' '.join(words)
factoids = self._lookupFactoid(channel, key)
if factoids:
self._replyFactoids(irc, msg, key, channel, factoids, number, raw=raw)
else:
self._replyApproximateFactoids(irc, msg, channel, key)
whatis = wrap(whatis, ['channel',
getopts({'raw': '',}),
many('something')])
    def alias(self, irc, msg, args, channel, oldkey, newkey, number):
        """[<channel>] <oldkey> <newkey> [<number>]

        Adds a new key <newkey> for factoid associated with <oldkey>.
        <number> is only necessary if there's more than one factoid associated
        with <oldkey>.

        The same action can be accomplished by using the 'learn' function with
        a new key but an existing (verbatim) factoid content.
        """
        def _getNewKey(channel, newkey, arelation):
            # Return the key row(s) for newkey (creating the key if missing),
            # or False when the key<->factoid relation already exists.
            db = self.getDb(channel)
            cursor = db.cursor()
            cursor.execute("""SELECT id FROM keys WHERE key=?""", (newkey,))
            newkey_info = cursor.fetchall()
            if len(newkey_info) == 1:
                # check if we already have the requested relation
                cursor.execute("""SELECT id FROM relations WHERE
                               key_id=? and fact_id=?""",
                               (newkey_info[0][0], arelation[2]))
                existentrelation = cursor.fetchall()
                if len(existentrelation) != 0:
                    newkey_info = False
            elif len(newkey_info) == 0:
                # Key does not exist yet: insert it, then re-read its id.
                cursor.execute("""INSERT INTO keys VALUES (NULL, ?)""",
                               (newkey,))
                db.commit()
                cursor.execute("""SELECT id FROM keys WHERE key=?""", (newkey,))
                newkey_info = cursor.fetchall()
            return newkey_info
        db = self.getDb(channel)
        cursor = db.cursor()
        cursor.execute("""SELECT relations.id, relations.key_id, relations.fact_id
                          FROM keys, relations
                          WHERE keys.key=? AND
                          relations.key_id=keys.id""", (oldkey,))
        results = cursor.fetchall()
        if len(results) == 0:
            irc.error('No factoid matches that key.')
            return
        elif len(results) == 1:
            newkey_info = _getNewKey(channel, newkey, results[0])
            if newkey_info is not False:
                # relations row: (id, key_id, fact_id, usage_count=0).
                cursor.execute("""INSERT INTO relations VALUES(NULL, ?, ?, ?)""",
                               (newkey_info[0][0], results[0][2], 0,))
                irc.replySuccess()
            else:
                irc.error('This key-factoid relationship already exists.')
        elif len(results) > 1:
            try:
                arelation = results[number-1]
            except IndexError:
                irc.error("That's not a valid number for that key.")
                return
            except TypeError:
                # number was None, i.e. the caller supplied no number.
                irc.error("This key has more than one factoid associated with "
                          "it, but you have not provided a number.")
                return
            newkey_info = _getNewKey(channel, newkey, arelation)
            if newkey_info is not False:
                cursor.execute("""INSERT INTO relations VALUES(NULL, ?, ?, ?)""",
                               (newkey_info[0][0], arelation[2], 0,))
                irc.replySuccess()
            else:
                irc.error('This key-factoid relationship already exists.')
    alias = wrap(alias, ['channel', 'something', 'something', optional('int')])
    def rank(self, irc, msg, args, channel, optlist, number):
        """[<channel>] [--plain] [--alpha] [<number>]

        Returns a list of top-ranked factoid keys, sorted by usage count
        (rank). If <number> is not provided, the default number of factoid keys
        returned is set by the rankListLength registry value.

        If --plain option is given, rank numbers and usage counts are not
        included in output.

        If --alpha option is given in addition to --plain, keys are sorted
        alphabetically, instead of by rank.

        <channel> is only necessary if the message isn't sent in the channel
        itself.
        """
        if not number:
            number = self.registryValue('rankListLength', channel)
        db = self.getDb(channel)
        cursor = db.cursor()
        cursor.execute("""SELECT keys.key, relations.usage_count
                          FROM keys, relations
                          WHERE relations.key_id=keys.id
                          ORDER BY relations.usage_count DESC
                          LIMIT ?""", (number,))
        factkeys = cursor.fetchall()
        plain=False
        alpha=False
        for (option, arg) in optlist:
            if option == 'plain':
                plain = True
            elif option =='alpha':
                alpha = True
        if plain:
            # Bare key names only; --alpha additionally sorts them.
            s = [ "%s" % (key[0],) for i, key in enumerate(factkeys) ]
            if alpha:
                s.sort()
        else:
            # "#rank key (usage_count)" entries, already rank-ordered by SQL.
            s = [ "#%d %s (%d)" % (i+1, key[0], key[1]) for i, key in enumerate(factkeys) ]
        irc.reply(", ".join(s))
    rank = wrap(rank, ['channel',
                       getopts({'plain': '', 'alpha': '',}),
                       optional('int')])
    def lock(self, irc, msg, args, channel, key):
        """[<channel>] <key>

        Locks the factoid(s) associated with <key> so that they cannot be
        removed or added to. <channel> is only necessary if the message isn't
        sent in the channel itself.
        """
        db = self.getDb(channel)
        cursor = db.cursor()
        # NOTE(review): multi-table UPDATE is MySQL-style syntax; confirm the
        # backend returned by getDb() accepts it (plain SQLite does not).
        cursor.execute("UPDATE factoids, keys, relations "
                       "SET factoids.locked=1 WHERE key LIKE ? AND "
                       "factoids.id=relations.fact_id AND "
                       "keys.id=relations.key_id", (key,))
        db.commit()
        irc.replySuccess()
    lock = wrap(lock, ['channel', 'text'])
def unlock(self, irc, msg, args, channel, key):
"""[<channel>] <key>
Unlocks the factoid(s) associated with <key> so that they can be
removed or added to. <channel> is only necessary if the message isn't
sent in the channel itself.
"""
db = self.getDb(channel)
cursor = db.cursor()
cursor.execute("""UPDATE factoids, keys, relations
SET factoids.locked=1 WHERE key LIKE ? AND
factoids.id=relations.fact_id AND
keys.id=relations.key_id""", (key,))
db.commit()
irc.replySuccess()
unlock = wrap(unlock, ['channel', 'text'])
    def _deleteRelation(self, channel, relationlist):
        # Delete each (key_id, fact_id, relation_id) relation, then
        # garbage-collect keys and factoids left with no relations at all.
        db = self.getDb(channel)
        cursor = db.cursor()
        for (keyid, factid, relationid) in relationlist:
            cursor.execute("""DELETE FROM relations where relations.id=?""",
                           (relationid,))
            db.commit()
            # Drop the key if this was its last relation.
            cursor.execute("""SELECT id FROM relations
                              WHERE relations.key_id=?""", (keyid,))
            remaining_key_relations = cursor.fetchall()
            if len(remaining_key_relations) == 0:
                cursor.execute("""DELETE FROM keys where id=?""", (keyid,))
            # Drop the factoid text if this was its last relation.
            cursor.execute("""SELECT id FROM relations
                              WHERE relations.fact_id=?""", (factid,))
            remaining_fact_relations = cursor.fetchall()
            if len(remaining_fact_relations) == 0:
                cursor.execute("""DELETE FROM factoids where id=?""", (factid,))
            db.commit()
    def forget(self, irc, msg, args, channel, words):
        """[<channel>] <key> [<number>|*]

        Removes a key-fact relationship for key <key> from the factoids
        database. If there is more than one such relationship for this key,
        a number is necessary to determine which one should be removed.
        A * can be used to remove all relationships for <key>.

        If as a result, the key (factoid) remains without any relationships to
        a factoid (key), it shall be removed from the database.

        <channel> is only necessary if
        the message isn't sent in the channel itself.
        """
        number = None
        # number stays None (ambiguous), becomes an int (specific relation)
        # or True when '*' requests removal of every relation for the key.
        if len(words) > 1:
            if words[-1].isdigit():
                number = int(words.pop())
                if number <= 0:
                    irc.errorInvalid('key id')
            elif words[-1] == '*':
                words.pop()
                number = True
        key = ' '.join(words)
        db = self.getDb(channel)
        cursor = db.cursor()
        cursor.execute("""SELECT keys.id, factoids.id, relations.id
                          FROM keys, factoids, relations
                          WHERE key LIKE ? AND
                          relations.key_id=keys.id AND
                          relations.fact_id=factoids.id""", (key,))
        results = cursor.fetchall()
        if len(results) == 0:
            irc.error('There is no such factoid.')
        elif len(results) == 1 or number is True:
            # Single relation, or '*' given: remove everything that matched.
            self._deleteRelation(channel, results)
            irc.replySuccess()
        else:
            if number is not None:
                #results = cursor.fetchall()
                try:
                    arelation = results[number-1]
                except IndexError:
                    irc.error('Invalid factoid number.')
                    return
                self._deleteRelation(channel, [arelation,])
                irc.replySuccess()
            else:
                irc.error('%s factoids have that key. '
                          'Please specify which one to remove, '
                          'or use * to designate all of them.' %
                          len(results))
    forget = wrap(forget, ['channel', many('something')])
    def random(self, irc, msg, args, channel):
        """[<channel>]

        Returns a random factoid from the database for <channel>. <channel>
        is only necessary if the message isn't sent in the channel itself.
        """
        db = self.getDb(channel)
        cursor = db.cursor()
        # Pick up to three random key<->factoid relations for one reply.
        cursor.execute("""SELECT id, key_id, fact_id FROM relations
                          ORDER BY random()
                          LIMIT 3""")
        results = cursor.fetchall()
        if len(results) != 0:
            L = []
            for (relationid, keyid, factid) in results:
                cursor.execute("""SELECT keys.key, factoids.fact
                                  FROM keys, factoids
                                  WHERE factoids.id=? AND
                                  keys.id=?""", (factid,keyid,))
                (key,factoid) = cursor.fetchall()[0]
                L.append('"%s": %s' % (ircutils.bold(key), factoid))
            irc.reply('; '.join(L))
        else:
            irc.error('I couldn\'t find a factoid.')
    random = wrap(random, ['channel'])
    def info(self, irc, msg, args, channel, key):
        """[<channel>] <key>

        Gives information about the factoid(s) associated with <key>.
        <channel> is only necessary if the message isn't sent in the channel
        itself.
        """
        db = self.getDb(channel)
        cursor = db.cursor()
        cursor.execute("SELECT id FROM keys WHERE key LIKE ?", (key,))
        results = cursor.fetchall()
        if len(results) == 0:
            irc.error('No factoid matches that key.')
            return
        id = results[0][0]
        cursor.execute("""SELECT factoids.added_by, factoids.added_at, factoids.locked, relations.usage_count
                          FROM factoids, relations
                          WHERE relations.key_id=? AND
                          relations.fact_id=factoids.id
                          ORDER BY relations.id""", (id,))
        factoids = cursor.fetchall()
        L = []
        counter = 0
        for (added_by, added_at, locked, usage_count) in factoids:
            counter += 1
            added_at = time.strftime(conf.supybot.reply.format.time(),
                                     time.localtime(int(added_at)))
            L.append(format('#%i was added by %s at %s, and has been recalled '
                            '%n',
                            counter, added_by, added_at, (usage_count, 'time')))
        factoids = '; '.join(L)
        # NOTE(review): ``locked`` is the flag of the *last* row iterated;
        # with several factoids per key this summary may be misleading.
        s = format('Key %q is %s and has %n associated with it: %s',
                   key, locked and 'locked' or 'not locked',
                   (counter, 'factoid'), factoids)
        irc.reply(s)
    info = wrap(info, ['channel', 'text'])
    def change(self, irc, msg, args, channel, key, number, replacer):
        """[<channel>] <key> <number> <regexp>

        Changes the factoid #<number> associated with <key> according to
        <regexp>.
        """
        db = self.getDb(channel)
        cursor = db.cursor()
        cursor.execute("""SELECT factoids.id, factoids.fact
                          FROM keys, factoids, relations
                          WHERE keys.key LIKE ? AND
                          keys.id=relations.key_id AND
                          factoids.id=relations.fact_id""", (key,))
        results = cursor.fetchall()
        if len(results) == 0:
            irc.error(format('I couldn\'t find any key %q', key))
            return
        elif len(results) < number:
            irc.errorInvalid('key id')
        # number is 1-based; replacer applies the s/.../.../ substitution.
        (id, fact) = results[number-1]
        newfact = replacer(fact)
        cursor.execute("UPDATE factoids SET fact=? WHERE id=?", (newfact, id))
        db.commit()
        irc.replySuccess()
    change = wrap(change, ['channel', 'something',
                           'factoidId', 'regexpReplacer'])
    # Translate shell-style glob wildcards to their SQL LIKE equivalents.
    _sqlTrans = string.maketrans('*?', '%_')
    def search(self, irc, msg, args, channel, optlist, globs):
        """[<channel>] [--values] [--{regexp} <value>] [<glob> ...]

        Searches the keyspace for keys matching <glob>. If --regexp is given,
        its associated value is taken as a regexp and matched against the keys.
        If --values is given, search the value space instead of the keyspace.
        """
        if not optlist and not globs:
            raise callbacks.ArgumentError
        tables = ['keys']
        formats = []
        criteria = []
        # TARGET is a placeholder substituted into the SQL below; --values
        # switches it from the key column to the factoid-text column.
        target = 'keys.key'
        predicateName = 'p'
        db = self.getDb(channel)
        for (option, arg) in optlist:
            if option == 'values':
                target = 'factoids.fact'
                if 'factoids' not in tables:
                    tables.append('factoids')
                tables.append('relations')
                criteria.append('factoids.id=relations.fact_id AND keys.id=relations.key_id')
            elif option == 'regexp':
                # Register a one-argument SQL function wrapping the regexp;
                # a fresh name ('p', 'pp', ...) is used per --regexp option.
                criteria.append('%s(TARGET)' % predicateName)
                def p(s, r=arg):
                    return int(bool(r.search(s)))
                db.create_function(predicateName, 1, p)
                predicateName += 'p'
        for glob in globs:
            criteria.append('TARGET LIKE ?')
            formats.append(glob.translate(self._sqlTrans))
        cursor = db.cursor()
        sql = """SELECT keys.key FROM %s WHERE %s""" % \
              (', '.join(tables), ' AND '.join(criteria))
        sql = sql + " ORDER BY keys.key"
        sql = sql.replace('TARGET', target)
        cursor.execute(sql, formats)
        results = cursor.fetchall()
        if len(results) == 0:
            irc.reply('No keys matched that query.')
        elif len(results) == 1 and \
             self.registryValue('showFactoidIfOnlyOneMatch', channel):
            # Exactly one hit: show the factoid itself rather than the key.
            self.whatis(irc, msg, [channel, results[0][0]])
        elif len(results) > 100:
            irc.reply('More than 100 keys matched that query; '
                      'please narrow your query.')
        else:
            keys = [repr(t[0]) for t in results]
            s = format('%L', keys)
            irc.reply(s)
    search = wrap(search, ['channel',
                           getopts({'values': '', 'regexp': 'regexpMatcher'}),
                           any('glob')])
# Supybot discovers this plugin through the module-level ``Class`` attribute.
Class = Factoids

# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| |
"""
This is a Python library that handles calling CryptoPhoto API.
- Main Page
http://cryptophoto.com/
- About Cryptophoto
http://cryptophoto.com/about
- Register to CryptoPhoto
http://cryptophoto.com/demo/register/
Copyright(c) 2016 CryptoPhoto -- http://cryptophoto.com/
AUTHORS: CryptoPhoto
VERSION: 1.20160609
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import urllib2, urllib, hashlib, hmac, time, json, base64
class CryptoPhotoUtils(object):
    """Client for the CryptoPhoto two-factor authentication HTTP API.

    Wraps session creation, widget generation and challenge/transaction
    verification against a CryptoPhoto server.  All public methods return
    a dict containing at least ``is_valid`` plus either result data or an
    ``error`` description; they never raise on network failure.
    """

    def __init__(self, server, privatekey, publickey, uid):
        # Fall back to the public CryptoPhoto service when no server given.
        self.server = server if server else "https://cryptophoto.com"
        self.private_key = privatekey
        self.public_key = publickey
        self.user_id = uid
        self.session_id = None
        self.secure = True
        self.user_agent = 'CryptoPhoto Python'
        if self.secure:
            self.protocol = 'https'

    def __post_request(self, url, value_dict):
        """POST the pairs in *value_dict* to *url*, form-encoded.

        Returns the raw response body as a string, or False when the
        request fails for any reason (callers map that to
        'service-unavailable').
        """
        # Encode into a copy instead of mutating the caller's dict.
        encoded = dict((k, self.__tryEncode(v))
                       for k, v in value_dict.iteritems())
        params = urllib.urlencode(encoded)
        request = urllib2.Request(
            url=url,
            data=params,
            headers={
                'Content-type': 'application/x-www-form-urlencoded',
                'User-agent': self.user_agent
            }
        )
        try:
            httpresp = urllib2.urlopen(request)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt /
            # SystemExit are no longer swallowed.
            return False
        ret = httpresp.read()
        httpresp.close()
        return ret

    def start_session(self, ip, authentication=False):
        """Fetch a session id from the CryptoPhoto service.

        Returns a dict with ``is_valid`` plus either session data
        (``sid``, optionally ``has_token``) or ``error``/``errip``.
        """
        ret = {}
        t = int(time.time())
        response = self.__post_request('%s/api/get/session' % (self.server), {
            'publickey': self.public_key,
            'uid': self.user_id,
            'time': t,
            'signature': self.make_signature(self.user_id, self.public_key, self.private_key, t),
            'ip': ip,
            'authentication': "true" if authentication else "false"
        })
        if not response:
            ret["is_valid"] = False
            ret["error"] = "Service-unavailable"
            return ret
        # FIX: removed leftover debug ``print`` statements that leaked the
        # raw server response to stdout.
        return_values = response.splitlines()
        try:
            if return_values[0] == 'success':
                ret["is_valid"] = True
                ret["sid"] = return_values[1]
                self.session_id = return_values[1]
                self.session_id_used = False
                if return_values[2] == 'true':
                    ret["has_token"] = True
            else:
                ret["is_valid"] = False
                ret["error"] = return_values[1]
                if not return_values[3]:
                    ret["errip"] = ''
                else:
                    ret["errip"] = return_values[3]
        except Exception:
            # Too few lines in the response (IndexError and friends).
            ret["is_valid"] = False
            ret["error"] = 'Malformed-response'
        return ret

    def get_auth_widget(self):
        """Return the HTML snippet embedding the authentication widget.

        Requires a session established via start_session().
        """
        ret = {}
        if self.session_id:
            ret["html"] = ('<script type=\"text/javascript\" src=\"%s/api/challenge'
                           '?sd=%s\"></script>' % (self.server, self.session_id))
            ret["is_valid"] = True
        else:
            ret["is_valid"] = False
            ret["error"] = 'invalid-session-id'
        return ret

    def get_gen_widget(self):
        """Return the HTML snippet embedding the token-generation widget.

        Requires a session established via start_session().
        """
        ret = {}
        if self.session_id:
            ret["html"] = ('<script type=\"text/javascript\" src=\"%s/api/token?sd='
                           '%s\"></script>' % (self.server, self.session_id))
            ret["is_valid"] = True
        else:
            ret["is_valid"] = False
            ret["error"] = 'invalid-session-id'
        return ret

    def verify_response(self, selector, response_row, response_col, cp_phc, ip):
        """Verify a response for a previously sent CryptoPhoto challenge.

        Either a (row, col) pair or *cp_phc* must be supplied along with
        the *selector*.  Returns ``is_valid`` plus ``message`` or ``error``.
        """
        ret = {}
        if selector is None:
            ret["is_valid"] = False
            ret["error"] = "Selector is not defined"
            return ret
        if (response_row is None or response_col is None) and cp_phc is None:
            ret["is_valid"] = False
            ret["error"] = "Post data invalid"
            return ret
        t = int(time.time())
        response = self.__post_request('%s/api/verify' % (self.server), {
            'publickey': self.public_key,
            'uid': self.user_id,
            'response_row': response_row,
            'response_col': response_col,
            'selector': selector,
            'cph': cp_phc,
            'ip': ip,
            'signature': self.make_signature(self.user_id, self.public_key, self.private_key, t),
            'time': t
        })
        if not response:
            ret["is_valid"] = False
            ret["error"] = "service-unavailable"
            return ret
        return_values = response.splitlines()
        ret["is_valid"] = False
        try:
            m = return_values[1]
            if return_values[0] == 'success':
                ret["is_valid"] = True
                ret["message"] = m
            else:
                ret["error"] = m
        except Exception:
            ret["is_valid"] = False
            ret["error"] = 'malformed-response'
        return ret

    def verify_cptv_response(self, parms):
        """Verify a CryptoPhoto transaction-verification (CPTV) response.

        *parms* must contain the signed JWS token under 'cpJWSrfc7515' plus
        every POST/GET field listed in the token's fieldsOrder claim.
        """
        ret = {}
        if not parms['cpJWSrfc7515']:
            ret["is_valid"] = False
            ret["error"] = "JWT token not provided"
            # BUG FIX: previously fell through and posted to the service
            # anyway despite the missing token.
            return ret
        response = self.__post_request('%s/api/verify/cptv.json' % (self.server), {
            'token': parms['cpJWSrfc7515']
        })
        if not response:
            ret["is_valid"] = False
            ret["error"] = "service-unavailable"
            return ret
        answer = json.loads(response)
        if answer and 'success' in answer:
            token = parms['cpJWSrfc7515']
            splits = token.split(".")
            # Restore the base64url padding stripped by JWS.  The trailing
            # '% 4' keeps an already-aligned payload unpadded (the old code
            # appended '====' in that case).
            missing_padding = (4 - len(splits[1]) % 4) % 4
            if missing_padding:
                splits[1] += b'=' * missing_padding
            claim = json.loads(base64.urlsafe_b64decode(splits[1]))
            if claim and 'fieldsOrder' in claim and 'fieldsSha256' in claim:
                fields = claim['fieldsOrder'].split(",")
                shacontent = ""
                for field in fields:
                    if parms[field]:
                        shacontent += parms[field]
                m = hashlib.sha256()
                m.update(shacontent)
                shacontent = base64.urlsafe_b64encode(m.digest())
                # Compare digests with padding removed from both sides.
                shacontent = shacontent.replace("=", "")
                fieldsSha256 = claim['fieldsSha256'].replace("=", "")
                if fieldsSha256 == shacontent:
                    ret["is_valid"] = True
                else:
                    ret["is_valid"] = False
                    ret["error"] = "POST/GET fields values have been changed"
                return ret
            # FIX: the claim lacked the expected fields; previously this
            # path returned None implicitly.
            ret["is_valid"] = False
            ret["error"] = "malformed-response"
            return ret
        else:
            ret["is_valid"] = False
            ret["error"] = answer['description']
            return ret

    def make_signature(self, uid, publickey, privatekey, time):
        """Return the hex HMAC-SHA1 request signature.

        NOTE: the ``time`` parameter shadows the stdlib module inside this
        method; the name is kept for backward compatibility.
        """
        return hmac.new(privatekey, privatekey + str(time) + uid + publickey, hashlib.sha1).hexdigest()

    def __tryEncode(self, s):
        """UTF-8 encode unicode values; pass byte strings through."""
        if isinstance(s, unicode):
            return s.encode('utf-8')
        return s
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class AccessControlEntryAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Action of an access control entry: permit or deny."""
    PERMIT = "Permit"
    DENY = "Deny"
class AppServiceCertificateOrderPatchResourcePropertiesAppServiceCertificateNotRenewableReasonsItem(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Reason an App Service certificate is not renewable."""
    REGISTRATION_STATUS_NOT_SUPPORTED_FOR_RENEWAL = "RegistrationStatusNotSupportedForRenewal"
    EXPIRATION_NOT_IN_RENEWAL_TIME_RANGE = "ExpirationNotInRenewalTimeRange"
    SUBSCRIPTION_NOT_ACTIVE = "SubscriptionNotActive"
class AppServiceCertificateOrderPropertiesAppServiceCertificateNotRenewableReasonsItem(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Reason an App Service certificate is not renewable."""
    REGISTRATION_STATUS_NOT_SUPPORTED_FOR_RENEWAL = "RegistrationStatusNotSupportedForRenewal"
    EXPIRATION_NOT_IN_RENEWAL_TIME_RANGE = "ExpirationNotInRenewalTimeRange"
    SUBSCRIPTION_NOT_ACTIVE = "SubscriptionNotActive"
class AppServicePlanRestrictions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """App Service plans this offer is restricted to."""
    NONE = "None"
    FREE = "Free"
    SHARED = "Shared"
    BASIC = "Basic"
    STANDARD = "Standard"
    PREMIUM = "Premium"
class AutoHealActionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Predefined auto-heal action to be taken."""
    RECYCLE = "Recycle"
    LOG_EVENT = "LogEvent"
    CUSTOM_ACTION = "CustomAction"
class AzureResourceType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Type of the Azure resource the hostname is assigned to."""
    WEBSITE = "Website"
    TRAFFIC_MANAGER = "TrafficManager"
class AzureStorageState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """State of the storage account."""
    OK = "Ok"
    INVALID_CREDENTIALS = "InvalidCredentials"
    INVALID_SHARE = "InvalidShare"
class AzureStorageType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Type of storage (Azure Files or Azure Blob)."""
    AZURE_FILES = "AzureFiles"
    AZURE_BLOB = "AzureBlob"
class BackupItemStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Status of a backup item."""
    IN_PROGRESS = "InProgress"
    FAILED = "Failed"
    SUCCEEDED = "Succeeded"
    TIMED_OUT = "TimedOut"
    CREATED = "Created"
    SKIPPED = "Skipped"
    PARTIALLY_SUCCEEDED = "PartiallySucceeded"
    DELETE_IN_PROGRESS = "DeleteInProgress"
    DELETE_FAILED = "DeleteFailed"
    DELETED = "Deleted"
class BackupRestoreOperationType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Type of a backup/restore operation."""
    DEFAULT = "Default"
    CLONE = "Clone"
    RELOCATION = "Relocation"
    SNAPSHOT = "Snapshot"
    CLOUD_FS = "CloudFS"
class BuildStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The status of the static site build."""
    WAITING_FOR_DEPLOYMENT = "WaitingForDeployment"
    UPLOADING = "Uploading"
    DEPLOYING = "Deploying"
    READY = "Ready"
    FAILED = "Failed"
    DELETING = "Deleting"
    DETACHED = "Detached"
class BuiltInAuthenticationProvider(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The default authentication provider to use when multiple providers
    are configured.  This setting is only needed if multiple providers are
    configured and the unauthenticated client action is set to
    "RedirectToLoginPage".
    """
    AZURE_ACTIVE_DIRECTORY = "AzureActiveDirectory"
    FACEBOOK = "Facebook"
    GOOGLE = "Google"
    MICROSOFT_ACCOUNT = "MicrosoftAccount"
    TWITTER = "Twitter"
    GITHUB = "Github"
class CertificateOrderActionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Type of a certificate order action."""
    CERTIFICATE_ISSUED = "CertificateIssued"
    CERTIFICATE_ORDER_CANCELED = "CertificateOrderCanceled"
    CERTIFICATE_ORDER_CREATED = "CertificateOrderCreated"
    CERTIFICATE_REVOKED = "CertificateRevoked"
    DOMAIN_VALIDATION_COMPLETE = "DomainValidationComplete"
    FRAUD_DETECTED = "FraudDetected"
    ORG_NAME_CHANGE = "OrgNameChange"
    ORG_VALIDATION_COMPLETE = "OrgValidationComplete"
    SAN_DROP = "SanDrop"
    FRAUD_CLEARED = "FraudCleared"
    CERTIFICATE_EXPIRED = "CertificateExpired"
    CERTIFICATE_EXPIRATION_WARNING = "CertificateExpirationWarning"
    FRAUD_DOCUMENTATION_REQUIRED = "FraudDocumentationRequired"
    UNKNOWN = "Unknown"
class CertificateOrderStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Current status of a certificate order."""
    PENDINGISSUANCE = "Pendingissuance"
    ISSUED = "Issued"
    REVOKED = "Revoked"
    CANCELED = "Canceled"
    DENIED = "Denied"
    PENDINGREVOCATION = "Pendingrevocation"
    PENDING_REKEY = "PendingRekey"
    UNUSED = "Unused"
    EXPIRED = "Expired"
    NOT_SUBMITTED = "NotSubmitted"
class CertificateProductType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Certificate product type."""
    STANDARD_DOMAIN_VALIDATED_SSL = "StandardDomainValidatedSsl"
    STANDARD_DOMAIN_VALIDATED_WILD_CARD_SSL = "StandardDomainValidatedWildCardSsl"
class Channels(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """List of channels that this recommendation can apply."""
    NOTIFICATION = "Notification"
    API = "Api"
    EMAIL = "Email"
    WEBHOOK = "Webhook"
    ALL = "All"
class CheckNameResourceTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Resource type used for name-availability verification."""
    SITE = "Site"
    SLOT = "Slot"
    HOSTING_ENVIRONMENT = "HostingEnvironment"
    PUBLISHING_USER = "PublishingUser"
    MICROSOFT_WEB_SITES = "Microsoft.Web/sites"
    MICROSOFT_WEB_SITES_SLOTS = "Microsoft.Web/sites/slots"
    MICROSOFT_WEB_HOSTING_ENVIRONMENTS = "Microsoft.Web/hostingEnvironments"
    MICROSOFT_WEB_PUBLISHING_USERS = "Microsoft.Web/publishingUsers"
class ClientCertMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """This composes with the ClientCertEnabled setting.

    * ClientCertEnabled: false means ClientCert is ignored.
    * ClientCertEnabled: true and ClientCertMode: Required means ClientCert is required.
    * ClientCertEnabled: true and ClientCertMode: Optional means ClientCert is optional or
      accepted.
    """
    REQUIRED = "Required"
    OPTIONAL = "Optional"
class CloneAbilityResult(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Whether an app can be cloned (fully, partially, or not at all)."""
    CLONEABLE = "Cloneable"
    PARTIALLY_CLONEABLE = "PartiallyCloneable"
    NOT_CLONEABLE = "NotCloneable"
class ComputeModeOptions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Shared/dedicated workers."""
    SHARED = "Shared"
    DEDICATED = "Dedicated"
    DYNAMIC = "Dynamic"
class ConnectionStringType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Type of database a connection string points at."""
    MY_SQL = "MySql"
    SQL_SERVER = "SQLServer"
    SQL_AZURE = "SQLAzure"
    CUSTOM = "Custom"
    NOTIFICATION_HUB = "NotificationHub"
    SERVICE_BUS = "ServiceBus"
    EVENT_HUB = "EventHub"
    API_HUB = "ApiHub"
    DOC_DB = "DocDb"
    REDIS_CACHE = "RedisCache"
    POSTGRE_SQL = "PostgreSQL"
class ContinuousWebJobStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Status of a continuous web job."""
    INITIALIZING = "Initializing"
    STARTING = "Starting"
    RUNNING = "Running"
    PENDING_RESTART = "PendingRestart"
    STOPPED = "Stopped"
class CookieExpirationConvention(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Convention used to determine session cookie expiration."""
    FIXED_TIME = "FixedTime"
    IDENTITY_PROVIDER_DERIVED = "IdentityProviderDerived"
class CustomHostNameDnsRecordType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Type of the DNS record (CNAME or A)."""
    C_NAME = "CName"
    A = "A"
class DatabaseType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Database type (e.g. SqlAzure / MySql)."""
    SQL_AZURE = "SqlAzure"
    MY_SQL = "MySql"
    LOCAL_MY_SQL = "LocalMySql"
    POSTGRE_SQL = "PostgreSql"
class DnsType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Current DNS type."""
    AZURE_DNS = "AzureDns"
    DEFAULT_DOMAIN_REGISTRAR_DNS = "DefaultDomainRegistrarDns"
class DnsVerificationTestResult(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """DNS verification test result."""
    PASSED = "Passed"
    FAILED = "Failed"
    SKIPPED = "Skipped"
class DomainPatchResourcePropertiesDomainNotRenewableReasonsItem(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Reason a domain is not renewable."""
    REGISTRATION_STATUS_NOT_SUPPORTED_FOR_RENEWAL = "RegistrationStatusNotSupportedForRenewal"
    EXPIRATION_NOT_IN_RENEWAL_TIME_RANGE = "ExpirationNotInRenewalTimeRange"
    SUBSCRIPTION_NOT_ACTIVE = "SubscriptionNotActive"
class DomainPropertiesDomainNotRenewableReasonsItem(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Reason a domain is not renewable."""
    REGISTRATION_STATUS_NOT_SUPPORTED_FOR_RENEWAL = "RegistrationStatusNotSupportedForRenewal"
    EXPIRATION_NOT_IN_RENEWAL_TIME_RANGE = "ExpirationNotInRenewalTimeRange"
    SUBSCRIPTION_NOT_ACTIVE = "SubscriptionNotActive"
class DomainStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Domain registration status."""
    ACTIVE = "Active"
    AWAITING = "Awaiting"
    CANCELLED = "Cancelled"
    CONFISCATED = "Confiscated"
    DISABLED = "Disabled"
    EXCLUDED = "Excluded"
    EXPIRED = "Expired"
    FAILED = "Failed"
    HELD = "Held"
    LOCKED = "Locked"
    PARKED = "Parked"
    PENDING = "Pending"
    RESERVED = "Reserved"
    REVERTED = "Reverted"
    SUSPENDED = "Suspended"
    TRANSFERRED = "Transferred"
    UNKNOWN = "Unknown"
    UNLOCKED = "Unlocked"
    UNPARKED = "Unparked"
    UPDATED = "Updated"
    JSON_CONVERTER_FAILED = "JsonConverterFailed"
class DomainType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Valid values are Regular domain: Azure will charge the full price of
    domain registration; SoftDeleted: purchasing this domain will simply
    restore it and this operation will not cost anything.
    """
    REGULAR = "Regular"
    SOFT_DELETED = "SoftDeleted"
class Enum4(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Site OS/kind filter (autogenerated anonymous enum name)."""
    WINDOWS = "Windows"
    LINUX = "Linux"
    WINDOWS_FUNCTIONS = "WindowsFunctions"
    LINUX_FUNCTIONS = "LinuxFunctions"
class Enum5(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Site OS/kind filter (autogenerated anonymous enum name)."""
    WINDOWS = "Windows"
    LINUX = "Linux"
    WINDOWS_FUNCTIONS = "WindowsFunctions"
    LINUX_FUNCTIONS = "LinuxFunctions"
class ForwardProxyConvention(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Convention for handling requests arriving via a forward proxy."""
    NO_PROXY = "NoProxy"
    STANDARD = "Standard"
    CUSTOM = "Custom"
class FrequencyUnit(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The unit of time for how often the backup should be executed (e.g.
    for weekly backup, this should be set to Day and FrequencyInterval
    should be set to 7).
    """
    DAY = "Day"
    HOUR = "Hour"
class FtpsState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """State of the FTP / FTPS service."""
    ALL_ALLOWED = "AllAllowed"
    FTPS_ONLY = "FtpsOnly"
    DISABLED = "Disabled"
class HostingEnvironmentStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Current status of the App Service Environment."""
    PREPARING = "Preparing"
    READY = "Ready"
    SCALING = "Scaling"
    DELETING = "Deleting"
class HostNameType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Type of the hostname."""
    VERIFIED = "Verified"
    MANAGED = "Managed"
class HostType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Indicates whether the hostname is a standard or repository hostname."""
    STANDARD = "Standard"
    REPOSITORY = "Repository"
class InAvailabilityReasonType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """``Invalid`` indicates the name provided does not match Azure App
    Service naming requirements.  ``AlreadyExists`` indicates that the name
    is already in use and is therefore unavailable.
    """
    INVALID = "Invalid"
    ALREADY_EXISTS = "AlreadyExists"
class IpFilterTag(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Defines what this IP filter will be used for.  This is to support IP
    filtering on proxies.
    """
    DEFAULT = "Default"
    XFF_PROXY = "XffProxy"
    SERVICE_TAG = "ServiceTag"
class IssueType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Represents the type of the detector issue."""
    SERVICE_INCIDENT = "ServiceIncident"
    APP_DEPLOYMENT = "AppDeployment"
    APP_CRASH = "AppCrash"
    RUNTIME_ISSUE_DETECTED = "RuntimeIssueDetected"
    ASE_DEPLOYMENT = "AseDeployment"
    USER_ISSUE = "UserIssue"
    PLATFORM_ISSUE = "PlatformIssue"
    OTHER = "Other"
class KeyVaultSecretStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Status of the Key Vault secret."""
    INITIALIZED = "Initialized"
    WAITING_ON_CERTIFICATE_ORDER = "WaitingOnCertificateOrder"
    SUCCEEDED = "Succeeded"
    CERTIFICATE_ORDER_FAILED = "CertificateOrderFailed"
    OPERATION_NOT_PERMITTED_ON_KEY_VAULT = "OperationNotPermittedOnKeyVault"
    AZURE_SERVICE_UNAUTHORIZED_TO_ACCESS_KEY_VAULT = "AzureServiceUnauthorizedToAccessKeyVault"
    KEY_VAULT_DOES_NOT_EXIST = "KeyVaultDoesNotExist"
    KEY_VAULT_SECRET_DOES_NOT_EXIST = "KeyVaultSecretDoesNotExist"
    UNKNOWN_ERROR = "UnknownError"
    EXTERNAL_PRIVATE_KEY = "ExternalPrivateKey"
    UNKNOWN = "Unknown"
class LoadBalancingMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Specifies which endpoints to serve internally in the Virtual Network
    for the App Service Environment.
    """
    NONE = "None"
    WEB = "Web"
    PUBLISHING = "Publishing"
    WEB_PUBLISHING = "Web,Publishing"
class LogLevel(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Log level."""
    OFF = "Off"
    VERBOSE = "Verbose"
    INFORMATION = "Information"
    WARNING = "Warning"
    ERROR = "Error"
class ManagedPipelineMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Managed pipeline mode."""
    INTEGRATED = "Integrated"
    CLASSIC = "Classic"
class ManagedServiceIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Type of managed service identity."""
    SYSTEM_ASSIGNED = "SystemAssigned"
    USER_ASSIGNED = "UserAssigned"
    SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
    NONE = "None"
class MSDeployLogEntryType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Log entry type."""
    MESSAGE = "Message"
    WARNING = "Warning"
    ERROR = "Error"
class MSDeployProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Provisioning state of an MSDeploy operation."""
    ACCEPTED = "accepted"
    RUNNING = "running"
    SUCCEEDED = "succeeded"
    FAILED = "failed"
    CANCELED = "canceled"
class MySqlMigrationType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The type of migration operation to be done."""
    LOCAL_TO_REMOTE = "LocalToRemote"
    REMOTE_TO_LOCAL = "RemoteToLocal"
class NotificationLevel(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Level indicating how critical this recommendation can impact.
"""
CRITICAL = "Critical"
WARNING = "Warning"
INFORMATION = "Information"
NON_URGENT_SUGGESTION = "NonUrgentSuggestion"
class OperationStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The current status of the operation.
"""
IN_PROGRESS = "InProgress"
FAILED = "Failed"
SUCCEEDED = "Succeeded"
TIMED_OUT = "TimedOut"
CREATED = "Created"
class ProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Status of certificate order.
"""
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
IN_PROGRESS = "InProgress"
DELETING = "Deleting"
class PublicCertificateLocation(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Public Certificate Location
"""
CURRENT_USER_MY = "CurrentUserMy"
LOCAL_MACHINE_MY = "LocalMachineMy"
UNKNOWN = "Unknown"
class PublishingProfileFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Name of the format. Valid values are:
FileZilla3
WebDeploy -- default
Ftp
"""
FILE_ZILLA3 = "FileZilla3"
WEB_DEPLOY = "WebDeploy"
FTP = "Ftp"
class RedundancyMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Site redundancy mode
"""
NONE = "None"
MANUAL = "Manual"
FAILOVER = "Failover"
ACTIVE_ACTIVE = "ActiveActive"
GEO_REDUNDANT = "GeoRedundant"
class RenderingType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Rendering Type
"""
NO_GRAPH = "NoGraph"
TABLE = "Table"
TIME_SERIES = "TimeSeries"
TIME_SERIES_PER_INSTANCE = "TimeSeriesPerInstance"
class ResourceScopeType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Name of a resource type this recommendation applies, e.g. Subscription, ServerFarm, Site.
    """

    SERVER_FARM = "ServerFarm"
    SUBSCRIPTION = "Subscription"
    WEB_SITE = "WebSite"


class RouteType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The type of route this is:
    DEFAULT - By default, every app has routes to the local address ranges specified by RFC1918
    INHERITED - Routes inherited from the real Virtual Network routes
    STATIC - Static route set on the app only
    These values will be used for syncing an app's routes with those from a Virtual Network.
    """

    # Wire values are upper-case for this enum (unlike most in this file).
    DEFAULT = "DEFAULT"
    INHERITED = "INHERITED"
    STATIC = "STATIC"


class ScmType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """SCM type.
    """

    NONE = "None"
    DROPBOX = "Dropbox"
    TFS = "Tfs"
    LOCAL_GIT = "LocalGit"
    GIT_HUB = "GitHub"
    CODE_PLEX_GIT = "CodePlexGit"
    CODE_PLEX_HG = "CodePlexHg"
    BITBUCKET_GIT = "BitbucketGit"
    BITBUCKET_HG = "BitbucketHg"
    EXTERNAL_GIT = "ExternalGit"
    EXTERNAL_HG = "ExternalHg"
    ONE_DRIVE = "OneDrive"
    VSO = "VSO"
    VSTSRM = "VSTSRM"


class SiteAvailabilityState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Management information availability state for the app.
    """

    NORMAL = "Normal"
    LIMITED = "Limited"
    DISASTER_RECOVERY_MODE = "DisasterRecoveryMode"


class SiteExtensionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Site extension type.
    """

    GALLERY = "Gallery"
    WEB_ROOT = "WebRoot"


class SiteLoadBalancing(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Site load balancing.
    """

    WEIGHTED_ROUND_ROBIN = "WeightedRoundRobin"
    LEAST_REQUESTS = "LeastRequests"
    LEAST_RESPONSE_TIME = "LeastResponseTime"
    WEIGHTED_TOTAL_TRAFFIC = "WeightedTotalTraffic"
    REQUEST_HASH = "RequestHash"


class SiteRuntimeState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Runtime state of the site (docstring added; this enum had none)."""

    READY = "READY"
    STOPPED = "STOPPED"
    UNKNOWN = "UNKNOWN"
class SkuName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Name of the SKU (docstring added; this enum had none)."""

    FREE = "Free"
    SHARED = "Shared"
    BASIC = "Basic"
    STANDARD = "Standard"
    PREMIUM = "Premium"
    DYNAMIC = "Dynamic"
    ISOLATED = "Isolated"
    PREMIUM_V2 = "PremiumV2"
    ELASTIC_PREMIUM = "ElasticPremium"
    ELASTIC_ISOLATED = "ElasticIsolated"


class SolutionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Type of Solution
    """

    QUICK_SOLUTION = "QuickSolution"
    DEEP_INVESTIGATION = "DeepInvestigation"
    BEST_PRACTICES = "BestPractices"


class SslState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """SSL type.
    """

    DISABLED = "Disabled"
    SNI_ENABLED = "SniEnabled"
    IP_BASED_ENABLED = "IpBasedEnabled"


class StatusOptions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """App Service plan status.
    """

    READY = "Ready"
    PENDING = "Pending"
    CREATING = "Creating"


class SupportedTlsVersions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """MinTlsVersion: configures the minimum version of TLS required for SSL requests
    """

    # Member names encode the dotted version strings ("1.0" -> ONE0, etc.).
    ONE0 = "1.0"
    ONE1 = "1.1"
    ONE2 = "1.2"


class TriggeredWebJobStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Job status.
    """

    SUCCESS = "Success"
    FAILED = "Failed"
    ERROR = "Error"
class TriggerTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The trigger type of the function
    """

    HTTP_TRIGGER = "HttpTrigger"
    UNKNOWN = "Unknown"


class UnauthenticatedClientAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The action to take when an unauthenticated client attempts to access the app.
    """

    REDIRECT_TO_LOGIN_PAGE = "RedirectToLoginPage"
    ALLOW_ANONYMOUS = "AllowAnonymous"


class UnauthenticatedClientActionV2(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The action to take when an unauthenticated client attempts to access the app
    (V2 of :class:`UnauthenticatedClientAction`, adding explicit 401/403 responses).
    """

    REDIRECT_TO_LOGIN_PAGE = "RedirectToLoginPage"
    ALLOW_ANONYMOUS = "AllowAnonymous"
    RETURN401 = "Return401"
    RETURN403 = "Return403"


class UsageState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """State indicating whether the app has exceeded its quota usage. Read-only.
    """

    NORMAL = "Normal"
    EXCEEDED = "Exceeded"


class ValidateResourceTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Resource type used for verification.
    """

    SERVER_FARM = "ServerFarm"
    SITE = "Site"


class WebJobType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Job type.
    """

    CONTINUOUS = "Continuous"
    TRIGGERED = "Triggered"


class WorkerSizeOptions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Size of the machines.
    """

    SMALL = "Small"
    MEDIUM = "Medium"
    LARGE = "Large"
    D1 = "D1"
    D2 = "D2"
    D3 = "D3"
    NESTED_SMALL = "NestedSmall"
    DEFAULT = "Default"
| |
import os
from malwareconfig import fileparser
from malwareconfig.modules import __decoders__
def test_decoders_import():
    """Every expected decoder must be registered in __decoders__.

    Replaces 22 copy-pasted ``assert name in __decoders__.keys()`` lines with
    a single data-driven loop; ``in dict`` is the idiomatic membership test
    (``.keys()`` was redundant), and the assert message names the missing
    decoder on failure.
    """
    expected = [
        'AAR', 'AdWind', 'Adzok', 'AlienSpy', 'Alina', 'Arcom', 'BlackNix',
        'BlackShades', 'BlueBanana', 'Bozok', 'ClientMesh', 'CyberGate',
        'DarkComet', 'HawkEye', 'Jbifrost', 'JRat', 'LostDoor',
        'LuminosityLink', 'NanoCore', 'njRat', 'Sakula', 'Xtreme',
    ]
    for name in expected:
        assert name in __decoders__, name
def decode_sample(sample_path):
    """Parse the file at *sample_path* and run the matching config decoder.

    :param sample_path: path to a malware sample under tests/samples/.
    :returns: the decoder's config dict, or ``None`` when no decoder is
        registered for the detected family.  (The original fell off the end
        of the function in that case, returning ``None`` implicitly; the
        guard clause makes the contract explicit.)
    """
    file_info = fileparser.FileParser(file_path=sample_path)
    if file_info.malware_name not in __decoders__:
        return None
    module = __decoders__[file_info.malware_name]['obj']()
    module.set_file(file_info)
    module.get_config()
    return module.config
def test_aar():
    config = decode_sample("tests/samples/aar")
    assert config['Version'] == '4.x'


def test_adwind():
    config = decode_sample("tests/samples/adwind")
    assert config['Version'] == 'Adwind RAT v2.0'


def test_adzok():
    config = decode_sample("tests/samples/adzok")
    assert config['Registry Key'] == 'Winhttpsvc'


def test_alienspy():
    config = decode_sample("tests/samples/alienspy")
    assert config['pluginfoldername'] == 'ryfne6pMMZ'
#def test_alina():
# sample_path = "tests/samples/alina"
# results = decode_sample(sample_path)
# assert results['pluginfoldername'] == 'ryfne6pMMZ'
def test_arcom():
    config = decode_sample("tests/samples/arcom")
    assert config['Install Name'] == 'vlc.exe'


def test_blackshades():
    config = decode_sample("tests/samples/blackshades")
    assert config['Client Control Port'] == '5555'


def test_bluebanana():
    config = decode_sample("tests/samples/bluebanana")
    assert config['Password'] == '1111'


def test_blacknix():
    # NOTE(review): this decodes the arcom sample, not a blacknix one —
    # presumably a placeholder until a dedicated sample is added; confirm.
    config = decode_sample("tests/samples/arcom")
    assert config['Install Name'] == 'vlc.exe'


def test_bozok():
    config = decode_sample("tests/samples/bozok")
    assert config['InstallName'] == 'wmiserver.exe'


def test_clientmesh():
    config = decode_sample("tests/samples/clientmesh")
    assert config['RegistryKey'] == 'Windows Def'


def test_cybergate():
    config = decode_sample("tests/samples/cybergate")
    assert config['CampaignID'] == 'cyber'


def test_darkcomet():
    config = decode_sample("tests/samples/darkcomet")
    assert config['MUTEX'] == 'DC_MUTEX-SEJ8D2Y'


def test_darkrat():
    config = decode_sample("tests/samples/darkrat")
    assert config['Timer Interval'] == b'1000'


def test_hawkeye():
    config = decode_sample("tests/samples/hawkeye")
    assert config['Key6'] == '587'


def test_hworm():
    config = decode_sample("tests/samples/hworm/wsh-vbs")
    assert config['host'] == 'domainname.com'
#def test_jbifrost():
# sample_path = "tests/samples/jbifrost"
# results = decode_sample(sample_path)
# assert results['Key6'] == '587'
def test_jrat():
    config = decode_sample("tests/samples/jrat1")
    assert config['Persistance'] == 'false'


def test_lostdoor():
    config = decode_sample("tests/samples/lostdoor")
    assert config['Campaign'] == 'My Host'


def test_luminositylink():
    config = decode_sample("tests/samples/luminositylink")
    assert config['Install Name'] == 'sysmon.exe'


def test_luxnet():
    config = decode_sample("tests/samples/luxnet")
    assert config['domain'] == '192.168.50.102'


def test_nanocore():
    expected = {"1": "Group", "2": "Kids", "3": "Group"}
    for filename, groupname in expected.items():
        config = decode_sample(os.path.join("tests/samples/nanocore/", filename))
        # decoder returns the group name as bytes
        assert config['Group'].decode('utf-8') == groupname


def test_netwire():
    config = decode_sample("tests/samples/netwire")
    assert config['Password'] == b'Password'


def test_njrat():
    expected = {
        "05e": "0.5.0e",
        "07d": "0.7d",
        "035": "0.3.5",
        "036": "0.3.6",
        "041": "0.4.1a",
        "064": "0.6.4",
        "071": "0.7.1",
    }
    for filename, version in expected.items():
        config = decode_sample(os.path.join("tests/samples/njrat/", filename))
        assert config['version'].lower() == version


def test_remcos():
    expected = {
        "111": "1.1 Free",
        "17pro": "1.7 Pro",
        "220": "2.2.0 Light",
        "250": "2.5.0 Light",
    }
    for filename, version in expected.items():
        config = decode_sample(os.path.join("tests/samples/remcos/", filename))
        assert config['version'] == version
#def test_sakula():
# sample_path = "tests/samples/sakula"
# results = decode_sample(sample_path)
# print(results)
# assert results['Install Name'] == 'Trojan.exe'
def test_saefko():
    config = decode_sample("tests/samples/saefko")
    assert config['server_pass'] == 'toor'


def test_spynote():
    expected = {"spynote5": "5.0", "spynote6.4": "2.1.2.79"}
    for filename, version in expected.items():
        config = decode_sample(os.path.join("tests/samples/spynote/", filename))
        assert config['Version'] == version


def test_xtreme():
    config = decode_sample("tests/samples/xtreme")
    assert config['ID'] == 'hack'
| |
#----------------------------------------------------------------------
# Copyright (c) 2008 Board of Trustees, Princeton University
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
from __future__ import absolute_import
import re
from .faults import SfaAPIError
# for convenience and smoother translation - we should get rid of these functions eventually


def get_leaf(hrn):
    return Xrn(hrn).get_leaf()


def get_authority(hrn):
    return Xrn(hrn).get_authority_hrn()


def urn_to_hrn(urn):
    xrn = Xrn(urn)
    return (xrn.hrn, xrn.type)


def hrn_to_urn(hrn, type):
    return Xrn(hrn, type=type).urn


def hrn_authfor_hrn(parenthrn, hrn):
    return Xrn.hrn_is_auth_for_hrn(parenthrn, hrn)
class Xrn:
    """Convert between the two SFA identifier forms and keep them in sync.

    An *HRN* is a dotted human-readable name (literal dots inside a piece are
    escaped as '\\.'); a *URN* is the 'urn:publicid:IDN+...' public-id form.
    An instance keeps self.urn, self.hrn and self.type consistent: construct
    it from either a URN, or an HRN plus a type.
    NOTE(review): legacy Python 2 code (raise-with-comma syntax throughout).
    """

    ########## basic tools on HRNs
    # split a HRN-like string into pieces
    # this is like split('.') except for escaped (backslashed) dots
    # e.g. hrn_split ('a\.b.c.d') -> [ 'a\.b','c','d']
    @staticmethod
    def hrn_split(hrn):
        # temporarily swap escaped dots for a sentinel so plain split works
        return [ x.replace('--sep--','\\.') for x in hrn.replace('\\.','--sep--').split('.') ]

    # e.g. hrn_leaf ('a\.b.c.d') -> 'd'
    @staticmethod
    def hrn_leaf(hrn): return Xrn.hrn_split(hrn)[-1]

    # e.g. hrn_auth_list ('a\.b.c.d') -> ['a\.b', 'c']
    @staticmethod
    def hrn_auth_list(hrn): return Xrn.hrn_split(hrn)[0:-1]

    # e.g. hrn_auth ('a\.b.c.d') -> 'a\.b.c'
    @staticmethod
    def hrn_auth(hrn): return '.'.join(Xrn.hrn_auth_list(hrn))

    # e.g. escape ('a.b') -> 'a\.b'
    @staticmethod
    def escape(token): return re.sub(r'([^\\])\.', r'\1\.', token)

    # e.g. unescape ('a\.b') -> 'a.b'
    @staticmethod
    def unescape(token): return token.replace('\\.','.')

    # Return the HRN authority chain from top to bottom.
    # e.g. hrn_auth_chain('a\.b.c.d') -> ['a\.b', 'a\.b.c']
    @staticmethod
    def hrn_auth_chain(hrn):
        parts = Xrn.hrn_auth_list(hrn)
        chain = []
        for i in range(len(parts)):
            chain.append('.'.join(parts[:i+1]))
        # Include the HRN itself?
        #chain.append(hrn)
        return chain

    # Is the given HRN a true authority over the namespace of the other
    # child HRN?
    # A better alternative than childHRN.startswith(parentHRN)
    # e.g. hrn_is_auth_for_hrn('a\.b', 'a\.b.c.d') -> True,
    # but hrn_is_auth_for_hrn('a', 'a\.b.c.d') -> False
    # Also hrn_is_auth_for_hrn('a\.b.c.d', 'a\.b.c.d') -> True
    @staticmethod
    def hrn_is_auth_for_hrn(parenthrn, hrn):
        if parenthrn == hrn:
            return True
        for auth in Xrn.hrn_auth_chain(hrn):
            if parenthrn == auth:
                return True
        return False

    ########## basic tools on URNs
    URN_PREFIX = "urn:publicid:IDN"
    URN_PREFIX_lower = "urn:publicid:idn"

    @staticmethod
    def is_urn (text):
        # case-insensitive prefix test
        return text.lower().startswith(Xrn.URN_PREFIX_lower)

    @staticmethod
    def urn_full (urn):
        if Xrn.is_urn(urn): return urn
        else: return Xrn.URN_PREFIX+urn

    @staticmethod
    def urn_meaningful (urn):
        # strip the 'urn:publicid:IDN' prefix, leaving the '+'-separated tail
        if Xrn.is_urn(urn): return urn[len(Xrn.URN_PREFIX):]
        else: return urn

    @staticmethod
    def urn_split (urn):
        return Xrn.urn_meaningful(urn).split('+')

    ####################
    # the local fields that are kept consistent
    # self.urn
    # self.hrn
    # self.type
    # self.path
    # provide either urn, or (hrn + type)
    def __init__ (self, xrn, type=None, id=None):
        if not xrn: xrn = ""
        # user has specified xrn : guess if urn or hrn
        self.id = id
        self.type = type
        if Xrn.is_urn(xrn):
            self.hrn=None
            self.urn=xrn
            self.urn_to_hrn()
            if id:
                # recompute the urn so the id gets folded into it
                self.hrn_to_urn()
        else:
            self.urn=None
            self.hrn=xrn
            self.type=type
            self.hrn_to_urn()
        self._normalize()
# happens all the time ..
#        if not type:
#            debug_logger.debug("type-less Xrn's are not safe")

    def __repr__ (self):
        result="<XRN u=%s h=%s"%(self.urn,self.hrn)
        if hasattr(self,'leaf'): result += " leaf=%s"%self.leaf
        if hasattr(self,'authority'): result += " auth=%s"%self.authority
        result += ">"
        return result

    def get_urn(self): return self.urn
    def get_hrn(self): return self.hrn
    def get_type(self): return self.type
    def get_hrn_type(self): return (self.hrn, self.type)

    def _normalize(self):
        # lazily derive self.leaf / self.authority from self.hrn
        if self.hrn is None: raise SfaAPIError, "Xrn._normalize"
        if not hasattr(self,'leaf'):
            self.leaf=Xrn.hrn_split(self.hrn)[-1]
        # self.authority keeps a list
        if not hasattr(self,'authority'):
            self.authority=Xrn.hrn_auth_list(self.hrn)

    def get_leaf(self):
        self._normalize()
        return self.leaf

    def get_authority_hrn(self):
        self._normalize()
        return '.'.join( self.authority )

    def get_authority_urn(self):
        # URN form joins authority pieces with ':' and uses unescaped dots
        self._normalize()
        return ':'.join( [Xrn.unescape(x) for x in self.authority] )

    def set_authority(self, authority):
        """
        update the authority section of an existing urn
        """
        authority_hrn = self.get_authority_hrn()
        if not authority_hrn.startswith(authority+"."):
            self.hrn = authority + "." + self.hrn
        self.hrn_to_urn()
        self._normalize()

    def urn_to_hrn(self):
        """
        compute tuple (hrn, type) from urn
        """
#        if not self.urn or not self.urn.startswith(Xrn.URN_PREFIX):
        if not Xrn.is_urn(self.urn):
            raise SfaAPIError, "Xrn.urn_to_hrn"
        parts = Xrn.urn_split(self.urn)
        type=parts.pop(2)
        # Remove the authority name (e.g. '.sa')
        if type == 'authority':
            name = parts.pop()
            # Drop the sa. This is a bad hack, but its either this
            # or completely change how record types are generated/stored
            if name != 'sa':
                type = type + "+" + name
                name =""
        else:
            name = parts.pop(len(parts)-1)
        # convert parts (list) into hrn (str) by doing the following
        # 1. remove blank parts
        # 2. escape dots inside parts
        # 3. replace ':' with '.' inside parts
        # 3. join parts using '.'
        hrn = '.'.join([Xrn.escape(part).replace(':','.') for part in parts if part])
        # dont replace ':' in the name section
        if name:
            parts = name.split(':')
            if len(parts) > 1:
                # everything after the first ':' is the object id
                self.id = ":".join(parts[1:])
                name = parts[0]
            hrn += '.%s' % Xrn.escape(name)
        self.hrn=str(hrn)
        self.type=str(type)

    def hrn_to_urn(self):
        """
        compute urn from (hrn, type)
        """
#        if not self.hrn or self.hrn.startswith(Xrn.URN_PREFIX):
        if Xrn.is_urn(self.hrn):
            raise SfaAPIError, "Xrn.hrn_to_urn, hrn=%s"%self.hrn
        if self.type and self.type.startswith('authority'):
            self.authority = Xrn.hrn_auth_list(self.hrn)
            leaf = self.get_leaf()
#            if not self.authority:
#                self.authority = [self.hrn]
            type_parts = self.type.split("+")
            self.type = type_parts[0]
            name = 'sa'
            if len(type_parts) > 1:
                name = type_parts[1]
            auth_parts = [part for part in [self.get_authority_urn(), leaf] if part]
            authority_string = ":".join(auth_parts)
        else:
            self.authority = Xrn.hrn_auth_list(self.hrn)
            name = Xrn.hrn_leaf(self.hrn)
            # separate name from id
            authority_string = self.get_authority_urn()
        if self.type == None:
            urn = "+".join(['',authority_string,Xrn.unescape(name)])
        else:
            urn = "+".join(['',authority_string,self.type,Xrn.unescape(name)])
        if hasattr(self, 'id') and self.id:
            urn = "%s-%s" % (urn, self.id)
        self.urn = Xrn.URN_PREFIX + urn

    def dump_string(self):
        result="-------------------- XRN\n"
        result += "URN=%s\n"%self.urn
        result += "HRN=%s\n"%self.hrn
        result += "TYPE=%s\n"%self.type
        result += "LEAF=%s\n"%self.get_leaf()
        result += "AUTH(hrn format)=%s\n"%self.get_authority_hrn()
        result += "AUTH(urn format)=%s\n"%self.get_authority_urn()
        return result
| |
"""
sphinx.util.osutil
~~~~~~~~~~~~~~~~~~
Operating system-related utility functions for Sphinx.
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import contextlib
import errno
import filecmp
import os
import re
import shutil
import sys
import time
import warnings
from io import StringIO
from os import path
from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
if False:
    # For type annotation
    from typing import Any, Iterator, List, Tuple  # NOQA

# Errnos that we need.
# These aliases are kept only for backward compatibility (tagged
# RemovedInSphinx40Warning) and default to 0 on platforms lacking them.
EEXIST = getattr(errno, 'EEXIST', 0)  # RemovedInSphinx40Warning
ENOENT = getattr(errno, 'ENOENT', 0)  # RemovedInSphinx40Warning
EPIPE = getattr(errno, 'EPIPE', 0)  # RemovedInSphinx40Warning
EINVAL = getattr(errno, 'EINVAL', 0)  # RemovedInSphinx40Warning

# SEP separates path elements in the canonical file names
#
# Define SEP as a manifest constant, not so much because we expect it to change
# in the future as to avoid the suspicion that a stray "/" in the code is a
# hangover from more *nix-oriented origins.
SEP = "/"
def os_path(canonicalpath):
    # type: (str) -> str
    """Convert a canonical ('/'-separated) path into a native OS path."""
    return canonicalpath.replace("/", path.sep)
def canon_path(nativepath):
    # type: (str) -> str
    """Return path in OS-independent (canonical, '/'-separated) form."""
    return nativepath.replace(path.sep, "/")
def relative_uri(base, to):
    # type: (str, str) -> str
    """Return a relative URL from ``base`` to ``to``."""
    if to.startswith("/"):
        # absolute target: nothing to relativize
        return to
    base_parts = base.split("/")
    to_parts = to.split("/")
    # drop leading segments shared by both URLs; the final segment of each
    # (the file name) never counts as shared
    while (len(base_parts) > 1 and len(to_parts) > 1
           and base_parts[0] == to_parts[0]):
        base_parts.pop(0)
        to_parts.pop(0)
    if base_parts == to_parts:
        # Special case: relative_uri('f/index.html','f/index.html')
        # returns '', not 'index.html'
        return ''
    if len(base_parts) == 1 and to_parts == ['']:
        # Special case: relative_uri('f/index.html','f/') should
        # return './', not ''
        return './'
    return '../' * (len(base_parts) - 1) + '/'.join(to_parts)
def ensuredir(path):
    # type: (str) -> None
    """Ensure that a path exists.

    Thin wrapper over :func:`os.makedirs` with ``exist_ok=True``: creates all
    missing parents and is a no-op if the directory already exists.
    NOTE: the *path* parameter shadows the ``os.path`` module imported at
    file level; kept for API compatibility.
    """
    os.makedirs(path, exist_ok=True)
def walk(top, topdown=True, followlinks=False):
    # type: (str, bool, bool) -> Iterator[Tuple[str, List[str], List[str]]]
    """Deprecated alias for :func:`os.walk`; warns, then delegates unchanged."""
    warnings.warn('sphinx.util.osutil.walk() is deprecated for removal. '
                  'Please use os.walk() instead.',
                  RemovedInSphinx40Warning)
    return os.walk(top, topdown=topdown, followlinks=followlinks)
def mtimes_of_files(dirnames, suffix):
    # type: (List[str], str) -> Iterator[float]
    """Yield the mtime of every file under *dirnames* whose name ends with *suffix*.

    Files that vanish or are unreadable between listing and stat are skipped.
    """
    for dirname in dirnames:
        for root, _dirs, files in os.walk(dirname):
            for filename in files:
                if not filename.endswith(suffix):
                    continue
                try:
                    yield path.getmtime(path.join(root, filename))
                except OSError:
                    pass
def movefile(source, dest):
    # type: (str, str) -> None
    """Move a file, removing the destination if it exists.

    Uses :func:`os.replace`, which overwrites *dest* atomically on every
    platform.  The previous unlink-then-rename sequence left a window in
    which *dest* did not exist, and silently swallowed unlink failures only
    to fail later in ``os.rename`` on Windows.
    """
    os.replace(source, dest)
def copytimes(source, dest):
    # type: (str, str) -> None
    """Copy a file's access and modification times from *source* to *dest*."""
    stat_result = os.stat(source)
    if hasattr(os, 'utime'):
        os.utime(dest, (stat_result.st_atime, stat_result.st_mtime))
def copyfile(source, dest):
    # type: (str, str) -> None
    """Copy a file and its modification times, if possible.

    Note: ``copyfile`` skips copying if the file has not been changed"""
    if path.exists(dest) and filecmp.cmp(source, dest):
        # identical content already present: leave dest (and its mtime) alone
        return
    shutil.copyfile(source, dest)
    try:
        # don't do full copystat because the source may be read-only
        copytimes(source, dest)
    except OSError:
        pass
# characters that are NOT safe in generated file names
no_fn_re = re.compile(r'[^a-zA-Z0-9_-]')
# trailing " Documentation" suffix stripped from project names
project_suffix_re = re.compile(' Documentation$')
def make_filename(string):
    # type: (str) -> str
    """Strip filename-unsafe characters; fall back to 'sphinx' if nothing is left."""
    cleaned = re.sub(r'[^a-zA-Z0-9_-]', '', string)
    return cleaned if cleaned else 'sphinx'
def make_filename_from_project(project):
    # type: (str) -> str
    """Lower-cased safe filename for *project*, minus any trailing ' Documentation'."""
    stripped = re.sub(' Documentation$', '', project)
    # inline of make_filename(): drop unsafe characters, default to 'sphinx'
    return (re.sub(r'[^a-zA-Z0-9_-]', '', stripped) or 'sphinx').lower()
def ustrftime(format, *args):
    # type: (str, Any) -> str
    """[DEPRECATED] strftime for unicode strings.

    When no time tuple is supplied, honours the $SOURCE_DATE_EPOCH
    environment variable (reproducible builds); works around the Windows
    ``time.strftime`` UnicodeEncodeError by round-tripping the format
    through 'unicode-escape'.
    """
    warnings.warn('sphinx.util.osutil.ustrtime is deprecated for removal',
                  RemovedInSphinx30Warning, stacklevel=2)
    if not args:
        # If time is not specified, try to use $SOURCE_DATE_EPOCH variable
        # See https://wiki.debian.org/ReproducibleBuilds/TimestampsProposal
        source_date_epoch = os.getenv('SOURCE_DATE_EPOCH')
        if source_date_epoch is not None:
            time_struct = time.gmtime(float(source_date_epoch))
            args = [time_struct]  # type: ignore
    # On Windows, time.strftime() and Unicode characters will raise UnicodeEncodeError.
    # https://bugs.python.org/issue8304
    try:
        return time.strftime(format, *args)
    except UnicodeEncodeError:
        # escape non-ASCII, format, then unescape the result back
        r = time.strftime(format.encode('unicode-escape').decode(), *args)
        return r.encode().decode('unicode-escape')
def relpath(path, start=os.curdir):
    # type: (str, str) -> str
    """Return a relative filepath to *path* either from the current directory or
    from an optional *start* directory.

    This is an alternative of ``os.path.relpath()``. This returns original path
    if *path* and *start* are on different drives (for Windows platform).
    """
    try:
        relative = os.path.relpath(path, start)
    except ValueError:
        # different drives on Windows: no relative form exists
        return path
    return relative
safe_relpath = relpath  # for compatibility: old name of relpath(), kept for callers
# filesystem encoding used by abspath() when decoding bytes paths
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
def abspath(pathdir):
    # type: (str) -> str
    """Absolutize *pathdir*, decoding a bytes result via the filesystem encoding."""
    pathdir = path.abspath(pathdir)
    if not isinstance(pathdir, bytes):
        return pathdir
    try:
        return pathdir.decode(fs_encoding)
    except UnicodeDecodeError:
        # NOTE(review): this constructor call matches the original code but
        # UnicodeDecodeError actually requires five arguments — preserved as-is.
        raise UnicodeDecodeError('multibyte filename not supported on '
                                 'this filesystem encoding '
                                 '(%r)' % fs_encoding)
def getcwd():
    # type: () -> str
    """Deprecated alias for :func:`os.getcwd`; warns, then delegates unchanged."""
    warnings.warn('sphinx.util.osutil.getcwd() is deprecated. '
                  'Please use os.getcwd() instead.',
                  RemovedInSphinx40Warning)
    return os.getcwd()
@contextlib.contextmanager
def cd(target_dir):
    # type: (str) -> Iterator[None]
    """Temporarily change the working directory to *target_dir*, restoring it on exit."""
    previous = os.getcwd()
    try:
        os.chdir(target_dir)
        yield
    finally:
        os.chdir(previous)
class FileAvoidWrite:
    """File-like object that buffers output and only writes if content changed.

    Use this class like when writing to a file to avoid touching the original
    file if the content hasn't changed. This is useful in scenarios where file
    mtime is used to invalidate caches or trigger new behavior.

    When writing to this file handle, all writes are buffered until the object
    is closed.

    Objects can be used as context managers.
    """

    def __init__(self, path):
        # type: (str) -> None
        self._path = path
        self._io = None  # type: StringIO  # created lazily by write()

    def write(self, data):
        # type: (str) -> None
        if self._io is None:
            self._io = StringIO()
        self._io.write(data)

    def close(self):
        # type: () -> None
        """Stop accepting writes and write file, if needed."""
        if self._io is None:
            raise Exception('FileAvoidWrite does not support empty files.')

        new_content = self.getvalue()
        self._io.close()

        try:
            with open(self._path) as existing:
                if existing.read() == new_content:
                    # content unchanged: keep the old file (and its mtime)
                    return
        except OSError:
            # destination missing or unreadable: fall through and (re)write it
            pass

        with open(self._path, 'w') as destination:
            destination.write(new_content)

    def __enter__(self):
        # type: () -> FileAvoidWrite
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # type: (str, str, str) -> None
        self.close()

    def __getattr__(self, name):
        # type: (str) -> Any
        # Proxy every other attribute to the underlying StringIO buffer.
        if not self._io:
            raise Exception('Must write to FileAvoidWrite before other '
                            'methods can be used')
        return getattr(self._io, name)
def rmtree(path):
    # type: (str) -> None
    """Remove *path*: a whole tree if it is a directory, else a single file."""
    if not os.path.isdir(path):
        os.remove(path)
    else:
        shutil.rmtree(path)
| |
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""Contains the main functions/classes for creating, maintaining, and using
an index.
"""
from __future__ import division
import os.path, re, sys
from time import time, sleep
from whoosh import __version__
from whoosh.legacy import toc_loaders
from whoosh.compat import pickle, string_type
from whoosh.fields import ensure_schema
from whoosh.system import _INT_SIZE, _FLOAT_SIZE, _LONG_SIZE
# Index name used when the caller does not specify one (see create_in/open_dir).
_DEF_INDEX_NAME = "MAIN"
# Version number of the on-disk TOC format written by this release.
_CURRENT_TOC_VERSION = -111
# Exceptions


class LockError(Exception):
    # NOTE(review): raised on lock-acquisition failures; the locking
    # machinery itself lives elsewhere in the package — confirm usage there.
    pass


class IndexError(Exception):
    """Generic index error."""
    # NOTE: deliberately shadows the builtin IndexError within this module.


class IndexVersionError(IndexError):
    """Raised when you try to open an index using a format that the current
    version of Whoosh cannot read. That is, when the index you're trying to
    open is either not backward or forward compatible with this version of
    Whoosh.
    """

    def __init__(self, msg, version, release=None):
        # msg: human-readable description; version: on-disk TOC format number;
        # release: Whoosh release tuple that wrote the index, if known.
        Exception.__init__(self, msg)
        self.version = version
        self.release = release


class OutOfDateError(IndexError):
    """Raised when you try to commit changes to an index which is not the
    latest generation.
    """


class EmptyIndexError(IndexError):
    """Raised when you try to work with an index that has no indexed terms.
    """
# Convenience functions
def create_in(dirname, schema, indexname=None):
    """Convenience function to create an index in a directory. Takes care of
    creating a FileStorage object for you.

    :param dirname: the path string of the directory in which to create the
        index.
    :param schema: a :class:`whoosh.fields.Schema` object describing the
        index's fields.
    :param indexname: the name of the index to create; you only need to specify
        this if you are creating multiple indexes within the same storage
        object.
    :returns: :class:`Index`
    """
    from whoosh.filedb.filestore import FileStorage

    storage = FileStorage(dirname)
    return FileIndex.create(storage, schema, indexname or _DEF_INDEX_NAME)
def open_dir(dirname, indexname=None, readonly=False, schema=None):
    """Convenience function for opening an index in a directory. Takes care of
    creating a FileStorage object for you. dirname is the filename of the
    directory in containing the index. indexname is the name of the index to
    create; you only need to specify this if you have multiple indexes within
    the same storage object.

    :param dirname: the path string of the directory in which to create the
        index.
    :param indexname: the name of the index to create; you only need to specify
        this if you have multiple indexes within the same storage object.
    """
    from whoosh.filedb.filestore import FileStorage

    if indexname is None:
        indexname = _DEF_INDEX_NAME
    storage = FileStorage(dirname, readonly=readonly)
    return FileIndex(storage, schema=schema, indexname=indexname)
def exists_in(dirname, indexname=None):
    """Returns True if dirname contains a Whoosh index.

    :param dirname: the file path of a directory.
    :param indexname: the name of the index. If None, the default index name is
        used.
    """
    if not os.path.exists(dirname):
        return False
    try:
        ix = open_dir(dirname, indexname=indexname)
    except EmptyIndexError:
        return False
    return ix.latest_generation() > -1
def exists(storage, indexname=None):
    """Deprecated; use ``storage.index_exists()``.

    :param storage: a store.Storage object.
    :param indexname: the name of the index. If None, the default index name is
        used.
    """
    # Kept as a thin delegating shim for backward compatibility.
    return storage.index_exists(indexname)
def version_in(dirname, indexname=None):
    """Returns a tuple of (release_version, format_version) for the index in
    the given directory: release_version is the release version number of the
    Whoosh code that created the index -- e.g. (0, 1, 24) -- and
    format_version is the version number of the on-disk format used for the
    index -- e.g. -102.

    You should avoid attaching significance to the second number (the index
    version). This is simply a version number for the TOC file and probably
    should not have been exposed in a public interface. The best way to check
    if the current version of Whoosh can open an index is to actually try to
    open it and see if it raises a ``whoosh.index.IndexVersionError``
    exception.

    Note that the release and format version are available as attributes on
    the Index object in Index.release and Index.version.

    :param dirname: the file path of a directory containing an index.
    :param indexname: the name of the index. If None, the default index name
        is used.
    :returns: ((major_ver, minor_ver, build_ver), format_ver)
    """
    from whoosh.filedb.filestore import FileStorage

    return version(FileStorage(dirname), indexname=indexname)
def version(storage, indexname=None):
    """Returns a tuple of (release_version, format_version), where
    release_version is the release version number of the Whoosh code that
    created the index -- e.g. (0, 1, 24) -- and format_version is the version
    number of the on-disk format used for the index -- e.g. -102.

    You should avoid attaching significance to the second number (the index
    version). This is simply a version number for the TOC file and probably
    should not have been exposed in a public interface. The best way to check
    if the current version of Whoosh can open an index is to actually try to
    open it and see if it raises a ``whoosh.index.IndexVersionError``
    exception.

    Note that the release and format version are available as attributes on
    the Index object in Index.release and Index.version.

    :param storage: a store.Storage object.
    :param indexname: the name of the index. If None, the default index name
        is used.
    :returns: ((major_ver, minor_ver, build_ver), format_ver)
    """
    if indexname is None:
        indexname = _DEF_INDEX_NAME
    try:
        ix = storage.open_index(indexname)
    except IndexVersionError:
        # The index is too old/new to open, but the error carries the
        # on-disk format version, which we can still report.
        err = sys.exc_info()[1]
        return (None, err.version)
    return (ix.release, ix.version)
# Index base class
class Index(object):
    """Represents an indexed collection of documents.

    This is an abstract base class: concrete backends override
    :meth:`reader` and :meth:`writer`, and the convenience methods here are
    implemented on top of those two.
    """

    def close(self):
        """Closes any open resources held by the Index object itself.

        This may not close all resources being used everywhere, for example
        by a Searcher object.
        """
        pass

    def add_field(self, fieldname, fieldspec):
        """Adds a field to the index's schema.

        :param fieldname: the name of the field to add.
        :param fieldspec: an instantiated :class:`whoosh.fields.FieldType`
            object.
        """
        wr = self.writer()
        wr.add_field(fieldname, fieldspec)
        wr.commit()

    def remove_field(self, fieldname):
        """Removes the named field from the index's schema. Depending on the
        backend implementation, this may or may not actually remove existing
        data for the field from the index. Optimizing the index should
        always clear out existing data for a removed field.
        """
        wr = self.writer()
        wr.remove_field(fieldname)
        wr.commit()

    def latest_generation(self):
        """Returns the generation number of the latest generation of this
        index, or -1 if the backend doesn't support versioning.
        """
        return -1

    def refresh(self):
        """Returns a new Index object representing the latest generation of
        this index (if this object is the latest generation, or the backend
        doesn't support versioning, returns self).

        :returns: :class:`Index`
        """
        return self

    def up_to_date(self):
        """Returns True if this object represents the latest generation of
        this index. Returns False if this object is not the latest
        generation (that is, someone else has updated the index since you
        opened this object).
        """
        return True

    def last_modified(self):
        """Returns the last modified time of the index, or -1 if the backend
        doesn't support last-modified times.
        """
        return -1

    def is_empty(self):
        """Returns True if this index is empty (that is, it has never had
        any documents successfully written to it).
        """
        raise NotImplementedError

    def optimize(self):
        """Optimizes this index, if necessary."""
        pass

    def doc_count_all(self):
        """Returns the total number of documents, DELETED OR UNDELETED, in
        this index.
        """
        rdr = self.reader()
        try:
            return rdr.doc_count_all()
        finally:
            rdr.close()

    def doc_count(self):
        """Returns the total number of UNDELETED documents in this index."""
        rdr = self.reader()
        try:
            return rdr.doc_count()
        finally:
            rdr.close()

    def searcher(self, **kwargs):
        """Returns a Searcher object for this index. Keyword arguments are
        passed to the Searcher object's constructor.

        :rtype: :class:`whoosh.searching.Searcher`
        """
        from whoosh.searching import Searcher
        return Searcher(self.reader(), fromindex=self, **kwargs)

    def field_length(self, fieldname):
        """Returns the total length of the field across all documents."""
        rdr = self.reader()
        try:
            return rdr.field_length(fieldname)
        finally:
            rdr.close()

    def max_field_length(self, fieldname):
        """Returns the maximum length of the field across all documents."""
        rdr = self.reader()
        try:
            return rdr.max_field_length(fieldname)
        finally:
            rdr.close()

    def reader(self, reuse=None):
        """Returns an IndexReader object for this index.

        :param reuse: an existing reader. Some implementations may recycle
            resources from this existing reader to create the new reader.
            Note that any resources in the "recycled" reader that are not
            used by the new reader will be CLOSED, so you CANNOT use it
            afterward.
        :rtype: :class:`whoosh.reading.IndexReader`
        """
        raise NotImplementedError

    def writer(self, **kwargs):
        """Returns an IndexWriter object for this index.

        :rtype: :class:`whoosh.writing.IndexWriter`
        """
        raise NotImplementedError

    def delete_by_term(self, fieldname, text, searcher=None):
        # Convenience: delete matching documents and commit immediately.
        wr = self.writer()
        wr.delete_by_term(fieldname, text, searcher=searcher)
        wr.commit()

    def delete_by_query(self, q, searcher=None):
        # Convenience: delete matching documents and commit immediately.
        wr = self.writer()
        wr.delete_by_query(q, searcher=searcher)
        wr.commit()
# Codec-based index implementation
def clean_files(storage, indexname, gen, segments):
    # Attempt to delete index files that are no longer referenced by the
    # newly created generation. On some platforms (e.g. Windows) files that
    # other Index/reader objects still hold open cannot be removed right
    # away; they will be picked up by a later invocation of clean_files.
    live_ids = set(seg.segment_id() for seg in segments)
    toc_re = TOC._pattern(indexname)
    seg_re = TOC._segment_pattern(indexname)

    doomed = set()
    for fname in storage:
        if fname.startswith("."):
            continue
        toc_match = toc_re.match(fname)
        if toc_match:
            # Stale TOC files: everything but the current generation
            if int(toc_match.group(1)) != gen:
                doomed.add(fname)
            continue
        seg_match = seg_re.match(fname)
        if seg_match and seg_match.group(1) not in live_ids:
            # Segment files not referenced by the current segment list
            doomed.add(fname)

    for fname in doomed:
        try:
            storage.delete_file(fname)
        except OSError:
            # Another process still has this file open, I guess
            pass
class FileIndex(Index):
    """On-disk implementation of :class:`Index`.
    The committed state of the index lives in one TOC ("table of contents")
    file per commit generation inside a whoosh Storage object; see the
    ``TOC`` class below for the file format.
    """
    def __init__(self, storage, schema=None, indexname=_DEF_INDEX_NAME):
        """
        :param storage: a ``whoosh.filedb.filestore.Storage`` object.
        :param schema: optional schema that overrides the one pickled inside
            the index's TOC file.
        :param indexname: name of the index within the storage object.
        :raises ValueError: if ``storage`` or ``indexname`` has the wrong
            type.
        """
        from whoosh.filedb.filestore import Storage
        if not isinstance(storage, Storage):
            raise ValueError("%r is not a Storage object" % storage)
        if not isinstance(indexname, string_type):
            raise ValueError("indexname %r is not a string" % indexname)
        if schema:
            schema = ensure_schema(schema)
        self.storage = storage
        self._schema = schema
        self.indexname = indexname
        # Try reading the TOC to see if it's possible; this surfaces
        # missing/corrupt/incompatible indexes immediately at open time
        TOC.read(self.storage, self.indexname, schema=self._schema)
    @classmethod
    def create(cls, storage, schema, indexname=_DEF_INDEX_NAME):
        # Write a fresh, empty generation-0 TOC (clearing any previous index
        # files for this name) and return an index opened on it
        TOC.create(storage, schema, indexname)
        return cls(storage, schema, indexname)
    def __repr__(self):
        return "%s(%r, %r)" % (self.__class__.__name__,
                               self.storage, self.indexname)
    def close(self):
        # Nothing to release here; readers and writers manage their own
        # resources
        pass
    # add_field, remove_field: inherited from Index
    def latest_generation(self):
        # Highest generation number found among the TOC files on disk
        return TOC._latest_generation(self.storage, self.indexname)
    # refresh, up_to_date: inherited from Index
    def last_modified(self):
        # Modification time of the newest TOC file
        gen = self.latest_generation()
        filename = TOC._filename(self.indexname, gen)
        return self.storage.file_modified(filename)
    def is_empty(self):
        # Empty means the current TOC references no segments at all
        return len(self._read_toc().segments) == 0
    def optimize(self, **kwargs):
        w = self.writer(**kwargs)
        w.commit(optimize=True)
    # searcher: inherited from Index
    def writer(self, procs=1, **kwargs):
        """Returns an IndexWriter for this index: a multiprocessing MpWriter
        when ``procs`` > 1, otherwise a single-process SegmentWriter.
        """
        if procs > 1:
            from whoosh.multiproc import MpWriter
            return MpWriter(self, procs=procs, **kwargs)
        else:
            from whoosh.writing import SegmentWriter
            return SegmentWriter(self, **kwargs)
    def lock(self, name):
        """Returns a lock object that you can try to call acquire() on to
        lock the index.
        """
        return self.storage.lock(self.indexname + "_" + name)
    def _read_toc(self):
        # Always re-read from storage so we see the latest committed state
        return TOC.read(self.storage, self.indexname, schema=self._schema)
    def _segments(self):
        return self._read_toc().segments
    def _current_schema(self):
        return self._read_toc().schema
    @property
    def schema(self):
        # Schema as of the latest commit (or the override passed to the
        # constructor)
        return self._current_schema()
    @property
    def release(self):
        # Whoosh release tuple recorded in the current TOC
        return self._read_toc().release
    @property
    def version(self):
        # On-disk TOC format version of the current TOC
        return self._read_toc().version
    @classmethod
    def _reader(cls, storage, schema, segments, generation, reuse=None):
        # Returns a reader for the given segments, possibly reusing already
        # opened readers
        from whoosh.reading import SegmentReader, MultiReader, EmptyReader
        reusable = {}
        try:
            if len(segments) == 0:
                # This index has no segments! Return an EmptyReader object,
                # which simply returns empty or zero to every method
                return EmptyReader(schema)
            if reuse:
                # Put all atomic readers in a dictionary keyed by their
                # generation, so we can re-use them if possible
                readers = [r for r, _ in reuse.leaf_readers()]
                reusable = dict((r.generation(), r) for r in readers)
            # Make a function to open readers, which reuses reusable readers.
            # It removes any readers it reuses from the "reusable" dictionary,
            # so later we can close any readers left in the dictionary.
            def segreader(segment):
                segid = segment.segment_id()
                if segid in reusable:
                    r = reusable[segid]
                    del reusable[segid]
                    return r
                else:
                    return SegmentReader(storage, schema, segment,
                                         generation=generation)
            if len(segments) == 1:
                # This index has one segment, so return a SegmentReader object
                # for the segment
                return segreader(segments[0])
            else:
                # This index has multiple segments, so create a list of
                # SegmentReaders for the segments, then composite them with a
                # MultiReader
                readers = [segreader(segment) for segment in segments]
                return MultiReader(readers, generation=generation)
        finally:
            # Per the Index.reader() contract, any recycled readers we did
            # NOT reuse must be closed before returning
            for r in reusable.values():
                r.close()
    def reader(self, reuse=None):
        """Opens an IndexReader for the current generation, retrying briefly
        when a concurrent writer deletes files while we are reading the TOC.
        """
        retries = 10
        while retries > 0:
            # Read the information from the TOC file
            try:
                info = self._read_toc()
                return self._reader(self.storage, info.schema, info.segments,
                                    info.generation, reuse=reuse)
            except IOError:
                # Presume that we got a "file not found error" because a writer
                # deleted one of the files just as we were trying to open it,
                # and so retry a few times before actually raising the
                # exception
                e = sys.exc_info()[1]
                retries -= 1
                if retries <= 0:
                    raise e
                sleep(0.05)
# TOC class
class TOC(object):
    """Object representing the state of the index after a commit. Essentially
    a container for the index's schema and the list of segment objects.
    """
    def __init__(self, schema, segments, generation,
                 version=_CURRENT_TOC_VERSION, release=__version__):
        # schema: the field schema in effect at this commit
        # segments: list of segment objects that make up the index
        # generation: integer commit generation this TOC describes
        # version: on-disk TOC format version number
        # release: (major, minor, build) of the Whoosh that wrote the TOC
        self.schema = schema
        self.segments = segments
        self.generation = generation
        self.version = version
        self.release = release
    @classmethod
    def _filename(cls, indexname, gen):
        # Name of the TOC file for a given index name and generation,
        # e.g. "_MAIN_0.toc"
        return "_%s_%s.toc" % (indexname, gen)
    @classmethod
    def _pattern(cls, indexname):
        # Regex matching this index's TOC filenames; group(1) is the
        # generation number
        return re.compile("^_%s_([0-9]+).toc$" % indexname)
    @classmethod
    def _segment_pattern(cls, indexname):
        # Regex matching this index's segment filenames; group(1) is the
        # segment id
        return re.compile("(%s_[0-9a-z]+)[.][A-Za-z0-9_.]+" % indexname)
    @classmethod
    def _latest_generation(cls, storage, indexname):
        # Scan the storage for TOC files and return the highest generation
        # found, or -1 if there are none
        pattern = cls._pattern(indexname)
        mx = -1
        for filename in storage:
            m = pattern.match(filename)
            if m:
                mx = max(int(m.group(1)), mx)
        return mx
    @classmethod
    def create(cls, storage, schema, indexname=_DEF_INDEX_NAME):
        """Wipes any existing files for this index name and writes a fresh
        generation-0 TOC with an empty segment list.
        """
        schema = ensure_schema(schema)
        # Clear existing files
        prefix = "_%s_" % indexname
        for filename in storage:
            if filename.startswith(prefix):
                storage.delete_file(filename)
        # Write a TOC file with an empty list of segments
        toc = cls(schema, [], 0)
        toc.write(storage, indexname)
    @classmethod
    def read(cls, storage, indexname, gen=None, schema=None):
        """Loads the TOC for the given index from storage.

        :param gen: the generation to read; defaults to the latest on disk.
        :param schema: if given, use this schema instead of the pickled one.
        :raises EmptyIndexError: if no TOC file exists for the index.
        :raises IndexError: on architecture or byte-order mismatch.
        :raises IndexVersionError: if the on-disk format is unsupported.
        """
        if gen is None:
            gen = cls._latest_generation(storage, indexname)
            if gen < 0:
                raise EmptyIndexError("Index %r does not exist in %r"
                                      % (indexname, storage))
        # Read the content of this index from the .toc file.
        tocfilename = cls._filename(indexname, gen)
        stream = storage.open_file(tocfilename)
        def check_size(name, target):
            # The TOC records the C type sizes used when it was written;
            # refuse to read a TOC written on an incompatible architecture
            sz = stream.read_varint()
            if sz != target:
                raise IndexError("Index was created on different architecture:"
                                 " saved %s = %s, this computer = %s"
                                 % (name, sz, target))
        check_size("int", _INT_SIZE)
        check_size("long", _LONG_SIZE)
        check_size("float", _FLOAT_SIZE)
        # Fixed sentinel integer used to detect byte-order problems
        if not stream.read_int() == -12345:
            raise IndexError("Number misread: byte order problem")
        version = stream.read_int()
        release = (stream.read_varint(), stream.read_varint(),
                   stream.read_varint())
        if version != _CURRENT_TOC_VERSION:
            # Older formats are handled by version-specific loader functions
            if version in toc_loaders:
                loader = toc_loaders[version]
                schema, segments = loader(stream, gen, schema, version)
            else:
                raise IndexVersionError("Can't read format %s" % version,
                                        version)
        else:
            # If the user supplied a schema object with the constructor, don't
            # load the pickled schema from the saved index.
            if schema:
                stream.skip_string()
            else:
                schema = pickle.loads(stream.read_string())
            schema = ensure_schema(schema)
            # Generation
            index_gen = stream.read_int()
            assert gen == index_gen
            _ = stream.read_int()  # Unused
            segments = stream.read_pickle()
        stream.close()
        return cls(schema, segments, gen, version=version, release=release)
    def write(self, storage, indexname):
        """Serializes this TOC to storage, atomically replacing any previous
        TOC file for the same generation.
        """
        schema = ensure_schema(self.schema)
        schema.clean()
        # Use a temporary file for atomic write.
        tocfilename = self._filename(indexname, self.generation)
        tempfilename = '%s.%s' % (tocfilename, time())
        stream = storage.create_file(tempfilename)
        # Record architecture sizes and a byte-order sentinel so read() can
        # detect incompatible platforms
        stream.write_varint(_INT_SIZE)
        stream.write_varint(_LONG_SIZE)
        stream.write_varint(_FLOAT_SIZE)
        stream.write_int(-12345)
        stream.write_int(_CURRENT_TOC_VERSION)
        for num in __version__[:3]:
            stream.write_varint(num)
        try:
            stream.write_string(pickle.dumps(schema, -1))
        except pickle.PicklingError:
            # Try to narrow down the error to a single field
            for fieldname, field in schema.items():
                try:
                    pickle.dumps(field)
                except pickle.PicklingError:
                    e = sys.exc_info()[1]
                    raise pickle.PicklingError("%s %s=%r" % (e, fieldname, field))
            # Otherwise, re-raise the original exception
            raise
        stream.write_int(self.generation)
        stream.write_int(0)  # Unused
        stream.write_pickle(self.segments)
        stream.close()
        # Rename temporary file to the proper filename
        storage.rename_file(tempfilename, tocfilename, safe=True)
| |
import numpy as np
class MonomerType(object):
    """A named monomer type built from particle, bond, angle and dihedral
    parameter sets (taken from the 'Part', 'Bonds', 'Angles' and 'Dihedrals'
    entries of the parameters dict)."""

    def __init__(self, name, parameters):
        super(MonomerType, self).__init__()
        self.Id = None
        self.name = name
        self.particles = parameters['Part']
        self.bonds = parameters['Bonds']
        self.angles = parameters['Angles']
        self.dihedrals = parameters['Dihedrals']

    def __eq__(self, other):
        # Two monomer types match when both name and Id agree.
        return self.name == other.name and self.Id == other.Id

    def __ne__(self, other):
        return self.name != other.name or self.Id != other.Id
class AtomType(object):
    """A single particle (bead/atom) type with its physical parameters.

    ``mass``, ``radius``, ``charge``, ``interacting`` and ``position`` are
    validating properties backed by private attributes; the remaining
    attributes are plain values.
    """

    def __init__(self, name, parameters, unique=False):
        """
        :param name: display name of this atom type.
        :param parameters: dict of optional parameter overrides; see
            :meth:`set_parameters` for the recognised keys.
        :param unique: whether this type is considered unique.
        """
        self.group_id = 1
        self.name = name
        self.Id = None
        self._mass = 10
        self._radius = 2.0
        self.charge = 0.0
        self.hydrophobicity = 0.0
        self.surface_energy = 0.0
        self._interacting = True
        self.position = [0, 0, 0]
        self.unique = unique
        self.set_parameters(parameters)

    def set_parameters(self, parameters):
        '''Copy the recognised entries of *parameters* onto this object.

        Recognised keys: mass, radius, interacting, charge, hydrophobicity,
        surface_energy, position. Unknown keys are silently ignored.

        :raises AttributeError: if *parameters* is not a dict.
        '''
        parameter_names = ['mass', 'radius', 'interacting', 'charge',
                           'hydrophobicity', 'surface_energy', 'position']
        if not isinstance(parameters, dict):
            raise AttributeError('%s is not a dict' % parameters)
        for key in parameter_names:
            if key in parameters:
                setattr(self, key, parameters[key])

    @property
    def position(self):
        # Cartesian position, stored as a numpy array.
        return self._position

    @position.setter
    def position(self, value):
        self._position = np.array(value)

    @property
    def mass(self):
        # Particle mass; always stored as float.
        return self._mass

    @mass.setter
    def mass(self, value):
        self._mass = float(value)

    @mass.deleter
    def mass(self):
        del self._mass

    @property
    def interacting(self):
        # Whether this type takes part in pair interactions.
        return self._interacting

    @interacting.setter
    def interacting(self, value):
        # Accept real booleans and the strings 'True'/'False'. The previous
        # implementation used the Python-2-only ``basestring`` and silently
        # ignored unrecognised strings; both issues are fixed here.
        if isinstance(value, bool):
            self._interacting = value
        elif value == 'True':
            self._interacting = True
        elif value == 'False':
            self._interacting = False
        else:
            raise ValueError('interacting parameter must be True or False, '
                             'in strings or as bool')

    @interacting.deleter
    def interacting(self):
        del self._interacting

    @property
    def radius(self):
        # Particle radius; always stored as float.
        return self._radius

    @radius.setter
    def radius(self, value):
        self._radius = float(value)

    @radius.deleter
    def radius(self):
        del self._radius

    @property
    def charge(self):
        # Electric charge; always stored as float.
        return self._charge

    @charge.setter
    def charge(self, value):
        self._charge = float(value)

    @charge.deleter
    def charge(self):
        del self._charge

    def __eq__(self, other):
        # Equality requires name, Id, group and mass to all agree.
        return (self.name == other.name and self.Id == other.Id
                and self.group_id == other.group_id
                and self.mass == other.mass)

    def __ne__(self, other):
        # BUG FIX: the old __ne__ AND-ed two partial checks, so two objects
        # could compare neither equal nor unequal (e.g. different names but
        # equal group/mass). __ne__ is now the exact negation of __eq__.
        return not self.__eq__(other)

    def __repr__(self):
        if self.Id:
            return 'AtomType | Name: %s - Atom-Id: %d - Mass: %f' % (
                self.name, self.Id, self.mass)
        else:
            return 'AtomType | Name: %s - Atom-Id: N/A - Mass: %f' % (
                self.name, self.mass)
class ArchType(object):
    """Base class for interaction types (pairs, bonds, angles, dihedrals).

    Holds a name, an optional integer ``Id``, a ``kind`` string and the
    numeric coefficients in ``parameters['coeffs']``.
    """

    def __init__(self, name, kind, coeffs):
        self.Id = None
        self.name = name
        self.parameters = {}
        self.kind = kind
        self.parameters['coeffs'] = coeffs

    @classmethod
    def from_dict(cls, pair_dict, name='None'):
        """Build an instance from a flat dict with a 'kind' entry and
        coefficient entries whose keys contain 'coef'.

        Coefficient keys are ordered lexicographically (coef0, coef1, ...);
        NOTE(review): with ten or more coefficients 'coef10' would sort
        before 'coef2' — confirm callers never exceed ten.
        """
        coef_keys = sorted(key for key in pair_dict if 'coef' in key)
        coefficients = [float(pair_dict[key]) for key in coef_keys]
        return cls(name, pair_dict['kind'], coefficients)

    def __eq__(self, other):
        # Equality requires name, Id, kind and coefficients to all agree.
        return (self.name == other.name and self.Id == other.Id
                and self.kind == other.kind
                and self.parameters['coeffs'] == other.parameters['coeffs'])

    def __ne__(self, other):
        # BUG FIX: the old __ne__ AND-ed two partial checks, so two objects
        # could compare neither equal nor unequal (e.g. different names but
        # equal kind/coeffs). __ne__ is now the exact negation of __eq__.
        return not self.__eq__(other)
class PairType(ArchType):
    """Non-bonded pair interaction type."""

    def __init__(self, name, kind, rep_only, coeffs, cutoff,
                 single_type_parametrised):
        super(PairType, self).__init__(name, kind, coeffs)
        self.single_type_parametrised = single_type_parametrised
        self.repulsive_only = rep_only
        self.cutoff = cutoff
        # A purely repulsive potential is only valid when parametrised per
        # single type.
        if self.repulsive_only and not self.single_type_parametrised:
            raise ValueError('Pair potential "{}": Repulsive only potentials '
                             'need to be single type '
                             'parametrised!'.format(name))

    @classmethod
    def from_dict(cls, pair_dict, name='None'):
        """Build a PairType from a flat dict with 'kind', 'coef*',
        'repulsive_only', 'cutoff' and 'single_type_parametrised' keys."""
        keys = sorted(k for k in pair_dict if 'coef' in k)
        coefficients = [float(pair_dict[k]) for k in keys]
        return cls(name, pair_dict['kind'],
                   int(pair_dict['repulsive_only']),
                   coefficients, float(pair_dict['cutoff']),
                   pair_dict['single_type_parametrised'])

    def __repr__(self):
        header = 'Pair Type | Name: {} - Pair-Id: {:> 3d} - Kind: {} \nCoefficients: '
        header += '{:> 2f} ' * len(self.parameters['coeffs'])
        return header.format(self.name, self.Id, self.kind,
                             *self.parameters['coeffs'])
class BondType(ArchType):
    """Two-body bond interaction type."""

    def __init__(self, name, kind, coeffs):
        super(BondType, self).__init__(name, kind, coeffs)

    def __repr__(self):
        header = 'Bond Type | Name: {} - Bond-Id: {:> 3d} - Kind: {} \nCoefficients: '
        header += '{:> 2f} ' * len(self.parameters['coeffs'])
        return header.format(self.name, self.Id, self.kind,
                             *self.parameters['coeffs'])
class AngleType(ArchType):
    """Three-body angle interaction type."""

    def __init__(self, name, kind, coeffs):
        super(AngleType, self).__init__(name, kind, coeffs)

    def __repr__(self):
        # BUG FIX: the label previously read "Bond-Id" (copy-paste from
        # BondType); it now correctly reads "Angle-Id".
        repr_str = 'Angle Type | Name: {} - Angle-Id: {:> 3d} - Kind: {} \nCoefficients: '
        repr_str += '{:> 2f} ' * len(self.parameters['coeffs'])
        return repr_str.format(self.name, self.Id, self.kind,
                               *self.parameters['coeffs'])
class DihedralType(ArchType):
    """Four-body dihedral interaction type."""

    def __init__(self, name, kind, coeffs):
        super(DihedralType, self).__init__(name, kind, coeffs)

    def __repr__(self):
        # BUG FIX: the label previously read "Bond-Id" (copy-paste from
        # BondType); it now correctly reads "Dihedral-Id".
        repr_str = 'Dihedral Type | Name: {} - Dihedral-Id: {:> 3d} - Kind: {} \nCoefficients: '
        repr_str += '{:> 2f} ' * len(self.parameters['coeffs'])
        return repr_str.format(self.name, self.Id, self.kind,
                               *self.parameters['coeffs'])
class affinity(object):
    '''Container for the interaction strength between two partner types.'''

    def __init__(self, type1, type2, interaction):
        # The two interaction partners and the strength of their coupling.
        self.type1 = type1
        self.type2 = type2
        self.strength = interaction

    def __repr__(self):
        return '%s | %s | strength: %f' % (self.type1, self.type2,
                                           self.strength)
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004-2005 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2006-2007 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from __future__ import with_statement
from StringIO import StringIO
from datetime import datetime, timedelta
import re
from genshi.builder import tag
from trac import __version__
from trac.attachment import AttachmentModule
from trac.config import ConfigSection, ExtensionOption
from trac.core import *
from trac.perm import IPermissionRequestor
from trac.resource import *
from trac.search import ISearchSource, search_to_regexps, shorten_result
from trac.util import as_bool
from trac.util.datefmt import parse_date, utc, to_utimestamp, to_datetime, \
get_datetime_format_hint, format_date, \
format_datetime, from_utimestamp, user_time
from trac.util.text import CRLF
from trac.util.translation import _, tag_
from txomon.ticket.api import TicketSystem
from txomon.ticket.model import Milestone, MilestoneCache, Ticket, \
group_milestones
from trac.timeline.api import ITimelineEventProvider
from trac.web import IRequestHandler, RequestDone
from trac.web.chrome import (Chrome, INavigationContributor,
add_link, add_notice, add_script, add_stylesheet,
add_warning, auth_link, prevnext_nav, web_context)
from trac.wiki.api import IWikiSyntaxProvider
from trac.wiki.formatter import format_to
class ITicketGroupStatsProvider(Interface):
    """Extension-point interface for components that compute progress
    statistics for an arbitrary group of tickets.
    """
    def get_ticket_group_stats(ticket_ids):
        """ Gather statistics on a group of tickets.
        This method returns a valid `TicketGroupStats` object.
        :param ticket_ids: a collection of ticket ids.
        """
class TicketGroupStats(object):
    """Encapsulates statistics on a group of tickets as a list of progress
    bar intervals."""

    def __init__(self, title, unit):
        """
        :param title: the display name of this group of stats (e.g.
                      ``'ticket status'``)
        :param unit: is the units for these stats in plural form,
                     e.g. ``_('hours'``)
        """
        self.title = title
        self.unit = unit
        self.count = 0
        self.qry_args = {}
        self.intervals = []
        self.done_percent = 0
        self.done_count = 0

    def add_interval(self, title, count, qry_args, css_class,
                     overall_completion=None):
        """Adds a division to this stats' group's progress bar.

        :param title: the display name (e.g. ``'closed'``, ``'spent
                      effort'``) of this interval that will be displayed in
                      front of the unit name
        :param count: the number of units in the interval
        :param qry_args: a dict of extra params that will yield the subset
                         of tickets in this interval on a query.
        :param css_class: is the css class that will be used to display the
                          division
        :param overall_completion: can be set to true to make this interval
                                   count towards overall completion of this
                                   group of tickets.

        .. versionchanged :: 0.12
           deprecated `countsToProg` argument was removed, use
           `overall_completion` instead
        """
        interval = {
            'title': title,
            'count': count,
            'qry_args': qry_args,
            'css_class': css_class,
            'percent': None,
            'overall_completion': overall_completion,
        }
        self.intervals.append(interval)
        self.count += count

    def refresh_calcs(self):
        """Recompute per-interval percentages and the overall completion
        figures from the current interval counts."""
        if self.count < 1:
            return
        percent_sum = 0
        self.done_percent = 0
        self.done_count = 0
        for interval in self.intervals:
            share = round(float(interval['count'] /
                                float(self.count) * 100))
            interval['percent'] = share
            percent_sum = percent_sum + share
            if interval['overall_completion']:
                self.done_percent += share
                self.done_count += interval['count']
        # We want the percentages to add up to 100%. To do that, we fudge
        # one of the intervals: if we're <100%, add the shortfall to the
        # smallest non-zero interval; if we're >100%, subtract the excess
        # from the largest interval.
        if self.done_count and percent_sum != 100:
            fudge_amt = 100 - percent_sum
            candidates = [i for i in sorted(self.intervals,
                                            key=lambda k: k['percent'],
                                            reverse=(fudge_amt < 0))
                          if i['percent']]
            fudge_int = candidates[0]
            fudge_int['percent'] += fudge_amt
            self.done_percent += fudge_amt
class DefaultTicketGroupStatsProvider(Component):
    """Configurable ticket group statistics provider.
    See :teo:`TracIni#milestone-groups-section` for a detailed
    example configuration.
    """
    implements(ITicketGroupStatsProvider)
    milestone_groups_section = ConfigSection('milestone-groups',
        """As the workflow for tickets is now configurable, there can
        be many ticket states, and simply displaying closed tickets
        vs. all the others is maybe not appropriate in all cases. This
        section enables one to easily create ''groups'' of states that
        will be shown in different colors in the milestone progress
        bar.
        Note that the groups can only be based on the ticket
        //status//, nothing else. In particular, it's not possible to
        distinguish between different closed tickets based on the
        //resolution//.
        Example configuration with three groups, //closed//, //new//
        and //active// (the default only has closed and active):
        {{{
        # the 'closed' group correspond to the 'closed' tickets
        closed = closed
        # .order: sequence number in the progress bar
        closed.order = 0
        # .query_args: optional parameters for the corresponding
        # query. In this example, the changes from the
        # default are two additional columns ('created' and
        # 'modified'), and sorting is done on 'created'.
        closed.query_args = group=resolution,order=time,col=id,col=summary,col=owner,col=type,col=priority,col=component,col=severity,col=time,col=changetime
        # .overall_completion: indicates groups that count for overall
        # completion percentage
        closed.overall_completion = true
        new = new
        new.order = 1
        new.css_class = new
        new.label = new
        # Note: one catch-all group for other statuses is allowed
        active = *
        active.order = 2
        # .css_class: CSS class for this interval
        active.css_class = open
        # .label: displayed label for this group
        active.label = in progress
        }}}
        The definition consists in a comma-separated list of accepted
        status. Also, '*' means any status and could be used to
        associate all remaining states to one catch-all group.
        The CSS class can be one of: new (yellow), open (no color) or
        closed (green). Other styles can easily be added using custom
        CSS rule: `table.progress td.<class> { background: <color> }`
        to a [TracInterfaceCustomization#SiteAppearance site/style.css] file
        for example.
        (''since 0.11'')""")
    # Fallback groups used when no [milestone-groups] config section exists:
    # one 'closed' group that counts for completion plus an 'active'
    # catch-all for every other status
    default_milestone_groups = [
        {'name': 'closed', 'status': 'closed',
         'query_args': 'group=resolution', 'overall_completion': 'true'},
        {'name': 'active', 'status': '*', 'css_class': 'open'}
        ]
    def _get_ticket_groups(self):
        """Returns a list of dict describing the ticket groups
        in the expected order of appearance in the milestone progress bars.
        """
        if 'milestone-groups' in self.config:
            groups = {}
            order = 0
            for groupname, value in self.milestone_groups_section.options():
                qualifier = 'status'
                # Option keys look like "<group>" (the status list) or
                # "<group>.<qualifier>" (order, css_class, label, ...)
                if '.' in groupname:
                    groupname, qualifier = groupname.split('.', 1)
                group = groups.setdefault(groupname, {'name': groupname,
                                                      'order': order})
                group[qualifier] = value
                # Keep `order` one past the highest explicit order seen so
                # far, so groups without an .order option still sort last
                order = max(order, int(group['order'])) + 1
            return [group for group in sorted(groups.values(),
                                              key=lambda g: int(g['order']))]
        else:
            # No [milestone-groups] section configured: use the defaults
            return self.default_milestone_groups
    def get_ticket_group_stats(self, ticket_ids):
        """Tally the status of each ticket in `ticket_ids`, fold the counts
        into the configured groups and return a `TicketGroupStats`.
        """
        total_cnt = len(ticket_ids)
        all_statuses = set(TicketSystem(self.env).get_all_status())
        # Count tickets per status (zero-initialized so every known status
        # appears in the mapping)
        status_cnt = {}
        for s in all_statuses:
            status_cnt[s] = 0
        if total_cnt:
            # NOTE(review): ticket ids are interpolated directly into the
            # SQL; they are assumed to be integers coming from the database,
            # not raw user input — confirm against callers
            for status, count in self.env.db_query("""
                SELECT status, count(status) FROM ticket
                WHERE id IN (%s) GROUP BY status
                """ % ",".join(str(x) for x in sorted(ticket_ids))):
                status_cnt[status] = count
        stat = TicketGroupStats(_('ticket status'), _('tickets'))
        remaining_statuses = set(all_statuses)
        groups = self._get_ticket_groups()
        catch_all_group = None
        # we need to go through the groups twice, so that the catch up group
        # doesn't need to be the last one in the sequence
        for group in groups:
            status_str = group['status'].strip()
            if status_str == '*':
                # '*' marks the catch-all group; only one is allowed
                if catch_all_group:
                    raise TracError(_(
                        "'%(group1)s' and '%(group2)s' milestone groups "
                        "both are declared to be \"catch-all\" groups. "
                        "Please check your configuration.",
                        group1=group['name'], group2=catch_all_group['name']))
                catch_all_group = group
            else:
                # Explicit comma-separated status list; each status may only
                # belong to a single group
                group_statuses = set([s.strip()
                                      for s in status_str.split(',')]) \
                                      & all_statuses
                if group_statuses - remaining_statuses:
                    raise TracError(_(
                        "'%(groupname)s' milestone group reused status "
                        "'%(status)s' already taken by other groups. "
                        "Please check your configuration.",
                        groupname=group['name'],
                        status=', '.join(group_statuses - remaining_statuses)))
                else:
                    remaining_statuses -= group_statuses
                group['statuses'] = group_statuses
        if catch_all_group:
            # Whatever statuses were not claimed belong to the catch-all
            catch_all_group['statuses'] = remaining_statuses
        for group in groups:
            # Second pass: count the tickets per group and build the query
            # arguments that select exactly this group's tickets
            group_cnt = 0
            query_args = {}
            for s, cnt in status_cnt.iteritems():
                if s in group['statuses']:
                    group_cnt += cnt
                    query_args.setdefault('status', []).append(s)
            # Merge any extra key=value pairs from the group's .query_args
            for arg in [kv for kv in group.get('query_args', '').split(',')
                        if '=' in kv]:
                k, v = [a.strip() for a in arg.split('=', 1)]
                query_args.setdefault(k, []).append(v)
            stat.add_interval(group.get('label', group['name']),
                              group_cnt, query_args,
                              group.get('css_class', group['name']),
                              as_bool(group.get('overall_completion')))
        stat.refresh_calcs()
        return stat
def get_ticket_stats(provider, tickets):
    """Collect group statistics for the given tickets via `provider`."""
    ticket_ids = [ticket['id'] for ticket in tickets]
    return provider.get_ticket_group_stats(ticket_ids)
def get_tickets_for_milestone(env, db=None, milestone=None, field='component'):
    """Retrieve all tickets associated with the given `milestone`.
    Returns a list of dicts with the keys 'id', 'status' and *field* (a
    built-in ticket column or a custom ticket field).
    .. versionchanged :: 1.0
       the `db` parameter is no longer needed and will be removed in
       version 1.1.1
    """
    # `with env.db_query` just holds a connection open for the duration;
    # the actual queries still go through env.db_query(...) below
    with env.db_query as db:
        fields = TicketSystem(env).get_ticket_fields()
        if field in [f['name'] for f in fields if not f.get('custom')]:
            # Built-in column: the column name comes from the trusted field
            # list above, so interpolating it into the SQL is safe
            sql = """SELECT id, status, %s FROM ticket WHERE milestone=%%s
                ORDER BY %s""" % (field, field)
            args = (milestone,)
        else:
            # Custom field: its values live in the ticket_custom table
            sql = """SELECT id, status, value FROM ticket
                LEFT OUTER JOIN ticket_custom ON (id=ticket AND name=%s)
                WHERE milestone=%s ORDER BY value"""
            args = (field, milestone)
        return [{'id': tkt_id, 'status': status, field: fieldval}
                for tkt_id, status, fieldval in env.db_query(sql, args)]
def apply_ticket_permissions(env, req, tickets):
    """Apply permissions to a set of milestone tickets as returned by
    `get_tickets_for_milestone()`, keeping only those the request may view."""
    visible = []
    for ticket in tickets:
        if 'TICKET_VIEW' in req.perm('ticket', ticket['id']):
            visible.append(ticket)
    return visible
def milestone_stats_data(env, req, stat, name, grouped_by='component',
                         group=None):
    """Assemble template data for one milestone: the stats object plus links
    into the query module for the whole group and for each interval."""
    from txomon.ticket.query import QueryModule
    has_query = env[QueryModule] is not None

    def query_href(extra_args):
        # Build a query link for the given extra parameters, or None when
        # the query module is disabled.
        if not has_query:
            return None
        params = {'milestone': name, grouped_by: group, 'group': 'status'}
        params.update(extra_args)
        return req.href.query(params)

    return {'stats': stat,
            'stats_href': query_href(stat.qry_args),
            'interval_hrefs': [query_href(interval['qry_args'])
                               for interval in stat.intervals]}
def grouped_stats_data(env, stats_provider, tickets, by, per_group_stats_data):
    """Get the `tickets` stats data grouped by ticket field `by`.

    `per_group_stats_data(gstat, group_name)` should return a data dict to
    include for the group with field value `group_name`.  Each returned
    entry additionally gets a `percent_of_max_total` key relating its
    ticket count to the largest group.
    """
    # Determine the set of group values: select-type fields declare their
    # options; otherwise fall back to the distinct values found in the db.
    group_names = []
    for field in TicketSystem(env).get_ticket_fields():
        if field['name'] == by:
            if 'options' in field:
                group_names = field['options']
                if field.get('optional'):
                    group_names.insert(0, '')
            else:
                # `by` is a built-in field name here, not user input.
                group_names = [name for name, in env.db_query("""
                    SELECT DISTINCT COALESCE(%s, '') FROM ticket
                    ORDER BY COALESCE(%s, '')
                    """ % (by, by))]
    max_count = 0
    data = []
    for name in group_names:
        # The empty group matches both NULL and '' field values.
        values = (name,) if name else (None, name)
        group_tickets = [t for t in tickets if t[by] in values]
        if not group_tickets:
            continue
        gstat = get_ticket_stats(stats_provider, group_tickets)
        if gstat.count > max_count:
            max_count = gstat.count
        gs_dict = {'name': name}
        gs_dict.update(per_group_stats_data(gstat, name))
        data.append(gs_dict)
    # Second pass: scale each group against the biggest one (100 == largest).
    for gs_dict in data:
        percent = 1.0
        if max_count:
            gstat = gs_dict['stats']
            percent = float(gstat.count) / float(max_count) * 100
        gs_dict['percent_of_max_total'] = percent
    return data
class RoadmapModule(Component):
    """Give an overview over all the milestones."""

    implements(INavigationContributor, IPermissionRequestor, IRequestHandler)

    # Pluggable statistics backend used to compute per-milestone progress.
    stats_provider = ExtensionOption('roadmap', 'stats_provider',
                                     ITicketGroupStatsProvider,
                                     'DefaultTicketGroupStatsProvider',
        """Name of the component implementing `ITicketGroupStatsProvider`,
        which is used to collect statistics on groups of tickets for display
        in the roadmap views.""")

    # INavigationContributor methods

    def get_active_navigation_item(self, req):
        return 'roadmap'

    def get_navigation_items(self, req):
        # Only advertise the Roadmap entry to users allowed to see it.
        if 'ROADMAP_VIEW' in req.perm:
            yield ('mainnav', 'roadmap',
                   tag.a(_('Roadmap'), href=req.href.roadmap(), accesskey=3))

    # IPermissionRequestor methods

    def get_permission_actions(self):
        actions = ['MILESTONE_CREATE', 'MILESTONE_DELETE', 'MILESTONE_MODIFY',
                   'MILESTONE_VIEW', 'ROADMAP_VIEW']
        return ['ROADMAP_VIEW'] + [('ROADMAP_ADMIN', actions)]

    # IRequestHandler methods

    def match_request(self, req):
        return req.path_info == '/roadmap'

    def process_request(self, req):
        """Render the roadmap page (or its iCalendar export)."""
        req.perm.require('MILESTONE_VIEW')

        # `show` carries the filter checkboxes; 'all' collapses to showing
        # completed milestones as well.
        show = req.args.getlist('show')
        if 'all' in show:
            show = ['completed']

        milestones = Milestone.select(self.env, 'completed' in show)
        if 'noduedate' in show:
            # Checkbox semantics: hide open milestones that have no due date.
            milestones = [m for m in milestones
                          if m.due is not None or m.completed]
        milestones = [m for m in milestones
                      if 'MILESTONE_VIEW' in req.perm(m.resource)]

        stats = []
        queries = []

        for milestone in milestones:
            tickets = get_tickets_for_milestone(
                    self.env, milestone=milestone.name, field='owner')
            tickets = apply_ticket_permissions(self.env, req, tickets)
            stat = get_ticket_stats(self.stats_provider, tickets)
            stats.append(milestone_stats_data(self.env, req, stat,
                                              milestone.name))
            #milestone['tickets'] = tickets # for the iCalendar view

        if req.args.get('format') == 'ics':
            # _render_ics writes the response and raises RequestDone.
            self._render_ics(req, milestones)
            return

        # FIXME should use the 'webcal:' scheme, probably
        username = None
        if req.authname and req.authname != 'anonymous':
            username = req.authname
        icshref = req.href.roadmap(show=show, user=username, format='ics')
        add_link(req, 'alternate', auth_link(req, icshref), _('iCalendar'),
                 'text/calendar', 'ics')

        data = {
            'milestones': milestones,
            'milestone_stats': stats,
            'queries': queries,
            'show': show,
        }
        add_stylesheet(req, 'common/css/roadmap.css')
        return 'roadmap.html', data, None

    # Internal methods

    def _render_ics(self, req, milestones):
        """Stream the milestones (and the requesting user's tickets) as an
        RFC 5545 iCalendar document, then end the request."""
        req.send_response(200)
        req.send_header('Content-Type', 'text/calendar;charset=utf-8')
        buf = StringIO()

        from txomon.ticket import Priority
        priorities = {}
        for priority in Priority.select(self.env):
            priorities[priority.name] = float(priority.value)
        def get_priority(ticket):
            # Map the ticket priority onto the iCalendar 1..9 scale
            # (returns None for unknown priorities).
            value = priorities.get(ticket['priority'])
            if value:
                return int((len(priorities) + 8 * value - 9) /
                           (len(priorities) - 1))

        def get_status(ticket):
            # Translate workflow status to an iCalendar VTODO STATUS.
            # NB: `or` binds looser than `and`, so 'reopened' without an
            # owner also maps to NEEDS-ACTION.
            status = ticket['status']
            if status == 'new' or status == 'reopened' and not ticket['owner']:
                return 'NEEDS-ACTION'
            elif status == 'assigned' or status == 'reopened':
                return 'IN-PROCESS'
            elif status == 'closed':
                if ticket['resolution'] == 'fixed':
                    return 'COMPLETED'
                else:
                    return 'CANCELLED'
            else: return ''

        def escape_value(text):
            # Escape per RFC 5545: backslash-escape ; , \ and fold newlines.
            s = ''.join(map(lambda c: '\\' + c if c in ';,\\' else c, text))
            return '\\n'.join(re.split(r'[\r\n]+', s))

        def write_prop(name, value, params={}):
            # Emit one property, folding long lines at 75 octets with a
            # leading space on continuation lines (RFC 5545 line folding).
            text = ';'.join([name] + [k + '=' + v for k, v in params.items()]) \
                 + ':' + escape_value(value)
            firstline = 1
            while text:
                if not firstline:
                    text = ' ' + text
                else:
                    firstline = 0
                buf.write(text[:75] + CRLF)
                text = text[75:]

        def write_date(name, value, params={}):
            params['VALUE'] = 'DATE'
            write_prop(name, format_date(value, '%Y%m%d', req.tz), params)

        def write_utctime(name, value, params={}):
            write_prop(name, format_datetime(value, '%Y%m%dT%H%M%SZ', utc),
                       params)

        host = req.base_url[req.base_url.find('://') + 3:]
        user = req.args.get('user', 'anonymous')
        write_prop('BEGIN', 'VCALENDAR')
        write_prop('VERSION', '2.0')
        write_prop('PRODID', '-//Edgewall Software//NONSGML Trac %s//EN'
                   % __version__)
        write_prop('METHOD', 'PUBLISH')
        write_prop('X-WR-CALNAME',
                   self.env.project_name + ' - ' + _('Roadmap'))
        write_prop('X-WR-CALDESC', self.env.project_description)
        write_prop('X-WR-TIMEZONE', str(req.tz))
        for milestone in milestones:
            uid = '<%s/milestone/%s@%s>' % (req.base_path, milestone.name,
                                            host)
            if milestone.due:
                # Each due milestone becomes an all-day VEVENT.
                write_prop('BEGIN', 'VEVENT')
                write_prop('UID', uid)
                write_utctime('DTSTAMP', milestone.due)
                write_date('DTSTART', milestone.due)
                write_prop('SUMMARY', _('Milestone %(name)s',
                                        name=milestone.name))
                write_prop('URL', req.abs_href.milestone(milestone.name))
                if milestone.description:
                    write_prop('DESCRIPTION', milestone.description)
                write_prop('END', 'VEVENT')
            # Tickets owned by the requesting user become VTODOs, related
            # to the milestone event when it has a due date.
            tickets = get_tickets_for_milestone(
                    self.env, milestone=milestone.name, field='owner')
            tickets = apply_ticket_permissions(self.env, req, tickets)
            for tkt_id in [ticket['id'] for ticket in tickets
                           if ticket['owner'] == user]:
                ticket = Ticket(self.env, tkt_id)
                write_prop('BEGIN', 'VTODO')
                write_prop('UID', '<%s/ticket/%s@%s>' % (req.base_path,
                                                         tkt_id, host))
                if milestone.due:
                    write_prop('RELATED-TO', uid)
                    write_date('DUE', milestone.due)
                write_prop('SUMMARY', _('Ticket #%(num)s: %(summary)s',
                                        num=ticket.id,
                                        summary=ticket['summary']))
                write_prop('URL', req.abs_href.ticket(ticket.id))
                write_prop('DESCRIPTION', ticket['description'])
                priority = get_priority(ticket)
                if priority:
                    write_prop('PRIORITY', unicode(priority))
                write_prop('STATUS', get_status(ticket))
                if ticket['status'] == 'closed':
                    # COMPLETED is the timestamp of the last status change.
                    for time, in self.env.db_query("""
                            SELECT time FROM ticket_change
                            WHERE ticket=%s AND field='status'
                            ORDER BY time desc LIMIT 1
                            """, (ticket.id,)):
                        write_utctime('COMPLETED', from_utimestamp(time))
                write_prop('END', 'VTODO')
        write_prop('END', 'VCALENDAR')

        ics_str = buf.getvalue().encode('utf-8')
        req.send_header('Content-Length', len(ics_str))
        req.end_headers()
        req.write(ics_str)
        raise RequestDone
class MilestoneModule(Component):
    """View and edit individual milestones."""

    implements(INavigationContributor, IPermissionRequestor, IRequestHandler,
               ITimelineEventProvider, IWikiSyntaxProvider, IResourceManager,
               ISearchSource)

    # Pluggable statistics backend for the milestone progress bars.
    stats_provider = ExtensionOption('milestone', 'stats_provider',
                                     ITicketGroupStatsProvider,
                                     'DefaultTicketGroupStatsProvider',
        """Name of the component implementing `ITicketGroupStatsProvider`,
        which is used to collect statistics on groups of tickets for display
        in the milestone views.""")

    # INavigationContributor methods

    def get_active_navigation_item(self, req):
        # Milestone pages highlight the Roadmap entry in the main nav.
        return 'roadmap'

    def get_navigation_items(self, req):
        return []

    # IPermissionRequestor methods

    def get_permission_actions(self):
        actions = ['MILESTONE_CREATE', 'MILESTONE_DELETE', 'MILESTONE_MODIFY',
                   'MILESTONE_VIEW']
        return actions + [('MILESTONE_ADMIN', actions)]

    # ITimelineEventProvider methods

    def get_timeline_filters(self, req):
        if 'MILESTONE_VIEW' in req.perm:
            yield ('milestone', _('Milestones reached'))

    def get_timeline_events(self, req, start, stop, filters):
        if 'milestone' in filters:
            milestone_realm = Resource('milestone')
            # Only completed milestones within the window are reported.
            for name, due, completed, description \
                    in MilestoneCache(self.env).milestones.itervalues():
                if completed and start <= completed <= stop:
                    # TODO: creation and (later) modifications should also be
                    # reported
                    milestone = milestone_realm(id=name)
                    if 'MILESTONE_VIEW' in req.perm(milestone):
                        yield ('milestone', completed, '', # FIXME: author?
                               (milestone, description))

            # Attachments
            for event in AttachmentModule(self.env).get_timeline_events(
                    req, milestone_realm, start, stop):
                yield event

    def render_timeline_event(self, context, field, event):
        # `event[3]` is the (milestone, description) pair yielded above.
        milestone, description = event[3]
        if field == 'url':
            return context.href.milestone(milestone.id)
        elif field == 'title':
            return tag_('Milestone %(name)s completed',
                        name=tag.em(milestone.id))
        elif field == 'description':
            return format_to(self.env, None, context.child(resource=milestone),
                             description)

    # IRequestHandler methods

    def match_request(self, req):
        match = re.match(r'/milestone(?:/(.+))?$', req.path_info)
        if match:
            if match.group(1):
                req.args['id'] = match.group(1)
            return True

    def process_request(self, req):
        """Dispatch view/edit/delete actions for a single milestone."""
        milestone_id = req.args.get('id')
        req.perm('milestone', milestone_id).require('MILESTONE_VIEW')

        add_link(req, 'up', req.href.roadmap(), _('Roadmap'))

        action = req.args.get('action', 'view')
        try:
            milestone = Milestone(self.env, milestone_id)
        except ResourceNotFound:
            # Unknown milestone: offer creation to authorized users.
            if 'MILESTONE_CREATE' not in req.perm('milestone', milestone_id):
                raise
            milestone = Milestone(self.env, None)
            milestone.name = milestone_id
            action = 'edit' # rather than 'new' so that it works for POST/save

        if req.method == 'POST':
            if req.args.has_key('cancel'):
                if milestone.exists:
                    req.redirect(req.href.milestone(milestone.name))
                else:
                    req.redirect(req.href.roadmap())
            elif action == 'edit':
                return self._do_save(req, milestone)
            elif action == 'delete':
                self._do_delete(req, milestone)
        elif action in ('new', 'edit'):
            return self._render_editor(req, milestone)
        elif action == 'delete':
            return self._render_confirm(req, milestone)

        if not milestone.name:
            req.redirect(req.href.roadmap())

        return self._render_view(req, milestone)

    # Internal methods

    def _do_delete(self, req, milestone):
        """Delete `milestone`, optionally retargeting its tickets, then
        redirect to the roadmap."""
        req.perm(milestone.resource).require('MILESTONE_DELETE')

        retarget_to = None
        if req.args.has_key('retarget'):
            retarget_to = req.args.get('target') or None
        milestone.delete(retarget_to, req.authname)
        add_notice(req, _('The milestone "%(name)s" has been deleted.',
                          name=milestone.name))
        req.redirect(req.href.roadmap())

    def _do_save(self, req, milestone):
        """Validate and persist the submitted milestone form; on warnings,
        fall back to the editor instead of saving."""
        if milestone.exists:
            req.perm(milestone.resource).require('MILESTONE_MODIFY')
        else:
            req.perm(milestone.resource).require('MILESTONE_CREATE')

        old_name = milestone.name
        new_name = req.args.get('name')

        milestone.description = req.args.get('description', '')

        if 'due' in req.args:
            due = req.args.get('duedate', '')
            milestone.due = user_time(req, parse_date, due, hint='datetime') \
                            if due else None
        else:
            milestone.due = None

        completed = req.args.get('completeddate', '')
        retarget_to = req.args.get('target')

        # Instead of raising one single error, check all the constraints and
        # let the user fix them by going back to edit mode showing the warnings
        warnings = []
        def warn(msg):
            add_warning(req, msg)
            warnings.append(msg)

        # -- check the name
        # If the name has changed, check that the milestone doesn't already
        # exist
        # FIXME: the whole .exists business needs to be clarified
        #        (#4130) and should behave like a WikiPage does in
        #        this respect.
        try:
            new_milestone = Milestone(self.env, new_name)
            if new_milestone.name == old_name:
                pass        # Creation or no name change
            elif new_milestone.name:
                warn(_('Milestone "%(name)s" already exists, please '
                       'choose another name.', name=new_milestone.name))
            else:
                warn(_('You must provide a name for the milestone.'))
        except ResourceNotFound:
            # No clash: the (possibly new) name is free to use.
            milestone.name = new_name

        # -- check completed date
        if 'completed' in req.args:
            completed = user_time(req, parse_date, completed,
                                  hint='datetime') if completed else None
            if completed and completed > datetime.now(utc):
                warn(_('Completion date may not be in the future'))
        else:
            completed = None
        milestone.completed = completed

        if warnings:
            return self._render_editor(req, milestone)

        # -- actually save changes
        if milestone.exists:
            milestone.update()
            # eventually retarget opened tickets associated with the milestone
            if 'retarget' in req.args and completed:
                self.env.db_transaction("""
                    UPDATE ticket SET milestone=%s
                    WHERE milestone=%s and status != 'closed'
                    """, (retarget_to, old_name))
                self.log.info("Tickets associated with milestone %s "
                              "retargeted to %s" % (old_name, retarget_to))
        else:
            milestone.insert()

        add_notice(req, _("Your changes have been saved."))
        req.redirect(req.href.milestone(milestone.name))

    def _render_confirm(self, req, milestone):
        """Show the delete-confirmation page, offering retarget candidates."""
        req.perm(milestone.resource).require('MILESTONE_DELETE')

        milestones = [m for m in Milestone.select(self.env)
                      if m.name != milestone.name
                      and 'MILESTONE_VIEW' in req.perm(m.resource)]
        data = {
            'milestone': milestone,
            'milestone_groups': group_milestones(milestones,
                'TICKET_ADMIN' in req.perm)
        }
        return 'milestone_delete.html', data, None

    def _render_editor(self, req, milestone):
        """Show the milestone editor for a new or existing milestone."""
        # Suggest a default due time of 18:00 in the user's timezone
        now = datetime.now(req.tz)
        default_due = datetime(now.year, now.month, now.day, 18)
        if now.hour > 18:
            default_due += timedelta(days=1)
        default_due = to_datetime(default_due, req.tz)

        data = {
            'milestone': milestone,
            'datetime_hint': get_datetime_format_hint(req.lc_time),
            'default_due': default_due,
            'milestone_groups': [],
        }

        if milestone.exists:
            req.perm(milestone.resource).require('MILESTONE_MODIFY')
            milestones = [m for m in Milestone.select(self.env)
                          if m.name != milestone.name
                          and 'MILESTONE_VIEW' in req.perm(m.resource)]
            data['milestone_groups'] = group_milestones(milestones,
                'TICKET_ADMIN' in req.perm)
        else:
            req.perm(milestone.resource).require('MILESTONE_CREATE')

        chrome = Chrome(self.env)
        chrome.add_jquery_ui(req)
        chrome.add_wiki_toolbars(req)
        return 'milestone_edit.html', data, None

    def _render_view(self, req, milestone):
        """Show the milestone page with grouped ticket statistics."""
        milestone_groups = []
        available_groups = []
        component_group_available = False
        ticket_fields = TicketSystem(self.env).get_ticket_fields()

        # collect fields that can be used for grouping
        for field in ticket_fields:
            if field['type'] == 'select' and field['name'] != 'milestone' \
                    or field['name'] in ('owner', 'reporter'):
                available_groups.append({'name': field['name'],
                                         'label': field['label']})
                if field['name'] == 'component':
                    component_group_available = True

        # determine the field currently used for grouping
        by = None
        if component_group_available:
            by = 'component'
        elif available_groups:
            by = available_groups[0]['name']
        by = req.args.get('by', by)

        tickets = get_tickets_for_milestone(self.env, milestone=milestone.name,
                                            field=by)
        tickets = apply_ticket_permissions(self.env, req, tickets)
        stat = get_ticket_stats(self.stats_provider, tickets)

        context = web_context(req, milestone.resource)
        data = {
            'context': context,
            'milestone': milestone,
            'attachments': AttachmentModule(self.env).attachment_data(context),
            'available_groups': available_groups,
            'grouped_by': by,
            'groups': milestone_groups
            }
        data.update(milestone_stats_data(self.env, req, stat, milestone.name))

        if by:
            def per_group_stats_data(gstat, group_name):
                return milestone_stats_data(self.env, req, gstat,
                                            milestone.name, by, group_name)
            # Fill in `data['groups']` via the shared list reference.
            milestone_groups.extend(
                grouped_stats_data(self.env, self.stats_provider, tickets, by,
                                   per_group_stats_data))

        add_stylesheet(req, 'common/css/roadmap.css')
        add_script(req, 'common/js/folding.js')

        def add_milestone_link(rel, milestone):
            href = req.href.milestone(milestone.name, by=req.args.get('by'))
            add_link(req, rel, href, _('Milestone "%(name)s"',
                                       name=milestone.name))

        # first/prev/next/last navigation between visible milestones.
        milestones = [m for m in Milestone.select(self.env)
                      if 'MILESTONE_VIEW' in req.perm(m.resource)]
        idx = [i for i, m in enumerate(milestones) if m.name == milestone.name]
        if idx:
            idx = idx[0]
            if idx > 0:
                add_milestone_link('first', milestones[0])
                add_milestone_link('prev', milestones[idx - 1])
            if idx < len(milestones) - 1:
                add_milestone_link('next', milestones[idx + 1])
                add_milestone_link('last', milestones[-1])
        prevnext_nav(req, _('Previous Milestone'), _('Next Milestone'),
                     _('Back to Roadmap'))

        return 'milestone_view.html', data, None

    # IWikiSyntaxProvider methods

    def get_wiki_syntax(self):
        return []

    def get_link_resolvers(self):
        yield ('milestone', self._format_link)

    def _format_link(self, formatter, ns, name, label):
        name, query, fragment = formatter.split_link(name)
        return self._render_link(formatter.context, name, label,
                                 query + fragment)

    def _render_link(self, context, name, label, extra=''):
        """Render a wiki link to `name`, styled by existence/permission."""
        try:
            milestone = Milestone(self.env, name)
        except TracError:
            milestone = None
        # Note: the above should really not be needed, `Milestone.exists`
        # should simply be false if the milestone doesn't exist in the db
        # (related to #4130)
        href = context.href.milestone(name)
        if milestone and milestone.exists:
            if 'MILESTONE_VIEW' in context.perm(milestone.resource):
                closed = 'closed ' if milestone.is_completed else ''
                return tag.a(label, class_='%smilestone' % closed,
                             href=href + extra)
        elif 'MILESTONE_CREATE' in context.perm('milestone', name):
            return tag.a(label, class_='missing milestone', href=href + extra,
                         rel='nofollow')
        return tag.a(label, class_='missing milestone')

    # IResourceManager methods

    def get_resource_realms(self):
        yield 'milestone'

    def get_resource_description(self, resource, format=None, context=None,
                                 **kwargs):
        desc = resource.id
        if format != 'compact':
            desc = _('Milestone %(name)s', name=resource.id)
        if context:
            return self._render_link(context, resource.id, desc)
        else:
            return desc

    def resource_exists(self, resource):
        """
        >>> from trac.test import EnvironmentStub
        >>> env = EnvironmentStub()

        >>> m1 = Milestone(env)
        >>> m1.name = 'M1'
        >>> m1.insert()

        >>> MilestoneModule(env).resource_exists(Resource('milestone', 'M1'))
        True
        >>> MilestoneModule(env).resource_exists(Resource('milestone', 'M2'))
        False
        """
        return resource.id in MilestoneCache(self.env).milestones

    # ISearchSource methods

    def get_search_filters(self, req):
        if 'MILESTONE_VIEW' in req.perm:
            yield ('milestone', _('Milestones'))

    def get_search_results(self, req, terms, filters):
        if not 'milestone' in filters:
            return
        term_regexps = search_to_regexps(terms)
        milestone_realm = Resource('milestone')
        for name, due, completed, description \
                in MilestoneCache(self.env).milestones.itervalues():
            if any(r.search(description) or r.search(name)
                   for r in term_regexps):
                milestone = milestone_realm(id=name)
                if 'MILESTONE_VIEW' in req.perm(milestone):
                    # Prefer completion date, then due date, for the result
                    # timestamp; fall back to "now".
                    dt = (completed if completed else
                          due if due else datetime.now(utc))
                    yield (get_resource_url(self.env, milestone, req.href),
                           get_resource_name(self.env, milestone), dt,
                           '', shorten_result(description, terms))

        # Attachments
        for result in AttachmentModule(self.env).get_search_results(
                req, milestone_realm, terms):
            yield result
| |
from builtins import zip
from builtins import str
import logging
from airflow.exceptions import AirflowException
from airflow.hooks import BaseHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class CheckOperator(BaseOperator):
    """
    Performs checks against a db. The ``CheckOperator`` expects
    a sql query that will return a single row. Each value on that
    first row is evaluated using python ``bool`` casting. If any of the
    values return ``False`` the check is failed and errors out.

    Note that Python bool casting evals the following as ``False``:

    * False
    * 0
    * Empty string (``""``)
    * Empty list (``[]``)
    * Empty dictionary or set (``{}``)

    Given a query like ``SELECT COUNT(*) FROM foo``, it will fail only if
    the count ``== 0``. You can craft much more complex query that could,
    for instance, check that the table has the same number of rows as
    the source table upstream, or that the count of today's partition is
    greater than yesterday's partition, or that a set of metrics are less
    than 3 standard deviation for the 7 day average.

    This operator can be used as a data quality check in your pipeline, and
    depending on where you put it in your DAG, you have the choice to
    stop the critical path, preventing from
    publishing dubious data, or on the side and receive email alerts
    without stopping the progress of the DAG.

    Note that this is an abstract class and get_db_hook
    needs to be defined. Whereas a get_db_hook is hook that gets a
    single record from an external source.

    :param sql: the sql to be executed
    :type sql: string
    """

    # `sql` is templated and may be loaded from .hql/.sql files.
    template_fields = ('sql',)
    template_ext = ('.hql', '.sql',)
    ui_color = '#fff7e6'

    @apply_defaults
    def __init__(
            self, sql,
            conn_id=None,
            *args, **kwargs):
        super(CheckOperator, self).__init__(*args, **kwargs)
        self.conn_id = conn_id
        self.sql = sql

    def execute(self, context=None):
        """Run the check: fetch the first row and fail if it is missing or
        any of its cells is falsy."""
        logging.info('Executing SQL check: ' + self.sql)
        records = self.get_db_hook().get_first(self.sql)
        logging.info("Record: " + str(records))
        if not records:
            raise AirflowException("The query returned None")
        elif not all([bool(r) for r in records]):
            exceptstr = "Test failed.\nQuery:\n{q}\nResults:\n{r!s}"
            raise AirflowException(exceptstr.format(q=self.sql, r=records))
        logging.info("Success.")

    def get_db_hook(self):
        # Subclasses override this to bind the check to a specific database.
        return BaseHook.get_hook(conn_id=self.conn_id)
def _convert_to_float_if_possible(s):
'''
A small helper function to convert a string to a numeric value
if appropriate
:param s: the string to be converted
:type s: str
'''
try:
ret = float(s)
except (ValueError, TypeError):
ret = s
return ret
class ValueCheckOperator(BaseOperator):
    """
    Performs a simple value check using sql code.

    The first row returned by ``sql`` is compared cell by cell against
    ``pass_value``. When both sides are numeric and ``tolerance`` is a
    number, a record ``r`` passes when
    ``r / (1 + tol) <= pass_value <= r / (1 - tol)``.

    Note that this is an abstract class and get_db_hook
    needs to be defined. Whereas a get_db_hook is hook that gets a
    single record from an external source.

    :param sql: the sql to be executed
    :type sql: string
    :param pass_value: the expected value each cell is compared against
    :param tolerance: optional relative tolerance for numeric checks;
        ignored when it is not convertible to float
    :param conn_id: connection id resolved by ``BaseHook.get_hook``
    """

    __mapper_args__ = {
        'polymorphic_identity': 'ValueCheckOperator'
    }
    template_fields = ('sql',)
    template_ext = ('.hql', '.sql',)
    ui_color = '#fff7e6'

    @apply_defaults
    def __init__(
            self, sql, pass_value, tolerance=None,
            conn_id=None,
            *args, **kwargs):
        super(ValueCheckOperator, self).__init__(*args, **kwargs)
        self.sql = sql
        self.conn_id = conn_id
        self.pass_value = _convert_to_float_if_possible(pass_value)
        tol = _convert_to_float_if_possible(tolerance)
        # A non-numeric tolerance is silently ignored.
        self.tol = tol if isinstance(tol, float) else None
        self.is_numeric_value_check = isinstance(self.pass_value, float)
        self.has_tolerance = self.tol is not None

    def execute(self, context=None):
        """Fetch the first row of ``sql`` and fail unless every cell
        matches ``pass_value`` (within tolerance when configured)."""
        logging.info('Executing SQL check: ' + self.sql)
        records = self.get_db_hook().get_first(self.sql)
        if not records:
            raise AirflowException("The query returned None")

        # .format(**locals()) resolves {self...} and {records} below.
        except_temp = ("Test failed.\nPass value:{self.pass_value}\n"
                       "Query:\n{self.sql}\nResults:\n{records!s}")
        if not self.is_numeric_value_check:
            # String comparison: each cell is stringified first.
            tests = [str(r) == self.pass_value for r in records]
        else:
            try:
                num_rec = [float(r) for r in records]
            except (ValueError, TypeError):
                cvestr = "Converting a result to float failed.\n"
                raise AirflowException(cvestr + except_temp.format(**locals()))
            if self.has_tolerance:
                # NOTE(review): the window divides the record rather than
                # scaling pass_value; kept as-is for backward compatibility.
                tests = [
                    r / (1 + self.tol) <= self.pass_value <= r / (1 - self.tol)
                    for r in num_rec]
            else:
                tests = [r == self.pass_value for r in num_rec]
        if not all(tests):
            raise AirflowException(except_temp.format(**locals()))

    def get_db_hook(self):
        return BaseHook.get_hook(conn_id=self.conn_id)
class IntervalCheckOperator(BaseOperator):
    """
    Checks that the values of metrics given as SQL expressions are within
    a certain tolerance of the ones from days_back before.

    Note that this is an abstract class and get_db_hook
    needs to be defined. Whereas a get_db_hook is hook that gets a
    single record from an external source.

    :param table: the table name
    :type table: str
    :param days_back: number of days between ds and the ds we want to check
        against. Defaults to 7 days
    :type days_back: int
    :param metrics_threshold: a dictionary of ratios indexed by metrics
    :type metrics_threshold: dict
    """

    __mapper_args__ = {
        'polymorphic_identity': 'IntervalCheckOperator'
    }
    template_fields = ('sql1', 'sql2')
    template_ext = ('.hql', '.sql',)
    ui_color = '#fff7e6'

    @apply_defaults
    def __init__(
            self, table, metrics_thresholds,
            date_filter_column='ds', days_back=-7,
            conn_id=None,
            *args, **kwargs):
        super(IntervalCheckOperator, self).__init__(*args, **kwargs)
        self.table = table
        self.metrics_thresholds = metrics_thresholds
        self.metrics_sorted = sorted(metrics_thresholds.keys())
        self.date_filter_column = date_filter_column
        # Normalize so the reference partition is always in the past.
        self.days_back = -abs(days_back)
        self.conn_id = conn_id
        sqlexp = ', '.join(self.metrics_sorted)
        sqlt = ("SELECT {sqlexp} FROM {table}"
                " WHERE {date_filter_column}=").format(**locals())
        # sql1 queries the current ds partition; sql2 the reference one.
        self.sql1 = sqlt + "'{{ ds }}'"
        self.sql2 = sqlt + "'{{ macros.ds_add(ds, "+str(self.days_back)+") }}'"

    def execute(self, context=None):
        """Compare each metric's current/reference ratio to its threshold,
        failing with a summary when any threshold is exceeded."""
        hook = self.get_db_hook()
        logging.info('Executing SQL check: ' + self.sql2)
        row2 = hook.get_first(self.sql2)
        logging.info('Executing SQL check: ' + self.sql1)
        row1 = hook.get_first(self.sql1)
        # BUG FIX: .format() was previously called on the AirflowException
        # instance (raise AirflowException("...").format(...)), which would
        # itself raise AttributeError; format the message string instead.
        if not row2:
            raise AirflowException(
                "The query {q} returned None".format(q=self.sql2))
        if not row1:
            raise AirflowException(
                "The query {q} returned None".format(q=self.sql1))

        current = dict(zip(self.metrics_sorted, row1))
        reference = dict(zip(self.metrics_sorted, row2))

        ratios = {}
        test_results = {}
        rlog = "Ratio for {0}: {1} \n Ratio threshold : {2}"
        fstr = "'{k}' check failed. {r} is above {tr}"
        estr = "The following tests have failed:\n {0}"
        countstr = "The following {j} tests out of {n} failed:"
        for m in self.metrics_sorted:
            if current[m] == 0 or reference[m] == 0:
                # Ratio is undefined when either side is zero.
                # NOTE(review): under Python 2, `None < threshold` is True,
                # so a zero metric passes; confirm this is intended.
                ratio = None
            else:
                ratio = float(max(current[m], reference[m])) / \
                    min(current[m], reference[m])
            logging.info(rlog.format(m, ratio, self.metrics_thresholds[m]))
            ratios[m] = ratio
            test_results[m] = ratio < self.metrics_thresholds[m]

        if not all(test_results.values()):
            failed_tests = [it[0] for it in test_results.items() if not it[1]]
            j = len(failed_tests)
            n = len(self.metrics_sorted)
            logging.warning(countstr.format(**locals()))
            for k in failed_tests:
                logging.warning(fstr.format(k=k, r=ratios[k],
                                            tr=self.metrics_thresholds[k]))
            raise AirflowException(estr.format(", ".join(failed_tests)))
        logging.info("All tests have passed")

    def get_db_hook(self):
        return BaseHook.get_hook(conn_id=self.conn_id)
| |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import copy
import os
import time
import threading
import pprint
from ambari_agent.ActionQueue import ActionQueue
from ambari_agent.LiveStatus import LiveStatus
logger = logging.getLogger()
"""
RecoveryManager has the following capabilities:
* Store data needed for execution commands extracted from STATUS command
* Generate INSTALL command
* Generate START command
"""
class RecoveryManager:
  # Keys used when reading STATUS / EXECUTION command dictionaries.
  COMMAND_TYPE = "commandType"
  PAYLOAD_LEVEL = "payloadLevel"
  COMPONENT_NAME = "componentName"
  ROLE = "role"
  TASK_ID = "taskId"
  DESIRED_STATE = "desiredState"
  HAS_STALE_CONFIG = "hasStaleConfigs"
  EXECUTION_COMMAND_DETAILS = "executionCommandDetails"
  ROLE_COMMAND = "roleCommand"
  HOST_LEVEL_PARAMS = "hostLevelParams"
  # How much of the original command payload is echoed back.
  PAYLOAD_LEVEL_DEFAULT = "DEFAULT"
  PAYLOAD_LEVEL_MINIMAL = "MINIMAL"
  PAYLOAD_LEVEL_EXECUTION_COMMAND = "EXECUTION_COMMAND"
  # Component lifecycle states the manager reasons about.
  STARTED = "STARTED"
  INSTALLED = "INSTALLED"
  INIT = "INIT"  # TODO: What is the state when machine is reset
  INSTALL_FAILED = "INSTALL_FAILED"
  COMPONENT_UPDATE_KEY_FORMAT = "{0}_UPDATE_TIME"
  COMMAND_REFRESH_DELAY_SEC = 600 #10 minutes

  # On-disk persistence file for recovery action counters.
  FILENAME = "recovery.json"

  # Template for per-component retry bookkeeping (deep-copied per component).
  default_action_counter = {
    "lastAttempt": 0,
    "count": 0,
    "lastReset": 0,
    "lifetimeCount": 0,
    "warnedLastAttempt": False,
    "warnedLastReset": False,
    "warnedThresholdReached": False
  }

  # Template for per-component status tracking (deep-copied per component).
  default_component_status = {
    "current": "",
    "desired": "",
    "stale_config": False
  }
  def __init__(self, cache_dir, recovery_enabled=False, auto_start_only=False, auto_install_start=False):
    """Set up bookkeeping, locks and the on-disk action cache.

    cache_dir          -- directory where recovery.json is persisted
    recovery_enabled   -- master switch for automatic recovery
    auto_start_only    -- only START commands may be generated
    auto_install_start -- INSTALL followed by START may be generated
    """
    self.recovery_enabled = recovery_enabled
    self.auto_start_only = auto_start_only
    self.auto_install_start = auto_install_start
    self.max_count = 6
    self.window_in_min = 60
    self.retry_gap = 5
    self.max_lifetime_count = 12
    self.stored_exec_commands = {}
    self.id = int(time.time())
    self.allowed_desired_states = [self.STARTED, self.INSTALLED]
    self.allowed_current_states = [self.INIT, self.INSTALLED]
    self.enabled_components = []
    self.statuses = {}
    # Separate locks for status map, command store, active-command counter
    # and the on-disk cache to keep contention low.
    self.__status_lock = threading.RLock()
    self.__command_lock = threading.RLock()
    self.__active_command_lock = threading.RLock()
    self.__cache_lock = threading.RLock()
    self.active_command_count = 0
    self.paused = False
    self.recovery_timestamp = -1

    if not os.path.exists(cache_dir):
      try:
        os.makedirs(cache_dir)
      except:
        # Best-effort: recovery still works, only persistence is lost.
        logger.critical("[RecoveryManager] Could not create the cache directory {0}".format(cache_dir))

    self.__actions_json_file = os.path.join(cache_dir, self.FILENAME)
    self.actions = {}
    # Re-apply defaults through update_config so derived state is consistent.
    self.update_config(6, 60, 5, 12, recovery_enabled, auto_start_only, auto_install_start, "", -1)
    pass
def start_execution_command(self):
with self.__active_command_lock:
self.active_command_count += 1
pass
def stop_execution_command(self):
with self.__active_command_lock:
self.active_command_count -= 1
pass
def has_active_command(self):
return self.active_command_count > 0
def set_paused(self, paused):
if self.paused != paused:
logger.debug("RecoveryManager is transitioning from isPaused = " + str(self.paused) + " to " + str(paused))
self.paused = paused
def enabled(self):
return self.recovery_enabled
def get_current_status(self, component):
if component in self.statuses:
return self.statuses[component]["current"]
pass
def get_desired_status(self, component):
if component in self.statuses:
return self.statuses[component]["desired"]
pass
  def update_config_staleness(self, component, is_config_stale):
    """
    Updates staleness of config

    Lazily creates the component's status entry (double-checked under the
    status lock), records the new staleness flag, and drops any pending
    recovery command once current == desired with fresh config.
    """
    if component not in self.statuses:
      self.__status_lock.acquire()
      try:
        # Re-check under the lock: another thread may have created it.
        if component not in self.statuses:
          component_status = copy.deepcopy(self.default_component_status)
          component_status["stale_config"] = is_config_stale
          self.statuses[component] = component_status
      finally:
        self.__status_lock.release()
      pass

    self.statuses[component]["stale_config"] = is_config_stale
    # Nothing left to recover: states agree and config is fresh.
    if self.statuses[component]["current"] == self.statuses[component]["desired"] and \
        self.statuses[component]["stale_config"] == False:
      self.remove_command(component)
    pass
  def update_current_status(self, component, state):
    """
    Updates the current status of a host component managed by the agent

    Creates the status entry on first sight (double-checked under the
    status lock), logs state changes, and drops any pending recovery
    command once current == desired with fresh config.
    """
    if component not in self.statuses:
      self.__status_lock.acquire()
      try:
        # Re-check under the lock: another thread may have created it.
        if component not in self.statuses:
          component_status = copy.deepcopy(self.default_component_status)
          component_status["current"] = state
          self.statuses[component] = component_status
          logger.info("New status, current status is set to %s for %s", self.statuses[component]["current"], component)
      finally:
        self.__status_lock.release()
      pass

    if self.statuses[component]["current"] != state:
      logger.info("current status is set to %s for %s", state, component)
      self.statuses[component]["current"] = state

    # Nothing left to recover: states agree and config is fresh.
    if self.statuses[component]["current"] == self.statuses[component]["desired"] and \
        self.statuses[component]["stale_config"] == False:
      self.remove_command(component)
    pass
def update_desired_status(self, component, state):
    """
    Updates the desired status of a host component managed by the agent.

    Mirrors update_current_status: lazily creates the status record under
    __status_lock, logs changes, and drops the cached recovery command once
    the component has converged with fresh config.
    """
    if component not in self.statuses:
        self.__status_lock.acquire()
        try:
            # Re-check under the lock to avoid a duplicate insert race.
            if component not in self.statuses:
                component_status = copy.deepcopy(self.default_component_status)
                component_status["desired"] = state
                self.statuses[component] = component_status
                logger.info("New status, desired status is set to %s for %s", self.statuses[component]["desired"], component)
        finally:
            self.__status_lock.release()
        pass
    # Log and store only on an actual change of state.
    if self.statuses[component]["desired"] != state:
        logger.info("desired status is set to %s for %s", state, component)
        self.statuses[component]["desired"] = state
    # Converged with fresh config: the stored recovery command is obsolete.
    if self.statuses[component]["current"] == self.statuses[component]["desired"] and \
            self.statuses[component]["stale_config"] == False:
        self.remove_command(component)
    pass
"""
Whether specific components are enabled for recovery.
"""
def configured_for_recovery(self, component):
    """Return True when component is in the set enabled for auto recovery."""
    # Membership on an empty list is False, so no separate length check needed.
    return component in self.enabled_components
def requires_recovery(self, component):
    """
    Return True when the component is a candidate for auto recovery.

    Recovery is allowed for:
      INSTALLED --> STARTED
      INIT --> INSTALLED --> STARTED
      RE-INSTALLED (if configs do not match)
    """
    if not self.enabled():
        return False
    if not self.configured_for_recovery(component):
        return False
    if component not in self.statuses:
        return False
    status = self.statuses[component]
    if self.auto_start_only or self.auto_install_start:
        # Restricted modes: only a current/desired mismatch matters; stale
        # configs are ignored.
        if status["current"] == status["desired"]:
            return False
        if status["desired"] not in self.allowed_desired_states:
            return False
    else:
        # FULL mode: a stale config alone is enough to trigger recovery.
        if status["current"] == status["desired"] and status['stale_config'] == False:
            return False
        if status["desired"] not in self.allowed_desired_states or status["current"] not in self.allowed_current_states:
            return False
    logger.info("%s needs recovery, desired = %s, and current = %s.", component, status["desired"], status["current"])
    return True
    pass
def get_recovery_status(self):
    """
    Creates a status report of the form
    {
      "summary" : "RECOVERABLE|DISABLED|PARTIALLY_RECOVERABLE|UNRECOVERABLE",
      "componentReports" : [
        {
          "name": "component_name",
          "numAttempts" : "x",
          "limitReached" : "true|false"
        }
      ]
    }
    """
    report = {}
    report["summary"] = "DISABLED"
    if self.enabled():
        report["summary"] = "RECOVERABLE"
        num_limits_reached = 0
        recovery_states = []
        report["componentReports"] = recovery_states
        # Snapshot the per-component attempt counters under the status lock.
        self.__status_lock.acquire()
        try:
            for component in self.actions.keys():
                action = self.actions[component]
                recovery_state = {}
                recovery_state["name"] = component
                recovery_state["numAttempts"] = action["lifetimeCount"]
                # A component stops being recoverable once its lifetime
                # attempt budget is exhausted.
                recovery_state["limitReached"] = self.max_lifetime_count <= action["lifetimeCount"]
                recovery_states.append(recovery_state)
                if recovery_state["limitReached"] == True:
                    num_limits_reached += 1
                pass
        finally:
            self.__status_lock.release()
        if num_limits_reached > 0:
            report["summary"] = "PARTIALLY_RECOVERABLE"
            if num_limits_reached == len(recovery_states):
                report["summary"] = "UNRECOVERABLE"
    return report
    pass
def get_recovery_commands(self):
    """
    This method computes the recovery commands for the following transitions
      INSTALLED --> STARTED
      INIT --> INSTALLED
      INSTALL_FAILED --> INSTALLED
      INSTALL_FAILED --> STARTED

    :return: list of auto-execution command dicts ready for the action queue
    """
    commands = []
    for component in self.statuses.keys():
        if self.requires_recovery(component) and self.may_execute(component):
            # Work on a snapshot so concurrent status updates cannot change
            # the decision mid-computation.
            status = copy.deepcopy(self.statuses[component])
            command = None
            if self.auto_start_only:
                # AUTO_START: only INSTALLED -> STARTED is recovered.
                if status["desired"] == self.STARTED:
                    if status["current"] == self.INSTALLED:
                        command = self.get_start_command(component)
            elif self.auto_install_start:
                # AUTO_INSTALL_START: additionally re-install after a failed
                # install.
                if status["desired"] == self.STARTED:
                    if status["current"] == self.INSTALLED:
                        command = self.get_start_command(component)
                    elif status["current"] == self.INSTALL_FAILED:
                        command = self.get_install_command(component)
                elif status["desired"] == self.INSTALLED:
                    if status["current"] == self.INSTALL_FAILED:
                        command = self.get_install_command(component)
            else:
                # START, INSTALL, RESTART
                if status["desired"] != status["current"]:
                    if status["desired"] == self.STARTED:
                        if status["current"] == self.INSTALLED:
                            command = self.get_start_command(component)
                        elif status["current"] == self.INIT:
                            command = self.get_install_command(component)
                        elif status["current"] == self.INSTALL_FAILED:
                            command = self.get_install_command(component)
                    elif status["desired"] == self.INSTALLED:
                        if status["current"] == self.INIT:
                            command = self.get_install_command(component)
                        elif status["current"] == self.INSTALL_FAILED:
                            command = self.get_install_command(component)
                        elif status["current"] == self.STARTED:
                            command = self.get_stop_command(component)
                else:
                    # desired == current but config is stale (FULL mode):
                    # re-install or restart in place.
                    if status["current"] == self.INSTALLED:
                        command = self.get_install_command(component)
                    elif status["current"] == self.STARTED:
                        command = self.get_restart_command(component)
            if command:
                # Record the attempt against the rate limits.
                self.execute(component)
                commands.append(command)
    return commands
    pass
def may_execute(self, action):
    """
    Check if an action can be executed without consuming an attempt.

    :param action: component/action name; blank names are rejected
    :return: True when the rate limits would allow another recovery attempt
    """
    if not action or action.strip() == "":
        return False
    # Lazily create the per-action counters under the status lock.
    if action not in self.actions:
        self.__status_lock.acquire()
        try:
            self.actions[action] = copy.deepcopy(self.default_action_counter)
        finally:
            self.__status_lock.release()
    return self._execute_action_chk_only(action)
    pass
def execute(self, action):
    """
    Execute an action: consumes one recovery attempt for `action` against
    the rate limits (mutating the counters).

    :param action: component/action name; blank names are rejected
    :return: True when the attempt may proceed
    """
    if not action or action.strip() == "":
        return False
    # Lazily create the per-action counters under the status lock.
    if action not in self.actions:
        self.__status_lock.acquire()
        try:
            self.actions[action] = copy.deepcopy(self.default_action_counter)
        finally:
            self.__status_lock.release()
    return self._execute_action_(action)
    pass
def _execute_action_(self, action_name):
    """
    _private_ implementation of [may] execute.

    Applies rate-limiting bookkeeping for one recovery attempt and returns
    True when the attempt may proceed. Three limits are enforced: a lifetime
    cap (max_lifetime_count), a per-window cap (max_count in window_in_sec),
    and a minimum gap between retries (retry_gap_in_sec). The "warned*"
    flags make each skip reason log loudly only once, then at debug level.
    """
    action_counter = self.actions[action_name]
    now = self._now_()
    executed = False
    seconds_since_last_attempt = now - action_counter["lastAttempt"]
    if action_counter["lifetimeCount"] < self.max_lifetime_count:
        #reset if window_in_sec seconds passed since last attempt
        if seconds_since_last_attempt > self.window_in_sec:
            action_counter["count"] = 0
            action_counter["lastReset"] = now
            action_counter["warnedLastReset"] = False
        if action_counter["count"] < self.max_count:
            if seconds_since_last_attempt > self.retry_gap_in_sec:
                # Attempt allowed: consume one attempt from both budgets.
                action_counter["count"] += 1
                action_counter["lifetimeCount"] +=1
                if self.retry_gap > 0:
                    action_counter["lastAttempt"] = now
                action_counter["warnedLastAttempt"] = False
                # First attempt of a fresh window also starts the window.
                if action_counter["count"] == 1:
                    action_counter["lastReset"] = now
                executed = True
            else:
                # Retry gap not yet elapsed: warn once, then stay quiet.
                if action_counter["warnedLastAttempt"] == False:
                    action_counter["warnedLastAttempt"] = True
                    logger.warn(
                        "%s seconds has not passed since last occurrence %s seconds back for %s. " +
                        "Will silently skip execution without warning till retry gap is passed",
                        self.retry_gap_in_sec, seconds_since_last_attempt, action_name)
                else:
                    logger.debug(
                        "%s seconds has not passed since last occurrence %s seconds back for %s",
                        self.retry_gap_in_sec, seconds_since_last_attempt, action_name)
        else:
            # Per-window cap hit: allow again only once the window has expired.
            sec_since_last_reset = now - action_counter["lastReset"]
            if sec_since_last_reset > self.window_in_sec:
                action_counter["count"] = 1
                action_counter["lifetimeCount"] +=1
                if self.retry_gap > 0:
                    action_counter["lastAttempt"] = now
                action_counter["lastReset"] = now
                action_counter["warnedLastReset"] = False
                executed = True
            else:
                # Window still active: warn once, then stay quiet.
                if action_counter["warnedLastReset"] == False:
                    action_counter["warnedLastReset"] = True
                    logger.warn("%s occurrences in %s minutes reached the limit for %s. " +
                                "Will silently skip execution without warning till window is reset",
                                action_counter["count"], self.window_in_min, action_name)
                else:
                    logger.debug("%s occurrences in %s minutes reached the limit for %s",
                                 action_counter["count"], self.window_in_min, action_name)
    else:
        # Lifetime cap hit: this action will not recover again this agent run.
        if action_counter["warnedThresholdReached"] == False:
            action_counter["warnedThresholdReached"] = True
            logger.warn("%s occurrences in agent life time reached the limit for %s. " +
                        "Will silently skip execution without warning till window is reset",
                        action_counter["lifetimeCount"], action_name)
        else:
            logger.error("%s occurrences in agent life time reached the limit for %s",
                         action_counter["lifetimeCount"], action_name)
    # Persist the updated counters so limits survive agent restarts.
    self._dump_actions()
    return executed
    pass
def _dump_actions(self):
    """
    Dump recovery actions to FS.

    :return: True on success, False when the write failed (failure is
        logged, not raised).
    """
    self.__cache_lock.acquire()
    try:
        with open(self.__actions_json_file, 'w') as f:
            json.dump(self.actions, f, indent=2)
    except Exception, exception:
        logger.exception("Unable to dump actions to {0}".format(self.__actions_json_file))
        return False
    finally:
        self.__cache_lock.release()
    return True
    pass
def _load_actions(self):
    """
    Loads recovery actions from FS.

    :return: the parsed actions dict, or {} when the file is missing or
        unreadable (best-effort: failures are logged, not raised).
    """
    self.__cache_lock.acquire()
    try:
        if os.path.isfile(self.__actions_json_file):
            with open(self.__actions_json_file, 'r') as fp:
                return json.load(fp)
    except Exception, exception:
        logger.warning("Unable to load recovery actions from {0}.".format(self.__actions_json_file))
    finally:
        self.__cache_lock.release()
    return {}
    pass
def get_actions_copy(self):
    """
    :return: thread-safe deep-copied snapshot of the recovery actions map
    """
    self.__status_lock.acquire()
    try:
        return copy.deepcopy(self.actions)
    finally:
        self.__status_lock.release()
    pass
def is_action_info_stale(self, action_name):
    """
    Check whether the tracked counters for an action are older than the
    sliding window (and may therefore be discarded).

    :param action_name: name of the action/component to examine
    :return: True when stale; False for unknown actions or recent activity
    """
    counter = self.actions.get(action_name)
    if counter is None:
        return False
    age = self._now_() - counter["lastAttempt"]
    return age > self.window_in_sec
def _execute_action_chk_only(self, action_name):
"""
_private_ implementation of [may] execute check only
"""
action_counter = self.actions[action_name]
now = self._now_()
seconds_since_last_attempt = now - action_counter["lastAttempt"]
if action_counter["lifetimeCount"] < self.max_lifetime_count:
if action_counter["count"] < self.max_count:
if seconds_since_last_attempt > self.retry_gap_in_sec:
return True
else:
sec_since_last_reset = now - action_counter["lastReset"]
if sec_since_last_reset > self.window_in_sec:
return True
return False
pass
def _now_(self):
return int(time.time())
pass
def update_configuration_from_registration(self, reg_resp):
    """
    Parse the server registration response and apply its recovery settings
    via update_config.

    Expected payload shape:
    "recoveryConfig": {
      "type" : "DEFAULT|AUTO_START|AUTO_INSTALL_START|FULL",
      "maxCount" : 10,
      "windowInMinutes" : 60,
      "retryGap" : 0,
      "components" : "a,b",
      "recoveryTimestamp" : 1458150424380
    }
    """
    # Defaults used when the server omits the section or individual keys.
    recovery_enabled = False
    auto_start_only = False
    auto_install_start = False
    max_count = 6
    window_in_min = 60
    retry_gap = 5
    max_lifetime_count = 12
    enabled_components = ""
    recovery_timestamp = -1  # Default value if recoveryTimestamp is not available.
    if reg_resp and "recoveryConfig" in reg_resp:
        logger.info("RecoverConfig = " + pprint.pformat(reg_resp["recoveryConfig"]))
        config = reg_resp["recoveryConfig"]
        if "type" in config:
            # Any type other than DEFAULT turns recovery on.
            if config["type"] in ["AUTO_INSTALL_START", "AUTO_START", "FULL"]:
                recovery_enabled = True
                if config["type"] == "AUTO_START":
                    auto_start_only = True
                elif config["type"] == "AUTO_INSTALL_START":
                    auto_install_start = True
        if "maxCount" in config:
            max_count = self._read_int_(config["maxCount"], max_count)
        if "windowInMinutes" in config:
            window_in_min = self._read_int_(config["windowInMinutes"], window_in_min)
        if "retryGap" in config:
            retry_gap = self._read_int_(config["retryGap"], retry_gap)
        if 'maxLifetimeCount' in config:
            max_lifetime_count = self._read_int_(config['maxLifetimeCount'], max_lifetime_count)
        if 'components' in config:
            enabled_components = config['components']
        if 'recoveryTimestamp' in config:
            recovery_timestamp = config['recoveryTimestamp']
    self.update_config(max_count, window_in_min, retry_gap, max_lifetime_count, recovery_enabled, auto_start_only,
                       auto_install_start, enabled_components, recovery_timestamp)
    pass
"""
Update recovery configuration with the specified values.
max_count - Configured maximum count of recovery attempt allowed per host component in a window.
window_in_min - Configured window size in minutes.
retry_gap - Configured retry gap between tries per host component
max_lifetime_count - Configured maximum lifetime count of recovery attempt allowed per host component.
recovery_enabled - True or False. Indicates whether recovery is enabled or not.
auto_start_only - True if AUTO_START recovery type was specified. False otherwise.
auto_install_start - True if AUTO_INSTALL_START recovery type was specified. False otherwise.
enabled_components - CSV of components enabled for auto start.
recovery_timestamp - Timestamp when the recovery values were last updated. -1 on start up.
"""
def update_config(self, max_count, window_in_min, retry_gap, max_lifetime_count, recovery_enabled,
                  auto_start_only, auto_install_start, enabled_components, recovery_timestamp):
    """
    Update recovery configuration; recovery stays disabled when any value
    fails validation.

    :param max_count: max recovery attempts per component within one window
    :param window_in_min: window size in minutes
    :param retry_gap: minutes to wait between attempts for one component
    :param max_lifetime_count: max attempts per component over the agent lifetime
    :param recovery_enabled: master on/off switch
    :param auto_start_only: True when recovery type is AUTO_START
    :param auto_install_start: True when recovery type is AUTO_INSTALL_START
    :param enabled_components: CSV of component names eligible for recovery
    :param recovery_timestamp: server timestamp of this config, -1 at startup
    """
    # Disable first; only re-enable after all validations pass.
    self.recovery_enabled = False
    # NOTE: messages now match the checks — the old text claimed
    # "non-negative" while the checks require strictly positive values.
    if max_count <= 0:
        logger.warn("Recovery disabled: max_count must be a positive number")
        return
    if window_in_min <= 0:
        logger.warn("Recovery disabled: window_in_min must be a positive number")
        return
    if retry_gap < 1:
        logger.warn("Recovery disabled: retry_gap must be a positive number and at least 1")
        return
    if retry_gap >= window_in_min:
        logger.warn("Recovery disabled: retry_gap must be smaller than window_in_min")
        return
    if max_lifetime_count < 0 or max_lifetime_count < max_count:
        logger.warn("Recovery disabled: max_lifetime_count must be non-negative and >= max_count")
        return
    self.max_count = max_count
    self.window_in_min = window_in_min
    self.retry_gap = retry_gap
    self.window_in_sec = window_in_min * 60
    self.retry_gap_in_sec = retry_gap * 60
    self.auto_start_only = auto_start_only
    self.auto_install_start = auto_install_start
    self.max_lifetime_count = max_lifetime_count
    self.enabled_components = []
    self.recovery_timestamp = recovery_timestamp
    # Allowed state transitions depend on the recovery type.
    self.allowed_desired_states = [self.STARTED, self.INSTALLED]
    self.allowed_current_states = [self.INIT, self.INSTALL_FAILED, self.INSTALLED, self.STARTED]
    if self.auto_start_only:
        self.allowed_desired_states = [self.STARTED]
        self.allowed_current_states = [self.INSTALLED]
    elif self.auto_install_start:
        self.allowed_desired_states = [self.INSTALLED, self.STARTED]
        self.allowed_current_states = [self.INSTALL_FAILED, self.INSTALLED]
    if enabled_components is not None and len(enabled_components) > 0:
        components = enabled_components.split(",")
        for component in components:
            if len(component.strip()) > 0:
                self.enabled_components.append(component.strip())
    self.recovery_enabled = recovery_enabled
    if self.recovery_enabled:
        logger.info(
            "==> Auto recovery is enabled with maximum %s in %s minutes with gap of %s minutes between and"
            " lifetime max being %s. Enabled components - %s",
            self.max_count, self.window_in_min, self.retry_gap, self.max_lifetime_count,
            ', '.join(self.enabled_components))
def get_unique_task_id(self):
    """Return a new monotonically increasing task id for auto commands."""
    next_id = self.id + 1
    self.id = next_id
    return next_id
def process_status_commands(self, commands):
    """
    Consume incoming STATUS_COMMANDs: store/refresh the per-component
    execution-command details later used to build recovery commands.
    """
    if not self.enabled():
        return
    if commands and len(commands) > 0:
        for command in commands:
            self.store_or_update_command(command)
            # Raw details can be large; log them only at debug level.
            if self.EXECUTION_COMMAND_DETAILS in command:
                logger.debug("Details to construct exec commands: " + pprint.pformat(command[self.EXECUTION_COMMAND_DETAILS]))
    pass
def process_execution_commands(self, commands):
    """
    Update desired component states from incoming EXECUTION_COMMANDs so the
    recovery loop knows what to converge towards: STOP/INSTALL mark the
    component's desired state DEAD; START and a custom RESTART mark it LIVE.
    """
    if not self.enabled():
        return
    if commands and len(commands) > 0:
        for command in commands:
            if self.COMMAND_TYPE in command and command[self.COMMAND_TYPE] == ActionQueue.EXECUTION_COMMAND:
                if self.ROLE in command:
                    if command[self.ROLE_COMMAND] in (ActionQueue.ROLE_COMMAND_INSTALL, ActionQueue.ROLE_COMMAND_STOP) \
                            and self.configured_for_recovery(command[self.ROLE]):
                        self.update_desired_status(command[self.ROLE], LiveStatus.DEAD_STATUS)
                        logger.info("Received EXECUTION_COMMAND (STOP/INSTALL), desired state of " + command[self.ROLE] + " to " +
                                    self.get_desired_status(command[self.ROLE]) )
                    elif command[self.ROLE_COMMAND] == ActionQueue.ROLE_COMMAND_START \
                            and self.configured_for_recovery(command[self.ROLE]):
                        self.update_desired_status(command[self.ROLE], LiveStatus.LIVE_STATUS)
                        logger.info("Received EXECUTION_COMMAND (START), desired state of " + command[self.ROLE] + " to " +
                                    self.get_desired_status(command[self.ROLE]) )
                    # dict.has_key() was removed in Python 3; `in` is the
                    # equivalent (and preferred) membership test in Python 2 too.
                    elif 'custom_command' in command[self.HOST_LEVEL_PARAMS] and \
                            command[self.HOST_LEVEL_PARAMS]['custom_command'] == ActionQueue.CUSTOM_COMMAND_RESTART \
                            and self.configured_for_recovery(command[self.ROLE]):
                        self.update_desired_status(command[self.ROLE], LiveStatus.LIVE_STATUS)
                        logger.info("Received EXECUTION_COMMAND (RESTART), desired state of " + command[self.ROLE] + " to " +
                                    self.get_desired_status(command[self.ROLE]) )
def store_or_update_command(self, command):
    """
    Stores command details by reading them from the STATUS_COMMAND.
    Updates the desired state and config staleness as well.
    """
    if not self.enabled():
        return
    logger.debug("Inspecting command to store/update details")
    if self.COMMAND_TYPE in command and command[self.COMMAND_TYPE] == ActionQueue.STATUS_COMMAND:
        payloadLevel = self.PAYLOAD_LEVEL_DEFAULT
        if self.PAYLOAD_LEVEL in command:
            payloadLevel = command[self.PAYLOAD_LEVEL]
        component = command[self.COMPONENT_NAME]
        # Server-reported desired state and staleness always update.
        self.update_desired_status(component, command[self.DESIRED_STATE])
        self.update_config_staleness(component, command[self.HAS_STALE_CONFIG])
        # Only EXECUTION_COMMAND-level payloads carry reusable details.
        if payloadLevel == self.PAYLOAD_LEVEL_EXECUTION_COMMAND:
            if self.EXECUTION_COMMAND_DETAILS in command:
                # Store the execution command details
                self.remove_command(component)
                self.add_command(component, command[self.EXECUTION_COMMAND_DETAILS])
                logger.debug("Stored command details for " + component)
            else:
                logger.warn("Expected field " + self.EXECUTION_COMMAND_DETAILS + " unavailable.")
        pass
    pass
def get_install_command(self, component):
    """
    Build an auto-execution INSTALL command for component from the stored
    STATUS command details.

    :return: the command dict, or None when recovery is paused/disabled or
        no stored details are available.
    """
    if self.paused:
        logger.info("Recovery is paused, likely tasks waiting in pipeline for this host.")
        return None
    if not self.enabled():
        logger.info("Recovery is not enabled. INSTALL command will not be computed.")
        return None
    logger.debug("Using stored INSTALL command for %s", component)
    if not self.command_exists(component, ActionQueue.EXECUTION_COMMAND):
        logger.info("INSTALL command cannot be computed as details are not received from Server.")
        return None
    command = copy.deepcopy(self.stored_exec_commands[component])
    command[self.ROLE_COMMAND] = "INSTALL"
    command[self.COMMAND_TYPE] = ActionQueue.AUTO_EXECUTION_COMMAND
    command[self.TASK_ID] = self.get_unique_task_id()
    return command
def get_stop_command(self, component):
    """
    Build an auto-execution STOP command for component from the stored
    STATUS command details.

    :return: the command dict, or None when recovery is paused/disabled or
        no stored details are available.
    """
    if self.paused:
        logger.info("Recovery is paused, likely tasks waiting in pipeline for this host.")
        return None
    if not self.enabled():
        logger.info("Recovery is not enabled. STOP command will not be computed.")
        return None
    logger.debug("Using stored STOP command for %s", component)
    if not self.command_exists(component, ActionQueue.EXECUTION_COMMAND):
        logger.info("STOP command cannot be computed as details are not received from Server.")
        return None
    command = copy.deepcopy(self.stored_exec_commands[component])
    command[self.ROLE_COMMAND] = "STOP"
    command[self.COMMAND_TYPE] = ActionQueue.AUTO_EXECUTION_COMMAND
    command[self.TASK_ID] = self.get_unique_task_id()
    return command
def get_restart_command(self, component):
    """
    Build an auto-execution RESTART (custom command) for component from the
    stored STATUS command details.

    :return: the command dict, or None when recovery is paused/disabled or
        no stored details are available.
    """
    if self.paused:
        logger.info("Recovery is paused, likely tasks waiting in pipeline for this host.")
        return None
    if self.enabled():
        # Fixed copy-paste defect: this debug line previously said "INSTALL".
        logger.debug("Using stored RESTART command for %s", component)
        if self.command_exists(component, ActionQueue.EXECUTION_COMMAND):
            command = copy.deepcopy(self.stored_exec_commands[component])
            command[self.ROLE_COMMAND] = "CUSTOM_COMMAND"
            command[self.COMMAND_TYPE] = ActionQueue.AUTO_EXECUTION_COMMAND
            command[self.TASK_ID] = self.get_unique_task_id()
            command[self.HOST_LEVEL_PARAMS]['custom_command'] = 'RESTART'
            return command
        else:
            logger.info("RESTART command cannot be computed as details are not received from Server.")
    else:
        logger.info("Recovery is not enabled. RESTART command will not be computed.")
    return None
def get_start_command(self, component):
    """
    Build an auto-execution START command for component from the stored
    STATUS command details.

    :return: the command dict, or None when recovery is paused/disabled or
        no stored details are available.
    """
    if self.paused:
        logger.info("Recovery is paused, likely tasks waiting in pipeline for this host.")
        return None
    if not self.enabled():
        logger.info("Recovery is not enabled. START command will not be computed.")
        return None
    logger.debug("Using stored START command for %s", component)
    if not self.command_exists(component, ActionQueue.EXECUTION_COMMAND):
        logger.info("START command cannot be computed as details are not received from Server.")
        return None
    command = copy.deepcopy(self.stored_exec_commands[component])
    command[self.ROLE_COMMAND] = "START"
    command[self.COMMAND_TYPE] = ActionQueue.AUTO_EXECUTION_COMMAND
    command[self.TASK_ID] = self.get_unique_task_id()
    return command
def command_exists(self, component, command_type):
    """Check whether a stored, non-stale command of command_type exists for
    component. Stale entries are evicted as a side effect."""
    if command_type == ActionQueue.EXECUTION_COMMAND:
        self.remove_stale_command(component)
        return component in self.stored_exec_commands
    return False
def remove_stale_command(self, component):
    """Evict the stored execution command for component when it is older
    than COMMAND_REFRESH_DELAY_SEC (the server is expected to resend it)."""
    component_update_key = self.COMPONENT_UPDATE_KEY_FORMAT.format(component)
    if component in self.stored_exec_commands:
        # NOTE(review): assumes the timestamp entry is always stored
        # alongside the command (add_command maintains both) — would
        # KeyError otherwise; confirm no other writer exists.
        insert_time = self.stored_exec_commands[component_update_key]
        age = self._now_() - insert_time
        if self.COMMAND_REFRESH_DELAY_SEC < age:
            logger.debug("Removing stored command for component : " + str(component) + " as its " + str(age) + " sec old")
            self.remove_command(component)
    pass
def remove_command(self, component):
    """
    Remove the stored execution command (and its timestamp entry) for
    component, under __status_lock.

    :return: True when an entry was removed, False when none existed.
    """
    if component in self.stored_exec_commands:
        self.__status_lock.acquire()
        try:
            component_update_key = self.COMPONENT_UPDATE_KEY_FORMAT.format(component)
            del self.stored_exec_commands[component]
            del self.stored_exec_commands[component_update_key]
            logger.debug("Removed stored command for component : " + str(component))
            return True
        finally:
            self.__status_lock.release()
    return False
def add_command(self, component, command):
    """Store the execution-command details for component along with an
    insertion timestamp used later for staleness eviction."""
    self.__status_lock.acquire()
    try:
        component_update_key = self.COMPONENT_UPDATE_KEY_FORMAT.format(component)
        self.stored_exec_commands[component] = command
        self.stored_exec_commands[component_update_key] = self._now_()
        logger.debug("Added command for component : " + str(component))
    finally:
        self.__status_lock.release()
def _read_int_(self, value, default_value=0):
int_value = default_value
try:
int_value = int(value)
except ValueError:
pass
return int_value
def main(argv=None):
    """Manual smoke test: construct a RecoveryManager rooted at /tmp."""
    cmd_mgr = RecoveryManager('/tmp')
    pass

if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Only required for more convenient local development.
import sys, os
sys.path.append(os.path.dirname(os.path.realpath(__file__))+'/lib')
from cloudagents import CloudAgent
import keystoneclient.v2_0
import novaclient.v1_1
import time
# Singleton agent object; required_config declares the metadata and the
# UI form fields the HP Cloud Agents framework renders for this agent.
ca = CloudAgent()

ca.required_config = {
    "name": "Rolling Server Backup",
    "version": "0.1.0",
    "author": "Jeff Kramer",
    "url": "http://www.hpcloud.com/",
    "help": """This script cycles backups for a selected server.""",
    # Each entry below becomes one input in the configuration form.
    "config":
    [{
        "name": "name",
        "regexp": "^.{1,50}$",
        "title": "Server Name",
        "description": "Name of the server to backup.",
        "type": "string",
        "required": True,
        "resource": "openstack.compute.[region].servers"
    },{
        "name": "region",
        "regexp": "^.{1,50}$",
        "title": "Zone",
        "description": "Compute zone to create the server in (ie: az-2.region-a.geo-1).",
        "type": "string",
        "required": True,
        "resource": "openstack.compute.endpoints.region"
    },{
        # NOTE: select values arrive as strings ("0".."7"), not ints.
        "name": "daily",
        "title": "Daily Backups",
        "description": "Number of daily backups to keep.",
        "type": "select",
        "required": True,
        "options": [
            {"name": "0", "value": "0"},
            {"name": "1", "value": "1"},
            {"name": "2", "value": "2"},
            {"name": "3", "value": "3"},
            {"name": "4", "value": "4"},
            {"name": "5", "value": "5"},
            {"name": "6", "value": "6"},
            {"name": "7", "value": "7"},
        ]
    },{
        "name": "weekly",
        "title": "Weekly Backups",
        "description": "Number of weekly backups to keep.",
        "type": "select",
        "required": True,
        "options": [
            {"name": "0", "value": "0"},
            {"name": "1", "value": "1"},
            {"name": "2", "value": "2"},
            {"name": "3", "value": "3"},
            {"name": "4", "value": "4"},
            {"name": "5", "value": "5"},
            {"name": "6", "value": "6"},
            {"name": "7", "value": "7"},
        ]
    },{
        # Values match time.strftime("%w"): 0 == Sunday.
        "name": "weeklyday",
        "title": "Weekly Day",
        "description": "Day of the week to run weekly backup.",
        "type": "select",
        "required": True,
        "options": [
            {"name": "Sunday", "value": "0"},
            {"name": "Monday", "value": "1"},
            {"name": "Tuesday", "value": "2"},
            {"name": "Wednesday", "value": "3"},
            {"name": "Thursday", "value": "4"},
            {"name": "Friday", "value": "5"},
            {"name": "Saturday", "value": "6"}
        ]
    }
    ]
}
def agent():
    """Back up the configured server on its daily/weekly schedule, then trim
    old backup images beyond the configured retention counts."""
    ca.log("Starting!",'',1)
    keystone = keystoneclient.v2_0.client.Client(token=ca.creds['token'], tenant_id=ca.creds['tenantId'],
                                                 auth_url=ca.creds['identity_url'])
    compute_catalog = keystone.service_catalog.get_endpoints()['compute']
    cluster_endpoint = None
    for endpoint in compute_catalog:
        if endpoint['region'] == ca.conf['region']:
            cluster_endpoint = endpoint
    if not cluster_endpoint:
        ca.log_fail("Failing, region not found in endpoint list.")
        exit()
    # Re-use the keystone token instead of re-authenticating with nova.
    nova = novaclient.v1_1.client.Client(None,None,None,auth_url="")
    nova.set_management_url(cluster_endpoint['publicURL'])
    nova.client.auth_token = ca.creds['token']
    # Get the image we're supposed to use.
    target_server = None
    for server in nova.servers.list():
        if server.name == ca.conf['name']:
            target_server = server
    if not target_server:
        ca.log_fail("Failing, server "+ca.conf['name']+" not found in "+ca.conf['region']+".")
        exit()
    ca.log("Found server: "+target_server.name+" ("+str(target_server.id)+")",'',4)
    # First, we run our backups.
    # BUG FIX: the select values are strings ("0".."7"); the old comparison
    # ca.conf['daily'] > 0 compared str to int, which in Python 2 is always
    # True, so backups ran even when the user selected 0. Convert explicitly
    # (the trim loops below already used int()).
    if int(ca.conf['daily']) > 0:
        backup_server(target_server,target_server.name+" Daily Backup "+time.strftime("%Y-%m-%d"),nova)
    if int(ca.conf['weekly']) > 0 and time.strftime("%w") == ca.conf['weeklyday']:
        backup_server(target_server,target_server.name+" Weekly Backup "+time.strftime("%Y-%m-%d"),nova)
    ca.log("Looking for backup images to trim.","")
    daily_images = {}
    weekly_images = {}
    for image in nova.images.list():
        if image.name.startswith(target_server.name+" Daily Backup "):
            daily_images[image.name] = image.id
        if image.name.startswith(target_server.name+" Weekly Backup "):
            weekly_images[image.name] = image.id
    # Then we trim the old ones. If we got this far, the backups succeeded.
    # Names embed YYYY-MM-DD, so a reverse sort is newest-first.
    for c,image in enumerate(sorted(daily_images.keys(),reverse=True)):
        if c+1 > int(ca.conf['daily']):
            ca.log("Deleting daily backup "+str(c+1)+": "+image+" ("+daily_images[image]+")","")
            nova.images.delete(daily_images[image])
            time.sleep(1)
    for c,image in enumerate(sorted(weekly_images.keys(),reverse=True)):
        if c+1 > int(ca.conf['weekly']):
            ca.log("Deleting weekly backup "+str(c+1)+": "+image+" ("+weekly_images[image]+")","")
            nova.images.delete(weekly_images[image])
            time.sleep(1)
    ca.log("Backup complete.","")
def backup_server(server,backup_name,nova):
    """Snapshot `server` as image `backup_name`, replacing any same-named
    image first, then poll until the snapshot leaves SAVING state."""
    # Run backup
    ca.log("Starting backup: "+backup_name,"")
    # Delete any pre-existing image with the same name so the new snapshot
    # is unambiguous.
    for image in nova.images.list():
        if image.name == backup_name:
            ca.log("Deleting pre-existing backup with the same name.","")
            nova.images.delete(image.id)
            time.sleep(1)
    server.create_image(backup_name)
    time.sleep(5)
    # Poll the image service until the snapshot completes or fails.
    while True:
        my_image = None
        for image in nova.images.list():
            if image.name == backup_name:
                my_image = image
        if not my_image:
            ca.log_fail("Backup not found, image service may be down.","")
            exit()
        if my_image.status != 'SAVING':
            if my_image.status == "ACTIVE":
                ca.log("Backup succeeded.","")
                break
            else:
                ca.log_fail("Backup entered unexpected status: "+my_image.status+", failing.","")
                exit()
        # Still SAVING; back off briefly before the next poll.
        time.sleep(5)
ca.run(agent)
| |
# -*- coding: utf-8 -*-
"""Defines a number of functions to test interactions between
various forms data types.
"""
from __future__ import absolute_import, division, print_function
import math
import numpy as np
from . import rectangle, vector, vector3
from .utils import all_parameters_as_numpy_arrays, parameters_as_numpy_arrays
"""
TODO: line_intersect_plane
TODO: line_segment_intersect_plane
TODO: ray_intersect_ray
TODO: line_intersect_line
TODO: line_segment_intersect_line_segment
"""
@all_parameters_as_numpy_arrays
def point_intersect_line(point, line):
    """Calculates the intersection point of a point and a line.

    The point lies on the (infinite) line exactly when the cross product
    of the line direction and the point — both taken relative to the line
    start — is the zero vector.

    :return: the point when it lies on the line, otherwise None.
    """
    direction = line[1] - line[0]
    relative_point = point - line[0]
    if np.count_nonzero(vector3.cross(direction, relative_point)) > 0:
        return None
    return point
@all_parameters_as_numpy_arrays
def point_intersect_line_segment(point, line):
    """Calculates the intersection point of a point and a line segment.

    The point must lie on the infinite line (zero cross product) AND its
    projection onto the segment direction must fall within
    [0, |segment|^2].

    :return: the point when it lies on the segment, otherwise None.
    """
    direction = line[1] - line[0]
    relative_point = point - line[0]
    if np.count_nonzero(vector3.cross(direction, relative_point)) > 0:
        return None
    projection = vector.dot(relative_point, direction)
    if projection < 0.0 or projection > vector.squared_length(direction):
        return None
    return point
@all_parameters_as_numpy_arrays
def point_intersect_rectangle(point, rect):
    """Calculates the intersection point of a point and a 2D rectangle.

    For 3D points, the Z axis is ignored.

    :return: the point when it is touching or within the rectangle,
        otherwise None.
    """
    left, right, bottom, top = rectangle.bounds(rect)
    inside_x = left <= point[0] <= right
    inside_y = bottom <= point[1] <= top
    if inside_x and inside_y:
        return point
    return None
@parameters_as_numpy_arrays('ray', 'plane')
def ray_intersect_plane(ray, plane, front_only=False):
    """Calculates the intersection point of a ray and a plane.

    :param numpy.array ray: The ray to test for intersection.
    :param numpy.array plane: The plane to test for intersection.
    :param boolean front_only: Specifies if the ray should
        only hit the front of the plane.
        Collisions from the rear of the plane will be
        ignored.
    :return: The intersection point, or None
        if the ray is parallel to the plane.
        Returns None if the ray intersects the back
        of the plane and front_only is True.
    """
    # Distance along the ray:
    #   t = (pd - p0.n) / rd.n
    # where rd is the ray direction, pd = (point on plane) . n,
    # p0 is the ray origin and n is the plane normal.
    # rd.n == 0 means the ray is parallel to the plane.
    p = plane[:3] * plane[3]
    n = plane[:3]
    rd_n = vector.dot(ray[1], n)
    if rd_n == 0.0:
        return None
    # Idiom fix: was `if front_only == True:`; plain truthiness is
    # behaviorally identical for this boolean flag.
    if front_only and rd_n >= 0.0:
        return None
    pd = vector.dot(p, n)
    p0_n = vector.dot(ray[0], n)
    t = (pd - p0_n) / rd_n
    return ray[0] + (ray[1] * t)
@all_parameters_as_numpy_arrays
def point_closest_point_on_ray(point, ray):
    """Calculates the point on a ray that is closest to a point.

    :param numpy.array point: The point to check with.
    :param numpy.array ray: The ray to check against.
    :rtype: numpy.array
    :return: The closest point on the ray to the point.
    """
    # Project the point onto the ray's unit direction:
    #   t = (p - origin) . n ; closest = origin + n * t
    unit_direction = vector.normalise(ray[1])
    offset = point - ray[0]
    distance_along_ray = vector.dot(offset, unit_direction)
    return ray[0] + (unit_direction * distance_along_ray)
@all_parameters_as_numpy_arrays
def point_closest_point_on_line(point, line):
    """Calculates the point on the line that is closest to
    the specified point.

    :param numpy.array point: The point to check with.
    :param numpy.array line: The line to check against.
    :rtype: numpy.array
    :return: The closest point on the line to the point.
    """
    # Project the relative point onto the normalised line direction:
    #   closest = start + u' * (u' . (p - start))
    unit_direction = vector.normalise(line[1] - line[0])
    offset = point - line[0]
    return line[0] + (unit_direction * vector.dot(unit_direction, offset))
@all_parameters_as_numpy_arrays
def point_closest_point_on_line_segment(point, segment):
    """Calculates the point on the line segment that is closest
    to the specified point.

    This is similar to point_closest_point_on_line, except this
    is against the line segment of finite length. Whereas
    point_closest_point_on_line checks against a line of infinite
    length.

    :param numpy.array point: The point to check with.
    :param numpy.array segment: The finite line segment to check against.
    :rtype: numpy.array
    :return: The closest point on the line segment to the point.
    """
    # check if the line has any length
    rl = segment[1] - segment[0]
    squared_length = vector.squared_length(rl)
    # degenerate segment: both end points coincide
    if squared_length == 0.0:
        return segment[0]

    rp = point - segment[0]
    # project the point onto the segment, normalised to [0, 1]
    # by dividing by the segment's squared length
    # (stray trailing semicolon removed)
    dot = vector.dot(rp, rl) / squared_length
    # clamp to the segment's end points
    if dot < 0.0:
        return segment[0]
    elif dot > 1.0:
        return segment[1]

    # within segment
    # perform the same calculation as closest_point_on_line
    return segment[0] + (rl * dot)
@all_parameters_as_numpy_arrays
def vector_parallel_vector(v1, v2):
    """Checks if two vectors are parallel.

    :param numpy.array v1, v2: The vectors to check.
    :rtype: boolean
    :return: Returns True if the two vectors are parallel.
    """
    # parallel vectors have a zero cross product
    return np.count_nonzero(vector3.cross(v1, v2)) == 0
@all_parameters_as_numpy_arrays
def ray_parallel_ray(ray1, ray2):
    """Checks if two rays are parallel.

    :param numpy.array ray1, ray2: The rays to check.
    :rtype: boolean
    :return: Returns True if the two rays are parallel.
    """
    # compare the direction components; the cross-product based
    # check copes with directions that aren't unit length
    direction_a = ray1[1]
    direction_b = ray2[1]
    return vector_parallel_vector(direction_a, direction_b)
@all_parameters_as_numpy_arrays
def ray_coincident_ray(ray1, ray2):
    """Check if rays are coincident.

    Rays must not only be parallel to each other, but reside
    along the same vector.

    :param numpy.array ray1, ray2: The rays to check.
    :rtype: boolean
    :return: Returns True if the two rays are co-incident.
    """
    # the directions must match first
    if not ray_parallel_ray(ray1, ray2):
        return False

    # the vector between the two origins must also lie along the
    # (shared) direction; its cross product with that direction is
    # zero exactly when the second origin sits on the first ray's line
    origin_delta = ray2[0] - ray1[0]
    misalignment = vector3.cross(origin_delta, ray2[1])
    return np.count_nonzero(misalignment) == 0
@all_parameters_as_numpy_arrays
def ray_intersect_aabb(ray, aabb):
    """Calculates the intersection point of a ray and an AABB

    :param numpy.array ray1: The ray to check.
    :param numpy.array aabb: The Axis-Aligned Bounding Box to check against.
    :rtype: numpy.array
    :return: Returns a vector if an intersection occurs.
        Returns None if no intersection occurs.
    """
    """
    http://gamedev.stackexchange.com/questions/18436/most-efficient-aabb-vs-ray-collision-algorithms
    """
    # reciprocal of the direction, substituting infinity for zero
    # components so numpy doesn't emit a divide-by-zero warning;
    # the min/max slab logic below handles the infinities
    direction = ray[1]
    dir_fraction = np.empty(3, dtype=ray.dtype)
    dir_fraction[direction == 0.0] = np.inf
    dir_fraction[direction != 0.0] = np.divide(1.0, direction[direction != 0.0])

    # entry/exit distances along each axis-aligned slab
    tx1 = (aabb[0, 0] - ray[0, 0]) * dir_fraction[0]
    tx2 = (aabb[1, 0] - ray[0, 0]) * dir_fraction[0]
    ty1 = (aabb[0, 1] - ray[0, 1]) * dir_fraction[1]
    ty2 = (aabb[1, 1] - ray[0, 1]) * dir_fraction[1]
    tz1 = (aabb[0, 2] - ray[0, 2]) * dir_fraction[2]
    tz2 = (aabb[1, 2] - ray[0, 2]) * dir_fraction[2]

    tmin = max(min(tx1, tx2), min(ty1, ty2), min(tz1, tz2))
    tmax = min(max(tx1, tx2), max(ty1, ty2), max(tz1, tz2))

    # the whole AABB lies behind the ray's start
    if tmax < 0:
        return None
    # the slab intervals don't overlap: no intersection
    if tmin > tmax:
        return None

    # distance from the ray origin to the intersection
    t = abs(tmin)
    return ray[0] + (ray[1] * t)
@all_parameters_as_numpy_arrays
def point_height_above_plane(point, plane):
    """Calculates how high a point is above a plane.

    :param numpy.array point: The point to check.
    :param numpy.array plane: The plane to check.
    :rtype: float
    :return: The height above the plane as a float. The value will be
        negative if the point is behind the plane.
    """
    """
    http://www.vitutor.com/geometry/distance/point_plane.html
    d(P) = (AX + BY + CZ + D) / sqrt(A^2 + B^2 + C^2)

    The plane normal is unit length, so the division by its
    magnitude is unnecessary: just perform Pn . [X, Y, Z, 1].
    """
    homogeneous_point = [point[0], point[1], point[2], 1.0]
    return np.dot(plane, homogeneous_point)
@all_parameters_as_numpy_arrays
def point_closest_point_on_plane(point, plane):
    """Calculates the point on a plane that is closest to a point.

    :param numpy.array point: The point to check with.
    :param numpy.array plane: The infinite plane to check against.
    :rtype: numpy.array
    :return: The closest point on the plane to the point.
    """
    """
    point on plane is defined as:
    q' = q + (d - q.n)n
    where:
        q' is the point on the plane
        q is the point we are checking
        d is the value of normal dot position
        n is the plane normal
    """
    normal = plane[:3]
    # reconstruct a point lying on the plane from its distance term
    plane_point = normal * plane[3]
    d = np.dot(plane_point, normal)
    # signed distance from the query point to the plane, along the normal
    distance_from_plane = d - np.dot(point, normal)
    return point + (normal * distance_from_plane)
@all_parameters_as_numpy_arrays
def sphere_does_intersect_sphere(s1, s2):
    """Checks if two spheres overlap.

    Note: This will return True if the two spheres are
    touching perfectly but sphere_penetration_sphere
    will return 0.0 as the touch but don't penetrate.

    This is faster than circle_penetrate_amount_circle
    as it avoids a square root calculation.

    :param numpy.array s1: The first sphere.
    :param numpy.array s2: The second sphere.
    :rtype: boolean
    :return: Returns True if the spheres overlap.
        Otherwise, returns False.
    """
    delta = s2[:3] - s1[:3]
    distance_squared = vector.squared_length(delta)
    # compare squared distances to avoid the square root
    # (was math.pow(..., 2.0) — ** is the idiomatic form)
    radii_squared = (s1[3] + s2[3]) ** 2.0
    # collapsed the if/return True/False into a single expression;
    # bool() keeps the plain-bool return of the original
    return bool(distance_squared <= radii_squared)
@all_parameters_as_numpy_arrays
def sphere_penetration_sphere(s1, s2):
    """Calculates the distance two spheres have penetrated
    into one another.

    :param numpy.array s1: The first sphere.
    :param numpy.array s2: The second sphere.
    :rtype: float
    :return: The total overlap of the two spheres.
        This is essentially:
        r1 + r2 - distance
        Where r1 and r2 are the radii of sphere 1 and 2
        and distance is the length of the vector p2 - p1.
        Will return 0.0 if the spheres do not overlap.
    """
    centre_delta = s2[:3] - s1[:3]
    centre_distance = vector.length(centre_delta)
    # overlap is how far the combined radii exceed the centre distance
    overlap = (s1[3] + s2[3]) - centre_distance
    if overlap <= 0.0:
        return 0.0
    return overlap
| |
from decimal import Decimal as D
from django.core import exceptions
from django.test import TestCase
import mock
from oscar.apps.offer import models
from oscar.apps.offer.utils import Applicator
from oscar.test.basket import add_product, add_products
from oscar.test import factories
class TestAnAbsoluteDiscountAppliedWithCountConditionOnDifferentRange(TestCase):
    """Tests an absolute discount whose benefit range is different from
    the range used by its count condition.
    """

    def setUp(self):
        # Condition: the basket must contain 2 products from this range
        self.condition_product = factories.ProductFactory()
        condition_range = factories.RangeFactory()
        condition_range.add_product(self.condition_product)
        self.condition = models.CountCondition.objects.create(
            range=condition_range,
            type=models.Condition.COUNT,
            value=2)

        # Benefit: 3.00 off products from a separate range
        self.benefit_product = factories.ProductFactory()
        benefit_range = factories.RangeFactory()
        benefit_range.add_product(self.benefit_product)
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=benefit_range,
            type=models.Benefit.FIXED,
            value=D('3.00'))

        self.offer = models.ConditionalOffer(
            id=1, condition=self.condition, benefit=self.benefit)
        self.basket = factories.create_basket(empty=True)
        self.applicator = Applicator()

    def test_successful_application_consumes_correctly(self):
        # Method name typo fixed ("succcessful" -> "successful")
        add_product(self.basket, product=self.condition_product, quantity=2)
        add_product(self.basket, product=self.benefit_product, quantity=1)

        self.applicator.apply_offers(self.basket, [self.offer])

        discounts = self.basket.offer_applications.offer_discounts
        self.assertEqual(len(discounts), 1)
        self.assertEqual(discounts[0]['freq'], 1)

    def test_condition_is_consumed_correctly(self):
        # Testing an error case reported on the mailing list
        add_product(self.basket, product=self.condition_product, quantity=3)
        add_product(self.basket, product=self.benefit_product, quantity=2)

        self.applicator.apply_offers(self.basket, [self.offer])

        discounts = self.basket.offer_applications.offer_discounts
        self.assertEqual(len(discounts), 1)
        self.assertEqual(discounts[0]['freq'], 1)
class TestAnAbsoluteDiscountAppliedWithCountCondition(TestCase):
    """Tests an absolute discount paired with a count condition, where
    condition and benefit share an all-products range.
    """

    def setUp(self):
        # Renamed from 'range' to avoid shadowing the builtin
        product_range = models.Range.objects.create(
            name="All products", includes_all_products=True)
        self.condition = models.CountCondition.objects.create(
            range=product_range,
            type=models.Condition.COUNT,
            value=2)
        self.offer = mock.Mock()
        self.offer.applies_to_tax_exclusive_prices = False
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=product_range,
            type=models.Benefit.FIXED,
            value=D('3.00'))
        self.basket = factories.create_basket(empty=True)

    def test_applies_correctly_to_empty_basket(self):
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('0.00'), result.discount)
        self.assertEqual(0, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_matches_condition_with_one_line(self):
        add_product(self.basket, price=D('12.00'), quantity=2)
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)
        # Check the discount is applied equally to each item in the line
        line = self.basket.all_lines()[0]
        prices = line.get_price_breakdown()
        self.assertEqual(1, len(prices))
        self.assertEqual(D('10.50'), prices[0][0])

    def test_applies_correctly_to_basket_which_matches_condition_with_multiple_lines(self):
        # Use a basket with 2 lines
        add_products(self.basket, [
            (D('12.00'), 1), (D('12.00'), 1)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertTrue(result.is_successful)
        self.assertFalse(result.is_final)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)
        # Check the discount is applied equally to each line
        for line in self.basket.all_lines():
            self.assertEqual(D('1.50'), line.discount_value)

    def test_applies_correctly_to_basket_which_matches_condition_with_multiple_lines_and_lower_total_value(self):
        # Use a basket with 2 lines
        add_products(self.basket, [
            (D('1.00'), 1), (D('1.50'), 1)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertTrue(result.is_successful)
        self.assertFalse(result.is_final)
        self.assertEqual(D('2.50'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_exceeds_condition(self):
        add_products(self.basket, [
            (D('12.00'), 2), (D('10.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(4, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_exceeds_condition_with_smaller_prices_than_discount(self):
        add_products(self.basket, [
            (D('2.00'), 2), (D('4.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(4, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_exceeds_condition_with_smaller_prices_than_discount_and_higher_prices_first(self):
        add_products(self.basket, [
            (D('2.00'), 2), (D('4.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(4, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)
class TestAnAbsoluteDiscount(TestCase):
    """Tests rounding behaviour when an absolute discount has to be
    split across several basket lines.
    """

    def setUp(self):
        # Renamed from 'range' to avoid shadowing the builtin
        product_range = models.Range.objects.create(
            name="All products", includes_all_products=True)
        self.condition = models.CountCondition.objects.create(
            range=product_range,
            type=models.Condition.COUNT,
            value=2)
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=product_range,
            type=models.Benefit.FIXED,
            value=D('4.00'))
        self.offer = mock.Mock()
        self.offer.applies_to_tax_exclusive_prices = False
        self.basket = factories.create_basket(empty=True)

    def test_applies_correctly_when_discounts_need_rounding(self):
        # Split discount across 3 lines
        for price in [D('2.00'), D('2.00'), D('2.00')]:
            add_product(self.basket, price)
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('4.00'), result.discount)
        # Check the discount is applied equally to each line, with the
        # rounding remainder landing on the last line
        line_discounts = [line.discount_value for line in self.basket.all_lines()]
        self.assertEqual(len(line_discounts), 3)
        for i, v in enumerate([D('1.33'), D('1.33'), D('1.34')]):
            self.assertEqual(line_discounts[i], v)
class TestAnAbsoluteDiscountWithMaxItemsSetAppliedWithCountCondition(TestCase):
    """Tests an absolute discount limited by max_affected_items,
    combined with a count condition.
    """

    def setUp(self):
        # Renamed from 'range' to avoid shadowing the builtin
        product_range = models.Range.objects.create(
            name="All products", includes_all_products=True)
        self.condition = models.CountCondition.objects.create(
            range=product_range,
            type=models.Condition.COUNT,
            value=2)
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=product_range,
            type=models.Benefit.FIXED,
            value=D('3.00'),
            max_affected_items=1)
        self.offer = mock.Mock()
        self.offer.applies_to_tax_exclusive_prices = False
        self.basket = factories.create_basket(empty=True)

    def test_applies_correctly_to_empty_basket(self):
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('0.00'), result.discount)
        self.assertEqual(0, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_matches_condition(self):
        add_product(self.basket, D('12.00'), 2)
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_exceeds_condition(self):
        add_products(self.basket, [(D('12.00'), 2), (D('10.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(2, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_exceeds_condition_but_with_smaller_prices_than_discount(self):
        add_products(self.basket, [(D('2.00'), 2), (D('1.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('1.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(2, self.basket.num_items_without_discount)
class TestAnAbsoluteDiscountAppliedWithValueCondition(TestCase):
    """Tests an absolute discount combined with a value condition over
    an all-products range.
    """

    def setUp(self):
        # Renamed from 'range' to avoid shadowing the builtin
        product_range = models.Range.objects.create(
            name="All products", includes_all_products=True)
        self.condition = models.ValueCondition.objects.create(
            range=product_range,
            type=models.Condition.VALUE,
            value=D('10.00'))
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=product_range,
            type=models.Benefit.FIXED,
            value=D('3.00'))
        self.offer = mock.Mock()
        self.offer.applies_to_tax_exclusive_prices = False
        self.basket = factories.create_basket(empty=True)

    def test_applies_correctly_to_empty_basket(self):
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('0.00'), result.discount)
        self.assertEqual(0, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_single_item_basket_which_matches_condition(self):
        add_products(self.basket, [(D('10.00'), 1)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(1, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_matches_condition(self):
        add_products(self.basket, [(D('5.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_exceeds_condition(self):
        add_products(self.basket, [(D('4.00'), 3)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(3, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_exceeds_condition_but_matches_boundary(self):
        add_products(self.basket, [(D('5.00'), 3)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(3, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)
class TestAnAbsoluteDiscountWithMaxItemsSetAppliedWithValueCondition(TestCase):
    """Tests an absolute discount limited by max_affected_items,
    combined with a value condition.
    """

    def setUp(self):
        # Renamed from 'range' to avoid shadowing the builtin
        product_range = models.Range.objects.create(
            name="All products", includes_all_products=True)
        self.condition = models.ValueCondition.objects.create(
            range=product_range,
            type=models.Condition.VALUE,
            value=D('10.00'))
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=product_range,
            type=models.Benefit.FIXED,
            value=D('3.00'),
            max_affected_items=1)
        self.offer = mock.Mock()
        self.offer.applies_to_tax_exclusive_prices = False
        self.basket = factories.create_basket(empty=True)

    def test_applies_correctly_to_empty_basket(self):
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('0.00'), result.discount)
        self.assertEqual(0, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_single_item_basket_which_matches_condition(self):
        add_products(self.basket, [(D('10.00'), 1)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(1, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_matches_condition(self):
        add_products(self.basket, [(D('5.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_exceeds_condition(self):
        add_products(self.basket, [(D('4.00'), 3)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(3, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_exceeds_condition_but_matches_boundary(self):
        add_products(self.basket, [(D('5.00'), 3)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(1, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_matches_condition_but_with_lower_prices_than_discount(self):
        add_products(self.basket, [(D('2.00'), 6)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('2.00'), result.discount)
        self.assertEqual(5, self.basket.num_items_with_discount)
        self.assertEqual(1, self.basket.num_items_without_discount)
class TestAnAbsoluteDiscountBenefit(TestCase):
    """Validation behaviour of the absolute discount benefit model."""

    def test_requires_a_benefit_value(self):
        # A FIXED benefit created without a value must fail validation
        full_range = models.Range.objects.create(
            name="", includes_all_products=True)
        benefit = models.Benefit.objects.create(
            type=models.Benefit.FIXED, range=full_range)
        with self.assertRaises(exceptions.ValidationError):
            benefit.clean()
| |
import os
import re
import shutil
from zlib import crc32
from collections import defaultdict
from .parser import SourceBuilder, TLParser
class TLGenerator:
    def __init__(self, output_dir):
        # Root directory under which 'functions/', 'types/' and
        # 'all_tlobjects.py' will be generated.
        self.output_dir = output_dir
def _get_file(self, *paths):
return os.path.join(self.output_dir, *paths)
def _rm_if_exists(self, filename):
file = self._get_file(filename)
if os.path.exists(file):
if os.path.isdir(file):
shutil.rmtree(file)
else:
os.remove(file)
def tlobjects_exist(self):
"""Determines whether the TLObjects were previously
generated (hence exist) or not
"""
return os.path.isfile(self._get_file('all_tlobjects.py'))
def clean_tlobjects(self):
"""Cleans the automatically generated TLObjects from disk"""
for name in ('functions', 'types', 'all_tlobjects.py'):
self._rm_if_exists(name)
def generate_tlobjects(self, scheme_file, import_depth):
"""Generates all the TLObjects from scheme.tl to
tl/functions and tl/types
"""
# First ensure that the required parent directories exist
os.makedirs(self._get_file('functions'), exist_ok=True)
os.makedirs(self._get_file('types'), exist_ok=True)
# Step 0: Cache the parsed file on a tuple
tlobjects = tuple(TLParser.parse_file(scheme_file))
# Step 1: Ensure that no object has the same name as a namespace
# We must check this because Python will complain if it sees a
# file and a directory with the same name, which happens for
# example with "updates".
#
# We distinguish between function and type namespaces since we
# will later need to perform a relative import for them to be used
function_namespaces = set()
type_namespaces = set()
# Make use of this iteration to also store 'Type: [Constructors]'
type_constructors = defaultdict(list)
for tlobject in tlobjects:
if tlobject.is_function:
if tlobject.namespace:
function_namespaces.add(tlobject.namespace)
else:
type_constructors[tlobject.result].append(tlobject)
if tlobject.namespace:
type_namespaces.add(tlobject.namespace)
# Merge both namespaces to easily check if any namespace exists,
# though we could also distinguish between types and functions
# here, it's not worth doing
namespace_directories = function_namespaces | type_namespaces
for tlobject in tlobjects:
if TLGenerator.get_file_name(tlobject, add_extension=False) \
in namespace_directories:
# If this TLObject isn't under the same directory as its
# name (i.e. "contacts"), append "_tg" to avoid confusion
# between the file and the directory (i.e. "updates")
if tlobject.namespace != tlobject.name:
tlobject.name += '_tg'
# Step 2: Generate the actual code
for tlobject in tlobjects:
# Omit core types, these are embedded in the generated code
if tlobject.is_core_type():
continue
# Determine the output directory and create it
out_dir = self._get_file('functions'
if tlobject.is_function else 'types')
# Path depth to perform relative import
depth = import_depth
if tlobject.namespace:
depth += 1
out_dir = os.path.join(out_dir, tlobject.namespace)
os.makedirs(out_dir, exist_ok=True)
# Add this object to __init__.py, so we can import *
init_py = os.path.join(out_dir, '__init__.py')
with open(init_py, 'a', encoding='utf-8') as file:
with SourceBuilder(file) as builder:
builder.writeln('from .{} import {}'.format(
TLGenerator.get_file_name(tlobject, add_extension=False),
TLGenerator.get_class_name(tlobject)))
# Create the file for this TLObject
filename = os.path.join(out_dir, TLGenerator.get_file_name(
tlobject, add_extension=True
))
with open(filename, 'w', encoding='utf-8') as file:
with SourceBuilder(file) as builder:
TLGenerator._write_source_code(
tlobject, builder, depth, type_constructors)
# Step 3: Add the relative imports to the namespaces on __init__.py's
init_py = self._get_file('functions', '__init__.py')
with open(init_py, 'a') as file:
file.write('from . import {}\n'
.format(', '.join(function_namespaces)))
init_py = self._get_file('types', '__init__.py')
with open(init_py, 'a') as file:
file.write('from . import {}\n'
.format(', '.join(type_namespaces)))
# Step 4: Once all the objects have been generated,
# we can now group them in a single file
filename = os.path.join(self._get_file('all_tlobjects.py'))
with open(filename, 'w', encoding='utf-8') as file:
with SourceBuilder(file) as builder:
builder.writeln(
'"""File generated by TLObjects\' generator. All changes will be ERASED"""')
builder.writeln()
builder.writeln('from . import types, functions')
builder.writeln()
# Create a variable to indicate which layer this is
builder.writeln('layer = {} # Current generated layer'.format(
TLParser.find_layer(scheme_file)))
builder.writeln()
# Then create the dictionary containing constructor_id: class
builder.writeln('tlobjects = {')
builder.current_indent += 1
# Fill the dictionary (0x1a2b3c4f: tl.full.type.path.Class)
for tlobject in tlobjects:
constructor = hex(tlobject.id)
if len(constructor) != 10:
# Make it a nice length 10 so it fits well
constructor = '0x' + constructor[2:].zfill(8)
builder.write('{}: '.format(constructor))
builder.write(
'functions' if tlobject.is_function else 'types')
if tlobject.namespace:
builder.write('.' + tlobject.namespace)
builder.writeln('.{},'.format(
TLGenerator.get_class_name(tlobject)))
builder.current_indent -= 1
builder.writeln('}')
@staticmethod
def _write_source_code(tlobject, builder, depth, type_constructors):
"""Writes the source code corresponding to the given TLObject
by making use of the 'builder' SourceBuilder.
Additional information such as file path depth and
the Type: [Constructors] must be given for proper
importing and documentation strings.
'"""
# Both types and functions inherit from the TLObject class so they
# all can be serialized and sent, however, only the functions are
# "content_related".
builder.writeln('from {}.tl.tlobject import TLObject'
.format('.' * depth))
if tlobject.is_function:
util_imports = set()
for a in tlobject.args:
# We can automatically convert some "full" types to
# "input only" (like User -> InputPeerUser, etc.)
if a.type == 'InputPeer':
util_imports.add('get_input_peer')
elif a.type == 'InputChannel':
util_imports.add('get_input_channel')
elif a.type == 'InputUser':
util_imports.add('get_input_user')
if util_imports:
builder.writeln('from {}.utils import {}'.format(
'.' * depth, ', '.join(util_imports)))
if any(a for a in tlobject.args if a.can_be_inferred):
# Currently only 'random_id' needs 'os' to be imported
builder.writeln('import os')
builder.writeln()
builder.writeln()
builder.writeln('class {}(TLObject):'.format(
TLGenerator.get_class_name(tlobject)))
# Write the original .tl definition,
# along with a "generated automatically" message
builder.writeln(
'"""Class generated by TLObjects\' generator. '
'All changes will be ERASED. TL definition below.'
)
builder.writeln('{}"""'.format(repr(tlobject)))
builder.writeln()
# Class-level variable to store its constructor ID
builder.writeln("# Telegram's constructor (U)ID for this class")
builder.writeln('constructor_id = {}'.format(hex(tlobject.id)))
builder.writeln("# Also the ID of its resulting type for fast checks")
builder.writeln('subclass_of_id = {}'.format(
hex(crc32(tlobject.result.encode('ascii')))))
builder.writeln()
# Flag arguments must go last
args = [
a for a in tlobject.sorted_args()
if not a.flag_indicator and not a.generic_definition
]
# Convert the args to string parameters, flags having =None
args = [
(a.name if not a.is_flag and not a.can_be_inferred
else '{}=None'.format(a.name))
for a in args
]
# Write the __init__ function
if args:
builder.writeln(
'def __init__(self, {}):'.format(', '.join(args))
)
else:
builder.writeln('def __init__(self):')
# Now update args to have the TLObject arguments, _except_
# those which are calculated on send or ignored, this is
# flag indicator and generic definitions.
#
# We don't need the generic definitions in Python
# because arguments can be any type
args = [arg for arg in tlobject.args
if not arg.flag_indicator and
not arg.generic_definition]
if args:
# Write the docstring, to know the type of the args
builder.writeln('"""')
for arg in args:
if not arg.flag_indicator:
builder.write(
':param {}: Telegram type: "{}".'
.format(arg.name, arg.type)
)
if arg.is_vector:
builder.write(' Must be a list.'.format(arg.name))
if arg.is_generic:
builder.write(' Must be another TLObject request.')
builder.writeln()
# We also want to know what type this request returns
# or to which type this constructor belongs to
builder.writeln()
if tlobject.is_function:
builder.write(':returns {}: '.format(tlobject.result))
else:
builder.write('Constructor for {}: '.format(tlobject.result))
constructors = type_constructors[tlobject.result]
if not constructors:
builder.writeln('This type has no constructors.')
elif len(constructors) == 1:
builder.writeln('Instance of {}.'.format(
TLGenerator.get_class_name(constructors[0])
))
else:
builder.writeln('Instance of either {}.'.format(
', '.join(TLGenerator.get_class_name(c)
for c in constructors)
))
builder.writeln('"""')
builder.writeln('super().__init__()')
# Functions have a result object and are confirmed by default
if tlobject.is_function:
builder.writeln('self.result = None')
builder.writeln(
'self.content_related = True')
# Set the arguments
if args:
# Leave an empty line if there are any args
builder.writeln()
for arg in args:
if arg.can_be_inferred:
# Currently the only argument that can be
# inferred are those called 'random_id'
if arg.name == 'random_id':
builder.writeln(
"self.random_id = random_id if random_id "
"is not None else int.from_bytes("
"os.urandom({}), signed=True, "
"byteorder='little')"
.format(8 if arg.type == 'long' else 4)
)
else:
raise ValueError('Cannot infer a value for ', arg)
# Well-known cases, auto-cast it to the right type
elif arg.type == 'InputPeer' and tlobject.is_function:
TLGenerator.write_get_input(builder, arg, 'get_input_peer')
elif arg.type == 'InputChannel' and tlobject.is_function:
TLGenerator.write_get_input(builder, arg, 'get_input_channel')
elif arg.type == 'InputUser' and tlobject.is_function:
TLGenerator.write_get_input(builder, arg, 'get_input_user')
else:
builder.writeln('self.{0} = {0}'.format(arg.name))
builder.end_block()
# Write the to_dict(self) method
if args:
builder.writeln('def to_dict(self):')
builder.writeln('return {')
builder.current_indent += 1
base_types = ('string', 'bytes', 'int', 'long', 'int128',
'int256', 'double', 'Bool', 'true', 'date')
for arg in args:
builder.write("'{}': ".format(arg.name))
if arg.type in base_types:
if arg.is_vector:
builder.write(
'[] if self.{0} is None else self.{0}[:]'
.format(arg.name)
)
else:
builder.write('self.{}'.format(arg.name))
else:
if arg.is_vector:
builder.write(
'[] if self.{0} is None else [None '
'if x is None else x.to_dict() for x in self.{0}]'
.format(arg.name)
)
else:
builder.write(
'None if self.{0} is None else self.{0}.to_dict()'
.format(arg.name)
)
builder.writeln(',')
builder.current_indent -= 1
builder.writeln("}")
else:
builder.writeln('@staticmethod')
builder.writeln('def to_dict():')
builder.writeln('return {}')
builder.end_block()
# Write the on_send(self, writer) function
builder.writeln('def on_send(self, writer):')
builder.writeln(
'writer.write_int({}.constructor_id, signed=False)'
.format(TLGenerator.get_class_name(tlobject)))
for arg in tlobject.args:
TLGenerator.write_onsend_code(builder, arg,
tlobject.args)
builder.end_block()
# Write the empty() function, which returns an "empty"
# instance, in which all attributes are set to None
builder.writeln('@staticmethod')
builder.writeln('def empty():')
builder.writeln(
'"""Returns an "empty" instance (attributes=None)"""')
builder.writeln('return {}({})'.format(
TLGenerator.get_class_name(tlobject), ', '.join(
'None' for _ in range(len(args)))))
builder.end_block()
# Write the on_response(self, reader) function
builder.writeln('def on_response(self, reader):')
# Do not read constructor's ID, since
# that's already been read somewhere else
if tlobject.is_function:
TLGenerator.write_request_result_code(builder, tlobject)
else:
if tlobject.args:
for arg in tlobject.args:
TLGenerator.write_onresponse_code(
builder, arg, tlobject.args)
else:
# If there were no arguments, we still need an
# on_response method, and hence "pass" if empty
builder.writeln('pass')
builder.end_block()
# Write the __repr__(self) and __str__(self) functions
builder.writeln('def __repr__(self):')
builder.writeln("return '{}'".format(repr(tlobject)))
builder.end_block()
builder.writeln('def __str__(self):')
builder.writeln('return TLObject.pretty_format(self)')
builder.end_block()
builder.writeln('def stringify(self):')
builder.writeln('return TLObject.pretty_format(self, indent=0)')
# builder.end_block() # No need to end the last block
@staticmethod
def write_get_input(builder, arg, get_input_code):
"""Returns "True" if the get_input_* code was written when assigning
a parameter upon creating the request. Returns False otherwise
"""
if arg.is_vector:
builder.writeln(
'self.{0} = [{1}(_x) for _x in {0}]'
.format(arg.name, get_input_code)
)
pass
else:
builder.writeln(
'self.{0} = {1}({0})'.format(arg.name, get_input_code)
)
@staticmethod
def get_class_name(tlobject):
"""Gets the class name following the Python style guidelines, in ThisClassFormat"""
# Courtesy of http://stackoverflow.com/a/31531797/4759433
# Also, '_' could be replaced for ' ', then use .title(), and then remove ' '
result = re.sub(r'_([a-z])', lambda m: m.group(1).upper(),
tlobject.name)
result = result[:1].upper() + result[1:].replace(
'_', '') # Replace again to fully ensure!
# If it's a function, let it end with "Request" to identify them more easily
if tlobject.is_function:
result += 'Request'
return result
@staticmethod
def get_file_name(tlobject, add_extension=False):
"""Gets the file name in file_name_format.py for the given TLObject"""
# Courtesy of http://stackoverflow.com/a/1176023/4759433
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', tlobject.name)
result = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
if add_extension:
return result + '.py'
else:
return result
@staticmethod
def write_onsend_code(builder, arg, args, name=None):
    """
    Writes the write code for the given argument
    :param builder: The source code builder
    :param arg: The argument to write
    :param args: All the other arguments in TLObject same on_send. This is required to determine the flags value
    :param name: The name of the argument. Defaults to "self.argname"
                 This argument is an option because it's required when writing Vectors<>
    """
    if arg.generic_definition:
        return  # Do nothing, this only specifies a later type

    if name is None:
        name = 'self.{}'.format(arg.name)

    # The argument may be a flag, only write if it's not None AND if it's not a True type
    # True types are not actually sent, but instead only used to determine the flags
    if arg.is_flag:
        if arg.type == 'true':
            return  # Exit, since True type is never written
        else:
            # Open an 'if' so the value is only serialized when present;
            # the matching end_block() happens at the bottom of this method
            builder.writeln('if {}:'.format(name))

    if arg.is_vector:
        if arg.use_vector_id:
            builder.writeln(
                "writer.write_int(0x1cb5c415, signed=False) # Vector's constructor ID")
        builder.writeln('writer.write_int(len({}))'.format(name))
        builder.writeln('for _x in {}:'.format(name))
        # Temporary disable .is_vector, not to enter this if again
        # when recursing to write each '_x' item with the inner type
        arg.is_vector = False
        TLGenerator.write_onsend_code(builder, arg, args, name='_x')
        arg.is_vector = True

    elif arg.flag_indicator:
        # Calculate the flags with those items which are not None
        builder.writeln(
            '# Calculate the flags. This equals to those flag arguments which are NOT None')
        builder.writeln('flags = 0')
        for flag in args:
            if flag.is_flag:
                builder.writeln('flags |= (1 << {}) if {} else 0'.format(
                    flag.flag_index, 'self.{}'.format(flag.name)))
        builder.writeln('writer.write_int(flags)')
        builder.writeln()

    elif 'int' == arg.type:
        builder.writeln('writer.write_int({})'.format(name))

    elif 'long' == arg.type:
        builder.writeln('writer.write_long({})'.format(name))

    elif 'int128' == arg.type:
        builder.writeln('writer.write_large_int({}, bits=128)'.format(
            name))

    elif 'int256' == arg.type:
        builder.writeln('writer.write_large_int({}, bits=256)'.format(
            name))

    elif 'double' == arg.type:
        builder.writeln('writer.write_double({})'.format(name))

    elif 'string' == arg.type:
        builder.writeln('writer.tgwrite_string({})'.format(name))

    elif 'Bool' == arg.type:
        builder.writeln('writer.tgwrite_bool({})'.format(name))

    elif 'true' == arg.type:  # Awkwardly enough, Telegram has both bool and "true", used in flags
        pass  # These are actually NOT written! Only used for flags

    elif 'bytes' == arg.type:
        builder.writeln('writer.tgwrite_bytes({})'.format(name))

    elif 'date' == arg.type:  # Custom format
        builder.writeln('writer.tgwrite_date({})'.format(name))

    else:
        # Else it may be a custom type
        builder.writeln('{}.on_send(writer)'.format(name))

    # End vector and flag blocks if required (if we opened them before)
    if arg.is_vector:
        builder.end_block()

    if arg.is_flag:
        builder.end_block()
@staticmethod
def write_onresponse_code(builder, arg, args, name=None):
    """
    Writes the receive code for the given argument
    :param builder: The source code builder
    :param arg: The argument to write
    :param args: All the other arguments in TLObject same on_send. This is required to determine the flags value
    :param name: The name of the argument. Defaults to "self.argname"
                 This argument is an option because it's required when writing Vectors<>
    """
    if arg.generic_definition:
        return  # Do nothing, this only specifies a later type

    if name is None:
        name = 'self.{}'.format(arg.name)

    # The argument may be a flag, only write that flag was given!
    was_flag = False
    if arg.is_flag:
        was_flag = True
        builder.writeln('if (flags & (1 << {})) != 0:'.format(
            arg.flag_index))
        # Temporary disable .is_flag not to enter this if again when calling the method recursively
        arg.is_flag = False

    if arg.is_vector:
        if arg.use_vector_id:
            builder.writeln("reader.read_int() # Vector's constructor ID")
        builder.writeln('{} = [] # Initialize an empty list'.format(name))
        builder.writeln('_len = reader.read_int()')
        builder.writeln('for _ in range(_len):')
        # Temporary disable .is_vector, not to enter this if again;
        # each item is read into '_x' and then appended to the list
        arg.is_vector = False
        TLGenerator.write_onresponse_code(builder, arg, args, name='_x')
        builder.writeln('{}.append(_x)'.format(name))
        arg.is_vector = True

    elif arg.flag_indicator:
        # Read the flags, which will indicate what items we should read next
        builder.writeln('flags = reader.read_int()')
        builder.writeln()

    elif 'int' == arg.type:
        builder.writeln('{} = reader.read_int()'.format(name))

    elif 'long' == arg.type:
        builder.writeln('{} = reader.read_long()'.format(name))

    elif 'int128' == arg.type:
        builder.writeln(
            '{} = reader.read_large_int(bits=128)'.format(name)
        )

    elif 'int256' == arg.type:
        builder.writeln(
            '{} = reader.read_large_int(bits=256)'.format(name)
        )

    elif 'double' == arg.type:
        builder.writeln('{} = reader.read_double()'.format(name))

    elif 'string' == arg.type:
        builder.writeln('{} = reader.tgread_string()'.format(name))

    elif 'Bool' == arg.type:
        builder.writeln('{} = reader.tgread_bool()'.format(name))

    elif 'true' == arg.type:  # Awkwardly enough, Telegram has both bool and "true", used in flags
        builder.writeln(
            '{} = True # Arbitrary not-None value, no need to read since it is a flag'.
            format(name))

    elif 'bytes' == arg.type:
        builder.writeln('{} = reader.tgread_bytes()'.format(name))

    elif 'date' == arg.type:  # Custom format
        builder.writeln('{} = reader.tgread_date()'.format(name))

    else:
        # Else it may be a custom type
        builder.writeln('{} = reader.tgread_object()'.format(name))

    # End vector and flag blocks if required (if we opened them before)
    if arg.is_vector:
        builder.end_block()

    if was_flag:
        builder.end_block()

        # Restore .is_flag
        arg.is_flag = True
@staticmethod
def write_request_result_code(builder, tlobject):
"""
Writes the receive code for the given function
:param builder: The source code builder
:param tlobject: The TLObject for which the 'self.result = ' will be written
"""
if tlobject.result.startswith('Vector<'):
# Vector results are a bit special since they can also be composed
# of integer values and such; however, the result of requests is
# not parsed as arguments are and it's a bit harder to tell which
# is which.
if tlobject.result == 'Vector<int>':
builder.writeln('reader.read_int() # Vector id')
builder.writeln('count = reader.read_int()')
builder.writeln('self.result = [reader.read_int() for _ in range(count)]')
elif tlobject.result == 'Vector<long>':
builder.writeln('reader.read_int() # Vector id')
builder.writeln('count = reader.read_long()')
builder.writeln('self.result = [reader.read_long() for _ in range(count)]')
else:
builder.writeln('self.result = reader.tgread_vector()')
else:
builder.writeln('self.result = reader.tgread_object()')
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timeline visualization for TensorFlow using Chrome Trace Format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import re
# The timeline target is usually imported as part of BUILD target
# "platform_test", which also includes the "platform"
# dependency. This is why the logging import here is okay.
from tensorflow.python.platform import build_info
from tensorflow.python.platform import tf_logging as logging
class AllocationMaximum(collections.namedtuple(
    'AllocationMaximum', ['timestamp', 'num_bytes', 'tensors'])):
  """Records the peak allocation reached by one allocator within the timeline.

  Fields:
    timestamp: `tensorflow::Env::NowMicros()` at which the peak was reached.
    num_bytes: total number of bytes in use at that moment.
    tensors: the set of tensors live at that moment.
  """
class StepStatsAnalysis(collections.namedtuple(
    'StepStatsAnalysis', ['chrome_trace', 'allocator_maximums'])):
  """Holds the output of analyzing a StepStats proto.

  Fields:
    chrome_trace: a dict containing the chrome trace analysis.
    allocator_maximums: a dict mapping allocator names to AllocationMaximum.
  """
class _ChromeTraceFormatter(object):
"""A helper class for generating traces in Chrome Trace Format."""
def __init__(self, show_memory=False):
"""Constructs a new Chrome Trace formatter."""
self._show_memory = show_memory
self._events = []
self._metadata = []
def _create_event(self, ph, category, name, pid, tid, timestamp):
"""Creates a new Chrome Trace event.
For details of the file format, see:
https://github.com/catapult-project/catapult/blob/master/tracing/README.md
Args:
ph: The type of event - usually a single character.
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
Returns:
A JSON compatible event object.
"""
event = {}
event['ph'] = ph
event['cat'] = category
event['name'] = name
event['pid'] = pid
event['tid'] = tid
event['ts'] = timestamp
return event
def emit_pid(self, name, pid):
"""Adds a process metadata event to the trace.
Args:
name: The process name as a string.
pid: Identifier of the process as an integer.
"""
event = {}
event['name'] = 'process_name'
event['ph'] = 'M'
event['pid'] = pid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_tid(self, name, pid, tid):
"""Adds a thread metadata event to the trace.
Args:
name: The thread name as a string.
pid: Identifier of the process as an integer.
tid: Identifier of the thread as an integer.
"""
event = {}
event['name'] = 'thread_name'
event['ph'] = 'M'
event['pid'] = pid
event['tid'] = tid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_region(self, timestamp, duration, pid, tid, category, name, args):
"""Adds a region event to the trace.
Args:
timestamp: The start timestamp of this region as a long integer.
duration: The duration of this region as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
category: The event category as a string.
name: The event name as a string.
args: A JSON-compatible dictionary of event arguments.
"""
event = self._create_event('X', category, name, pid, tid, timestamp)
event['dur'] = duration
event['args'] = args
self._events.append(event)
def emit_obj_create(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object creation event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('N', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_delete(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object deletion event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('D', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_snapshot(self, category, name, timestamp, pid, tid, object_id,
snapshot):
"""Adds an object snapshot event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
snapshot: A JSON-compatible representation of the object.
"""
event = self._create_event('O', category, name, pid, tid, timestamp)
event['id'] = object_id
event['args'] = {'snapshot': snapshot}
self._events.append(event)
def emit_flow_start(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow start event to the trace.
When matched with a flow end event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_flow_end(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow end event to the trace.
When matched with a flow start event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('t', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_counter(self, category, name, pid, timestamp, counter, value):
"""Emits a record for a single counter.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counter: Name of the counter as a string.
value: Value of the counter as an integer.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = {counter: value}
self._events.append(event)
def emit_counters(self, category, name, pid, timestamp, counters):
"""Emits a counter record for the dictionary 'counters'.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counters: Dictionary of counter values.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = counters.copy()
self._events.append(event)
def format_to_string(self, pretty=False):
"""Formats the chrome trace to a string.
Args:
pretty: (Optional.) If True, produce human-readable JSON output.
Returns:
A JSON-formatted string in Chrome Trace format.
"""
trace = {}
trace['traceEvents'] = self._metadata + self._events
if pretty:
return json.dumps(trace, indent=4, separators=(',', ': '))
else:
return json.dumps(trace, separators=(',', ':'))
class _TensorTracker(object):
"""An internal class to track the lifetime of a Tensor."""
def __init__(self, name, object_id, timestamp, pid, allocator, num_bytes):
"""Creates an object to track tensor references.
This class is not thread safe and is intended only for internal use by
the 'Timeline' class in this file.
Args:
name: The name of the Tensor as a string.
object_id: Chrome Trace object identifier assigned for this Tensor.
timestamp: The creation timestamp of this event as a long integer.
pid: Process identifier of the associated device, as an integer.
allocator: Name of the allocator used to create the Tensor.
num_bytes: Number of bytes allocated (long integer).
Returns:
A 'TensorTracker' object.
"""
self._name = name
self._pid = pid
self._object_id = object_id
self._create_time = timestamp
self._allocator = allocator
self._num_bytes = num_bytes
self._ref_times = []
self._unref_times = []
@property
def name(self):
"""Name of this tensor."""
return self._name
@property
def pid(self):
"""ID of the process which created this tensor (an integer)."""
return self._pid
@property
def create_time(self):
"""Timestamp when this tensor was created (long integer)."""
return self._create_time
@property
def object_id(self):
"""Returns the object identifier of this tensor (integer)."""
return self._object_id
@property
def num_bytes(self):
"""Size of this tensor in bytes (long integer)."""
return self._num_bytes
@property
def allocator(self):
"""Name of the allocator used to create this tensor (string)."""
return self._allocator
@property
def last_unref(self):
"""Last unreference timestamp of this tensor (long integer)."""
return max(self._unref_times)
def add_ref(self, timestamp):
"""Adds a reference to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object reference as an integer.
"""
self._ref_times.append(timestamp)
def add_unref(self, timestamp):
"""Adds an unref to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object unreference as an integer.
"""
self._unref_times.append(timestamp)
class Timeline(object):
  """A class for visualizing execution timelines of TensorFlow steps."""

  def __init__(self, step_stats, graph=None):
    """Constructs a new Timeline.

    A 'Timeline' is used for visualizing the execution of a TensorFlow
    computation.  It shows the timings and concurrency of execution at
    the granularity of TensorFlow Ops.
    This class is not thread safe.

    Args:
      step_stats: The 'StepStats' proto recording execution times.
      graph: (Optional) The 'Graph' that was executed.
    """
    # Keep the unmodified proto; _preprocess_op_time may deep-copy it into
    # self._step_stats with adjusted op start/end times.
    self._origin_step_stats = step_stats
    self._step_stats = None
    self._graph = graph
    self._chrome_trace = _ChromeTraceFormatter()
    self._next_pid = 0
    self._device_pids = {}  # device name -> pid for compute activity.
    self._tensor_pids = {}  # device name -> pid for tensors.
    self._tensors = {}  # tensor_name -> TensorTracker
    self._next_flow_id = 0
    self._flow_starts = {}  # tensor_name -> (timestamp, pid, tid)
    self._alloc_times = {}  # tensor_name -> ( time, allocator, size )
    self._allocator_maximums = {}  # allocator name => maximum bytes long

  def _alloc_pid(self):
    """Allocate a process Id."""
    pid = self._next_pid
    self._next_pid += 1
    return pid

  def _alloc_flow_id(self):
    """Allocate a flow Id."""
    flow_id = self._next_flow_id
    self._next_flow_id += 1
    return flow_id

  def _parse_op_label(self, label):
    """Parses the fields in a node timeline label.

    Returns:
      A (name, op, inputs) tuple, or ('unknown', 'unknown', []) when the
      label does not match the expected form.
    """
    # Expects labels of the form: name = op(arg, arg, ...).
    match = re.match(r'(.*) = (.*)\((.*)\)', label)
    if match is None:
      return 'unknown', 'unknown', []
    nn, op, inputs = match.groups()
    if not inputs:
      inputs = []
    else:
      inputs = inputs.split(', ')
    return nn, op, inputs

  def _parse_kernel_label(self, label, node_name):
    """Parses the fields in a node timeline label.

    Returns:
      A (name, op) pair taken from the '@@annotation' section of the label
      when present, otherwise derived from node_name.
    """
    # Expects labels of the form: retval (arg) detail @@annotation
    start = label.find('@@')
    end = label.find('#')
    if start >= 0 and end >= 0 and start + 2 < end:
      node_name = label[start + 2:end]
    # Node names should always have the form 'name:op'.
    fields = node_name.split(':') + ['unknown']
    name, op = fields[:2]
    return name, op

  def _assign_lanes(self):
    """Assigns non-overlapping lanes for the activities on each device."""
    for device_stats in self._step_stats.dev_stats:
      # TODO(pbar): Genuine thread IDs in NodeExecStats might be helpful.
      lanes = [0]
      for ns in device_stats.node_stats:
        l = -1
        # Greedily take the first lane that is free by this node's start.
        for (i, lts) in enumerate(lanes):
          if ns.all_start_micros > lts:
            l = i
            lanes[l] = ns.all_start_micros + ns.all_end_rel_micros
            break
        if l < 0:
          # No existing lane is free; open a new one.
          l = len(lanes)
          lanes.append(ns.all_start_micros + ns.all_end_rel_micros)
        ns.thread_id = l

  def _emit_op(self, nodestats, pid, is_gputrace):
    """Generates a Chrome Trace event to show Op execution.

    Args:
      nodestats: The 'NodeExecStats' proto recording op execution.
      pid: The pid assigned for the device where this op ran.
      is_gputrace: If True then this op came from the GPUTracer.
    """
    node_name = nodestats.node_name
    start = nodestats.all_start_micros
    duration = nodestats.all_end_rel_micros
    tid = nodestats.thread_id
    inputs = []
    if is_gputrace:
      node_name, op = self._parse_kernel_label(nodestats.timeline_label,
                                               node_name)
    elif node_name == 'RecvTensor':
      # RPC tracing does not use the standard timeline_label format.
      op = 'RecvTensor'
    else:
      _, op, inputs = self._parse_op_label(nodestats.timeline_label)
    args = {'name': node_name, 'op': op}
    if build_info.build_info['is_rocm_build']:
      # The text before the '@@' annotation is recorded as the kernel.
      args['kernel'] = nodestats.timeline_label.split('@@')[0]
    for i, iname in enumerate(inputs):
      args['input%d' % i] = iname
    self._chrome_trace.emit_region(start, duration, pid, tid, 'Op', op, args)

  def _emit_tensor_snapshot(self, tensor, timestamp, pid, tid, value):
    """Generate Chrome Trace snapshot event for a computed Tensor.

    Args:
      tensor: A 'TensorTracker' object.
      timestamp: The timestamp of this snapshot as a long integer.
      pid: The pid assigned for showing the device where this op ran.
      tid: The tid of the thread computing the tensor snapshot.
      value: A JSON-compliant snapshot of the object.
    """
    desc = str(value.tensor_description).replace('"', '')
    snapshot = {'tensor_description': desc}
    self._chrome_trace.emit_obj_snapshot('Tensor', tensor.name, timestamp, pid,
                                         tid, tensor.object_id, snapshot)

  def _produce_tensor(self, name, timestamp, tensors_pid, allocator, num_bytes):
    """Creates and registers a _TensorTracker for a newly produced tensor."""
    # Object ids are assigned sequentially in production order.
    object_id = len(self._tensors)
    tensor = _TensorTracker(name, object_id, timestamp, tensors_pid, allocator,
                            num_bytes)
    self._tensors[name] = tensor
    return tensor

  def _is_gputrace_device(self, device_name):
    """Returns true if this device is part of the GPUTracer logging."""
    return '/stream:' in device_name or '/memcpy' in device_name

  def _allocate_pids(self):
    """Allocate fake process ids for each device in the StepStats."""
    self._allocators_pid = self._alloc_pid()
    self._chrome_trace.emit_pid('Allocators', self._allocators_pid)

    # Add processes in the Chrome trace to show compute and data activity.
    for dev_stats in self._step_stats.dev_stats:
      device_pid = self._alloc_pid()
      self._device_pids[dev_stats.device] = device_pid
      tensors_pid = self._alloc_pid()
      self._tensor_pids[dev_stats.device] = tensors_pid
      self._chrome_trace.emit_pid(dev_stats.device + ' Compute', device_pid)
      self._chrome_trace.emit_pid(dev_stats.device + ' Tensors', tensors_pid)

  def _analyze_tensors(self, show_memory):
    """Analyze tensor references to track dataflow."""
    for dev_stats in self._step_stats.dev_stats:
      device_pid = self._device_pids[dev_stats.device]
      tensors_pid = self._tensor_pids[dev_stats.device]
      for node_stats in dev_stats.node_stats:
        tid = node_stats.thread_id
        node_name = node_stats.node_name
        start_time = node_stats.all_start_micros
        end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
        for index, output in enumerate(node_stats.output):
          # Output slot 0 keeps the bare node name; others get ':<slot>'.
          if index:
            output_name = '%s:%d' % (node_name, index)
          else:
            output_name = node_name

          allocation = output.tensor_description.allocation_description
          num_bytes = allocation.requested_bytes
          allocator_name = allocation.allocator_name
          tensor = self._produce_tensor(output_name, start_time, tensors_pid,
                                        allocator_name, num_bytes)
          tensor.add_ref(start_time)
          tensor.add_unref(end_time)
          self._flow_starts[output_name] = (end_time, device_pid, tid)

          if show_memory:
            self._chrome_trace.emit_obj_create('Tensor', output_name,
                                               start_time, tensors_pid, tid,
                                               tensor.object_id)
            self._emit_tensor_snapshot(tensor, end_time - 1, tensors_pid, tid,
                                       output)

  def _show_compute(self, show_dataflow):
    """Visualize the computation activity."""
    for dev_stats in self._step_stats.dev_stats:
      device_name = dev_stats.device
      device_pid = self._device_pids[device_name]
      is_gputrace = self._is_gputrace_device(device_name)

      for node_stats in dev_stats.node_stats:
        tid = node_stats.thread_id
        start_time = node_stats.all_start_micros
        end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
        self._emit_op(node_stats, device_pid, is_gputrace)

        if is_gputrace or node_stats.node_name == 'RecvTensor':
          continue

        _, _, inputs = self._parse_op_label(node_stats.timeline_label)
        for input_name in inputs:
          if input_name not in self._tensors:
            # This can happen when partitioning has inserted a Send/Recv.
            # We remove the numeric suffix so that the dataflow appears to
            # come from the original node.  Ideally, the StepStats would
            # contain logging for the Send and Recv nodes.
            index = input_name.rfind('/_')
            if index > 0:
              input_name = input_name[:index]

          if input_name in self._tensors:
            tensor = self._tensors[input_name]
            tensor.add_ref(start_time)
            tensor.add_unref(end_time - 1)

            if show_dataflow:
              # We use a different flow ID for every graph edge.
              create_time, create_pid, create_tid = self._flow_starts[
                  input_name]
              # Don't add flows when producer and consumer ops are on the same
              # pid/tid since the horizontal arrows clutter the visualization.
              if create_pid != device_pid or create_tid != tid:
                flow_id = self._alloc_flow_id()
                self._chrome_trace.emit_flow_start(input_name, create_time,
                                                   create_pid, create_tid,
                                                   flow_id)
                self._chrome_trace.emit_flow_end(input_name, start_time,
                                                 device_pid, tid, flow_id)
          else:
            logging.vlog(1, 'Can\'t find tensor %s - removed by CSE?',
                         input_name)

  def _show_memory_counters(self):
    """Produce a counter series for each memory allocator."""
    # Iterate over all tensor trackers to build a list of allocations and
    # frees for each allocator. Then sort the lists and emit a cumulative
    # counter series for each allocator.
    allocations = {}
    for name in self._tensors:
      tensor = self._tensors[name]
      self._chrome_trace.emit_obj_delete('Tensor', name, tensor.last_unref,
                                         tensor.pid, 0, tensor.object_id)
      allocator = tensor.allocator
      if allocator not in allocations:
        allocations[allocator] = []
      num_bytes = tensor.num_bytes
      # Two entries per tensor: +bytes at creation, -bytes at last unref.
      allocations[allocator].append((tensor.create_time, num_bytes, name))
      allocations[allocator].append((tensor.last_unref, -num_bytes, name))

    alloc_maxes = {}

    # Generate a counter series showing total allocations for each allocator.
    for allocator in allocations:
      alloc_list = allocations[allocator]
      alloc_list.sort()
      total_bytes = 0
      alloc_tensor_set = set()
      alloc_maxes[allocator] = AllocationMaximum(
          timestamp=0, num_bytes=0, tensors=set())
      # NOTE(review): alloc_list was already sorted above; this keyed sort
      # can only change the relative order of ties on the timestamp.
      for time, num_bytes, name in sorted(
          alloc_list, key=lambda allocation: allocation[0]):
        total_bytes += num_bytes
        if num_bytes < 0:
          alloc_tensor_set.discard(name)
        else:
          alloc_tensor_set.add(name)

        if total_bytes > alloc_maxes[allocator].num_bytes:
          alloc_maxes[allocator] = AllocationMaximum(
              timestamp=time,
              num_bytes=total_bytes,
              tensors=copy.deepcopy(alloc_tensor_set))

        self._chrome_trace.emit_counter('Memory', allocator,
                                        self._allocators_pid, time, allocator,
                                        total_bytes)
    self._allocator_maximums = alloc_maxes

  def _preprocess_op_time(self, op_time):
    """Update the start and end time of ops in step stats.

    Args:
      op_time: How the execution time of op is shown in timeline. Possible values
        are "schedule", "gpu" and "all". "schedule" will show op from the time it
        is scheduled to the end of the scheduling. Notice by the end of its
        scheduling its async kernels may not start yet. It is shown using the
        default value from step_stats. "gpu" will show op with the execution time
        of its kernels on GPU. "all" will show op from the start of its scheduling
        to the end of its last kernel.
    """
    if op_time == 'schedule':
      # Fast path: use the original proto unmodified.
      self._step_stats = self._origin_step_stats
      return
    self._step_stats = copy.deepcopy(self._origin_step_stats)
    # Separate job task and gpu tracer stream
    stream_all_stats = []
    job_stats = []
    for stats in self._step_stats.dev_stats:
      if '/stream:all' in stats.device:
        stream_all_stats.append(stats)
      elif '/job' in stats.device:
        job_stats.append(stats)

    # Record the start time of the first kernel and the end time of
    # the last gpu kernel for all ops.
    op_gpu_start = {}
    op_gpu_end = {}
    for stats in stream_all_stats:
      for kernel in stats.node_stats:
        name, _ = self._parse_kernel_label(kernel.timeline_label,
                                           kernel.node_name)
        start = kernel.all_start_micros
        end = kernel.all_start_micros + kernel.all_end_rel_micros
        if name in op_gpu_start:
          op_gpu_start[name] = min(op_gpu_start[name], start)
          op_gpu_end[name] = max(op_gpu_end[name], end)
        else:
          op_gpu_start[name] = start
          op_gpu_end[name] = end

    # Update the start and end time of each op according to the op_time.
    # Ops with no recorded gpu kernels keep their scheduling times.
    for stats in job_stats:
      for op in stats.node_stats:
        if op.node_name in op_gpu_start:
          end = max(op_gpu_end[op.node_name],
                    op.all_start_micros + op.all_end_rel_micros)
          if op_time == 'gpu':
            op.all_start_micros = op_gpu_start[op.node_name]
          op.all_end_rel_micros = end - op.all_start_micros

  def analyze_step_stats(self,
                         show_dataflow=True,
                         show_memory=True,
                         op_time='schedule'):
    """Analyze the step stats and format it into Chrome Trace Format.

    Args:
      show_dataflow: (Optional.) If True, add flow events to the trace
        connecting producers and consumers of tensors.
      show_memory: (Optional.) If True, add object snapshot events to the trace
        showing the sizes and lifetimes of tensors.
      op_time: (Optional.) How the execution time of op is shown in timeline.
        Possible values are "schedule", "gpu" and "all". "schedule" will show op
        from the time it is scheduled to the end of the scheduling. Notice by
        the end of its scheduling its async kernels may not start yet. It is
        shown using the default value from step_stats. "gpu" will show op with
        the execution time of its kernels on GPU. "all" will show op from the
        start of its scheduling to the end of its last kernel.

    Returns:
      A 'StepStatsAnalysis' object.
    """
    self._preprocess_op_time(op_time)
    self._allocate_pids()
    self._assign_lanes()
    self._analyze_tensors(show_memory)
    self._show_compute(show_dataflow)
    if show_memory:
      self._show_memory_counters()
    return StepStatsAnalysis(
        chrome_trace=self._chrome_trace,
        allocator_maximums=self._allocator_maximums)

  def generate_chrome_trace_format(self,
                                   show_dataflow=True,
                                   show_memory=False,
                                   op_time='schedule'):
    """Produces a trace in Chrome Trace Format.

    Args:
      show_dataflow: (Optional.) If True, add flow events to the trace
        connecting producers and consumers of tensors.
      show_memory: (Optional.) If True, add object snapshot events to the trace
        showing the sizes and lifetimes of tensors.
      op_time: (Optional.) How the execution time of op is shown in timeline.
        Possible values are "schedule", "gpu" and "all".
        "schedule" will show op from the time it is scheduled to the end of
        the scheduling.
        Notice by the end of its scheduling its async kernels may not start
        yet. It is shown using the default value from step_stats.
        "gpu" will show op with the execution time of its kernels on GPU.
        "all" will show op from the start of its scheduling to the end of
        its last kernel.

    Returns:
      A JSON formatted string in Chrome Trace format.
    """
    step_stats_analysis = self.analyze_step_stats(
        show_dataflow=show_dataflow, show_memory=show_memory, op_time=op_time)

    return step_stats_analysis.chrome_trace.format_to_string(pretty=True)
| |
import re
import os
from itertools import izip_longest
import arcpy
from ..exceptions import MapLayerError, DataSourceUpdateError, UnmappedDataSourceError, UnsupportedLayerError, \
ChangeDataSourcesError, MapDataSourcesBrokenError, ServDefDraftCreateError
from _sddraft import SDDraft
def change_data_sources(map, data_sources):
    """Change the data sources of layers and table views in a map document.

    :param map: The map whose layer/table data sources will be updated.
    :type map: arcpy.mapping.MapDocument
    :param data_sources: Dictionary with "layers" and "tableViews" keys (as
        produced by create_replacement_data_sources_list). "layers" is a list
        (one entry per data frame) of lists of data source dicts, or None to
        leave a layer unchanged; "tableViews" is a flat list of data source
        dicts/None.
    :raises ChangeDataSourcesError: if the dictionary shape does not match the
        map, or if one or more layers/tables could not be updated.
    """
    errors = []
    data_tables = arcpy.mapping.ListTableViews(map)
    layers_by_df = [arcpy.mapping.ListLayers(df) for df in arcpy.mapping.ListDataFrames(map)]
    if 'layers' not in data_sources or 'tableViews' not in data_sources:
        raise ChangeDataSourcesError("Data sources dictionary does not contain both 'layers' and 'tableViews' keys")
    for layers, layer_sources in izip_longest(layers_by_df, data_sources["layers"]):
        if layer_sources is None or len(layers) != len(layer_sources):
            raise ChangeDataSourcesError("Number of layers does not match number of data sources.")
        for layer, layer_source in izip_longest(layers, layer_sources):
            try:
                # Group layers have no data source of their own; a None source
                # means "leave this layer unchanged".
                if layer.isGroupLayer or layer_source is None:
                    continue
                if not layer.supports("dataSource") or not layer.supports("workspacePath"):
                    # error on layers that we can't change
                    raise UnsupportedLayerError(layer = layer)
                if layer.supports("dataSource"):
                    print(u"Layer '{0}': Current datasource: '{1}'".format(layer.longName, layer.dataSource).encode("ascii", "ignore"))
                print(u"Layer '{0}': Attempting to change workspace path".format(layer.longName).encode("ascii", "ignore"))
                _change_data_source(layer, layer_source["workspacePath"], layer_source.get("datasetName"), layer_source.get("workspaceType"), layer_source.get("schema"))
                print(u"Layer '{0}': Workspace path updated to: '{1}'".format(layer.name, layer_source["workspacePath"]).encode("ascii", "ignore"))
                if layer.supports("dataSource"):
                    print(u"Layer '{0}': New datasource: '{1}'".format(layer.longName, layer.dataSource).encode("ascii", "ignore"))
            except MapLayerError as mle:
                # Collect per-layer failures so the remaining layers are still
                # processed; re-raised in bulk at the end.
                errors.append(mle)
    if len(data_tables) != len(data_sources['tableViews']):
        raise ChangeDataSourcesError("Number of data tables does not match number of data table data sources.")
    for data_table, layer_source in izip_longest(data_tables, data_sources['tableViews']):
        try:
            if layer_source is None:
                continue
            print(u"Data Table '{0}': Attempting to change workspace path".format(data_table.name).encode("ascii", "ignore"))
            _change_data_source(data_table, layer_source["workspacePath"], layer_source.get("datasetName"), layer_source.get("workspaceType"), layer_source.get("schema"))
            print(u"Data Table '{0}': Workspace path updated to: '{1}'".format(data_table.name, layer_source["workspacePath"]).encode("ascii", "ignore"))
        except MapLayerError as mle:
            errors.append(mle)
    if errors:
        raise ChangeDataSourcesError("A number of errors were encountered whilst change layer data sources.", errors)
def create_replacement_data_sources_list(document_data_sources_list, data_source_templates, raise_exception_no_change = False):
    """Build a replacement data-source structure for change_data_sources().

    :param document_data_sources_list: Output of list_document_data_sources(),
        a dict with "layers" (list of per-data-frame lists of layer detail
        dicts, with None for group layers) and "tableViews" keys.
    :param data_source_templates: List of dicts, each with a "matchCriteria"
        dict (key/value pairs that must all be present in a layer's details)
        and a "dataSource" value returned for matching layers.
    :param raise_exception_no_change: When True, raise RuntimeError for any
        layer/table that matches no template; when False such entries map to
        None (meaning "leave unchanged").
    :returns: Dict with the same "layers"/"tableViews" shape, holding the
        matched "dataSource" values (or None).
    """
    # list(...) around .items() keeps this working on both Python 2 (where
    # items() returns a list) and Python 3 (where it returns a view that
    # cannot be concatenated with a list).
    template_sets = [
        dict(list(template.items()) + [("matchCriteria", set(template["matchCriteria"].items()))])
        for template in data_source_templates
    ]
    def match_new_data_source(item):
        # Group layers are recorded as None; propagate that unchanged.
        if item is None:
            return None
        # First template whose criteria are a subset of the item's details wins.
        for template in template_sets:
            if template["matchCriteria"].issubset(set(item.items())):
                return template["dataSource"]
        if raise_exception_no_change:
            raise RuntimeError("No matching data source was found for layer")
        return None
    return {
        "layers": [[match_new_data_source(layer) for layer in df] for df in document_data_sources_list["layers"]],
        "tableViews": [match_new_data_source(table) for table in document_data_sources_list["tableViews"]]
    }
def convert_map_to_service_draft(map, sd_draft_path, service_name, folder_name = None, summary = None):
    """Create and analyse a service definition draft from a map document.

    :param map: The map document to publish.
    :type map: arcpy.mapping.MapDocument
    :param sd_draft_path: Output path for the .sddraft file (an existing file
        at this path is removed first).
    :param service_name: Name of the service to publish.
    :param folder_name: Optional server folder to publish into.
    :param summary: Optional service summary text.
    :returns: An SDDraft wrapping the created draft file.
    :raises MapDataSourcesBrokenError: if the map has broken data sources.
    :raises ServDefDraftCreateError: if draft creation or analysis reports errors.
    """
    def check_analysis(analysis):
        # Raise a single exception summarising all analysis errors, if any.
        if analysis["errors"]:
            err_message_list = []
            # .items() works on both Python 2 and 3; iteritems() is Python 2 only.
            for ((message, code), layerlist) in analysis["errors"].items():
                if layerlist is None:
                    err_message_list.append("{message} (CODE {code})".format(message = message, code = code))
                else:
                    err_message_list.append("{message} (CODE {code}) applies to: {layers}".format(
                        message = message, code = code, layers = ", ".join([layer.name for layer in layerlist])))
            raise ServDefDraftCreateError("Analysis Errors: \n{errs}".format(errs = "\n".join(err_message_list)))
    if not validate_map(map):
        raise MapDataSourcesBrokenError()
    if os.path.exists(sd_draft_path):
        os.remove(sd_draft_path)
    analysis = arcpy.mapping.CreateMapSDDraft(map, sd_draft_path, service_name, server_type = "ARCGIS_SERVER",
        copy_data_to_server = False, folder_name = folder_name, summary = summary)
    check_analysis(analysis)
    # A second analysis pass is run against the draft file itself.
    analysis = arcpy.mapping.AnalyzeForSD(sd_draft_path)
    check_analysis(analysis)
    return SDDraft(sd_draft_path)
def convert_service_draft_to_staged_service(sd_draft, sd_path):
    """Stage a service definition draft into a service definition file.

    :param sd_draft: Either a path to a .sddraft file or an object exposing a
        file_path attribute (e.g. an SDDraft).
    :param sd_path: Output path for the staged .sd file (an existing file at
        this path is removed first).
    """
    if os.path.exists(sd_path):
        os.remove(sd_path)
    draft_path = sd_draft if isinstance(sd_draft, basestring) else sd_draft.file_path
    arcpy.StageService_server(draft_path, sd_path)
def list_document_data_sources(map):
    """List the data sources for each layer or table view of the specified map.

    Returns a dictionary with two keys:

    - "layers": one list per data frame (in table-of-contents order), each
      containing a dict of connection details per layer (see
      _get_layer_details; group layers are reported as None). Detail keys are
      "name", "longName" and, where the layer supports them, "datasetName",
      "dataSource", "serviceType", "userName", "server", "service",
      "database" and "workspacePath".
    - "tableViews": a flat list of dicts per table view with "datasetName",
      "dataSource", "definitionQuery" and "workspacePath" keys (see
      _get_table_details).

    :param map: The map to gather data source connection details about
    :type map: arcpy.mapping.MapDocument
    :returns: dict
    """
    layer_details = []
    for data_frame in arcpy.mapping.ListDataFrames(map):
        frame_layers = [_get_layer_details(layer) for layer in arcpy.mapping.ListLayers(data_frame)]
        layer_details.append(frame_layers)
    table_details = [_get_table_details(table) for table in arcpy.mapping.ListTableViews(map)]
    return {"layers": layer_details, "tableViews": table_details}
def validate_map(map):
    """Analyse the map for broken layers, listing any found on shell output.

    :param map: The map to be validated
    :type map: arcpy.mapping.MapDocument
    :returns: Boolean, True if valid, False if there are one or more broken layers
    """
    broken_layers = arcpy.mapping.ListBrokenDataSources(map)
    if not broken_layers:
        return True
    print(u"Map '{0}': Broken data sources:".format(map.title))
    for layer in broken_layers:
        print(u" {0}".format(layer.name))
        if not hasattr(layer, "supports"):
            # probably a TableView
            print(u" workspace: {0}".format(layer.workspacePath))
            print(u" datasource: {0}".format(layer.dataSource))
            continue
        # Some sort of layer
        if layer.supports("workspacePath"):
            print(u" workspace: {0}".format(layer.workspacePath))
        if layer.supports("dataSource"):
            print(u" datasource: {0}".format(layer.dataSource))
    return False
def _change_data_source(layer, workspace_path, dataset_name = None, workspace_type = None, schema = None):
try:
if ((not hasattr(layer, "supports") or layer.supports("workspacePath")) and
(dataset_name == None and workspace_type == None and schema == None)):
# Tests if layer is actually a layer object (i.e. has a "support" function) or table view (which doesn't,
# but always supports "workspacePath"). Can't test on type (arcpy.mapping.TableView) as that doesn't work
# on ArcPy 10.0
layer.findAndReplaceWorkspacePath("", workspace_path, validate = False)
return
kwargs = { "validate": False }
if dataset_name == None and hasattr(layer, "supports") and layer.supports("datasetName"):
if layer.supports("workspacePath"):
dataset_name = layer.dataSource.replace(layer.workspacePath, "")
else:
dataset_name = layer.datasetName
if dataset_name != None:
if (schema != None):
ds_user, ds_name, fc_user, fc_name = _parse_data_source(dataset_name)
dataset_name = "{0}.{1}".format(schema, fc_name)
kwargs["dataset_name"] = dataset_name
if workspace_type != None:
kwargs["workspace_type"] = workspace_type
layer.replaceDataSource(workspace_path, **kwargs)
except Exception, e:
raise DataSourceUpdateError("Exception raised internally by ArcPy", layer, e)
if hasattr(layer, "isBroken") and layer.isBroken:
raise DataSourceUpdateError("Layer is now broken.", layer)
def _get_layer_details(layer):
if layer.isGroupLayer:
return None
details = {
"name": layer.name,
"longName": layer.longName
}
if layer.supports("datasetName"):
details["datasetName"] = layer.datasetName
if layer.supports("dataSource"):
details["dataSource"] = layer.dataSource
if layer.supports("serviceProperties"):
details["serviceType"] = layer.serviceProperties["ServiceType"]
details["userName"] = layer.serviceProperties["UserName"]
if layer.serviceProperties["ServiceType"].upper() == "SDE":
details["server"] = layer.serviceProperties["Server"]
details["service"] = layer.serviceProperties["Service"]
details["database"] = layer.serviceProperties["Database"]
if layer.supports("workspacePath"):
details["workspacePath"] = layer.workspacePath
return details
def _get_table_details(table):
return {
"datasetName": table.datasetName,
"dataSource": table.dataSource,
"definitionQuery": table.definitionQuery,
"workspacePath": table.workspacePath
}
def _parse_data_source(data_source):
"""Takes a string describing a data source and returns a four-part tuple describing the dataset username, dataset
name, feature class username and feature class name"""
dataset_regex = re.compile(
r"^(?:\\)?(?P<ds_user>[\w]*?)(?:\.)?(?P<ds_name>[\w]*?(?=\\))(?:\\)?(?P<fc_user>[\w]*?(?=\.))(?:\.)(?P<fc_name>[\w]*?)$",
re.IGNORECASE)
r = dataset_regex.search(data_source)
if r == None:
feature_class_regex = re.compile(
r"^(?:\\)?(?P<fc_user>[\w]*?(?=\.))(?:\.)(?P<fc_name>[\w]*?)$",
re.IGNORECASE)
r = feature_class_regex.search(data_source)
if r == None:
return (None, None, None, data_source)
r = r.groupdict()
return (r.get("ds_user"), r.get("ds_name"), r.get("fc_user"), r.get("fc_name"))
| |
import test_support
import os
class IncludeStyleRuleTest(test_support.TestBase):
    """Tests for colobot-lint's IncludeStyleRule.

    Exercises include ordering (alphabetical within a block), block separation
    (empty line between local/global groups), quote vs angle-bracket usage,
    full-relative-path requirements, and special placement of config and
    matching/base-class headers.
    """
    def setUp(self):
        """Select only IncludeStyleRule and the default expected error id/severity."""
        self.set_default_rules_selection(['IncludeStyleRule'])
        self.set_default_error_id('include style')
        self.set_default_error_severity('style')
    def assert_colobot_lint_result_with_project_headers(self,
                                                        main_file_lines,
                                                        main_file,
                                                        project_header_files,
                                                        system_header_files,
                                                        expected_errors):
        """Run colobot-lint on main_file with empty project/system headers laid
        out under 'project'/'system' dirs, asserting the expected errors."""
        main_file = os.path.join('project', main_file)
        source_files_data = { main_file: main_file_lines }
        for project_header_file in project_header_files:
            source_files_data[os.path.join('project', project_header_file)] = []
        for system_header_file in system_header_files:
            source_files_data[os.path.join('system', system_header_file)] = []
        self.assert_colobot_lint_result_with_custom_files(
            source_files_data = source_files_data,
            compilation_database_files = [main_file],
            target_files = [main_file],
            additional_compile_flags = ['-I$TEMP_DIR/project', '-I$TEMP_DIR/system'],
            additional_options = ['-project-local-include-path', '$TEMP_DIR/project'],
            expected_errors = expected_errors)
    def assert_colobot_lint_result_with_project_headers_and_fake_header_source(self,
                                                                               main_file_lines,
                                                                               main_file,
                                                                               project_headers,
                                                                               system_header_files,
                                                                               expected_errors):
        """Like the helper above, but main_file is a header compiled through a
        generated fake_header_sources/*.cpp wrapper; project_headers is a dict
        mapping header path to its content lines."""
        fake_header_sources_dir = os.path.join('project', 'fake_header_sources')
        source_files_data = dict()
        main_file_without_ext, _ = os.path.splitext(main_file)
        fake_header_source_file = os.path.join(fake_header_sources_dir, main_file_without_ext + '.cpp')
        source_files_data[fake_header_source_file] = ['#include "{0}"'.format(main_file)]
        main_file = os.path.join('project', main_file)
        source_files_data[main_file] = main_file_lines
        for project_header_file in project_headers.keys():
            source_files_data[os.path.join('project', project_header_file)] = project_headers[project_header_file]
        for system_header_file in system_header_files:
            source_files_data[os.path.join('system', system_header_file)] = []
        self.assert_colobot_lint_result_with_custom_files(
            source_files_data = source_files_data,
            compilation_database_files = [fake_header_source_file],
            target_files = [fake_header_source_file],
            additional_compile_flags = ['-I$TEMP_DIR/project', '-I$TEMP_DIR/system', '-I$TEMP_DIR/' + fake_header_sources_dir],
            additional_options = ['-project-local-include-path', '$TEMP_DIR/project'],
            expected_errors = expected_errors)
    def test_no_includes(self):
        self.assert_colobot_lint_result(
            source_file_lines = [
                ''
            ],
            expected_errors = [])
    def test_local_includes_sorted_alphabetically(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include "abc.h"',
                '#include "def.h"',
                '',
                '#include "def/abc.h"',
                '#include "def/def.h"',
            ],
            main_file = 'src.cpp',
            project_header_files = [
                'abc.h',
                'def.h',
                'def/abc.h',
                'def/def.h'
            ],
            system_header_files = [],
            expected_errors = [])
    def test_local_includes_not_sorted_alphabetically(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include "abc.h"',
                '#include "def.h"',
                '',
                '#include "def/def.h"',
                '#include "def/abc.h"',
            ],
            main_file = 'src.cpp',
            project_header_files = [
                'abc.h',
                'def.h',
                'def/abc.h',
                'def/def.h'
            ],
            system_header_files = [],
            expected_errors = [
                {
                    'msg': "Broken alphabetical ordering, expected 'def/abc.h', not 'def/def.h'",
                    'line': '4'
                }
            ])
    def test_local_includes_from_different_subpaths_in_one_block(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include "abc.h"',
                '#include "def.h"',
                '#include "def/abc.h"',
                '#include "def/def.h"',
            ],
            main_file = 'src.cpp',
            project_header_files = [
                'abc.h',
                'def.h',
                'def/abc.h',
                'def/def.h'
            ],
            system_header_files = [],
            expected_errors = [
                {
                    'msg': "Expected empty line between include directives",
                    'line': '3'
                }
            ])
    def test_system_includes_dont_need_to_be_sorted_alphabetically(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include <system_header2.h>',
                '#include <system_header1.h>'
            ],
            main_file = 'src.cpp',
            project_header_files = [],
            system_header_files = [
                'system_header1.h',
                'system_header2.h'
            ],
            expected_errors = [])
    def test_local_include_in_angle_brackets(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include "abc.h"',
                '#include "def.h"',
                '',
                '#include <def/abc.h>',
                '#include "def/def.h"',
            ],
            main_file = 'src.cpp',
            project_header_files = [
                'abc.h',
                'def.h',
                'def/abc.h',
                'def/def.h'
            ],
            system_header_files = [],
            expected_errors = [
                {
                    'msg': "Local include 'def/abc.h' should be included with quotes, not angled brackets",
                    'line': '4'
                }
            ])
    def test_global_include_in_quotes(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include <system_header1.h>',
                '#include "system_header2.h"',
                '#include <system_header3.h>',
            ],
            main_file = 'src.cpp',
            project_header_files = [],
            system_header_files = [
                'system_header1.h',
                'system_header2.h',
                'system_header3.h',
            ],
            expected_errors = [
                {
                    'msg': "Global include 'system_header2.h' should be included with angled brackets, not quotes",
                    'line': '2'
                }
            ])
    def test_local_include_not_full_path_from_project_root(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include "abc.h"',
                '#include "def.h"',
                '#include "jkl.h"',
                '',
                '#include "def/ghi.h"',
            ],
            main_file = 'def/mno.cpp',
            project_header_files = [
                'abc.h',
                'def.h',
                'def/ghi.h',
                'def/jkl.h'
            ],
            system_header_files = [],
            expected_errors = [
                {
                    'msg': "Expected local include to be full relative path from project local include search path: 'def/jkl.h', not 'jkl.h'",
                    'line': '3'
                }
            ])
    def test_local_and_global_includes_in_one_block(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include "abc.h"',
                '#include "def.h"',
                '#include <system_header1.h>',
                '#include <system_header2.h>'
            ],
            main_file = 'def/ghi.cpp',
            project_header_files = [
                'abc.h',
                'def.h'
            ],
            system_header_files = [
                'system_header1.h',
                'system_header2.h',
            ],
            expected_errors = [
                {
                    'msg': "Expected empty line between include directives",
                    'line': '3'
                }
            ])
    def test_local_and_global_includes_in_separate_blocks(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include "abc.h"',
                '#include "def.h"',
                '',
                '#include <system_header1.h>',
                '#include <system_header2.h>',
            ],
            main_file = 'def/ghi.cpp',
            project_header_files = [
                'abc.h',
                'def.h'
            ],
            system_header_files = [
                'system_header1.h',
                'system_header2.h'
            ],
            expected_errors = [])
    def test_local_include_after_global_include(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include "abc.h"',
                '',
                '#include <system_header1.h>',
                '#include <system_header2.h>',
                '#include "def.h"'
            ],
            main_file = 'def/ghi.cpp',
            project_header_files = [
                'abc.h',
                'def.h'
            ],
            system_header_files = [
                'system_header1.h',
                'system_header2.h'
            ],
            expected_errors = [
                {
                    'msg': "Local include 'def.h' should not be placed after global includes",
                    'line': '5'
                }
            ])
    def test_config_header_at_the_top(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include "config/config.h"',
                '',
                '#include "abc.h"',
                '#include "def.h"',
                '',
                '#include "def/abc.h"',
                '#include "def/def.h"',
            ],
            main_file = 'src.cpp',
            project_header_files = [
                'config/config.h',
                'abc.h',
                'def.h',
                'def/abc.h',
                'def/def.h'
            ],
            system_header_files = [],
            expected_errors = [])
    def test_config_header_in_one_block_at_the_top(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include "config/config.h"',
                '#include "abc.h"',
                '#include "def.h"',
                '',
                '#include "def/abc.h"',
                '#include "def/def.h"',
            ],
            main_file = 'src.cpp',
            project_header_files = [
                'config/config.h',
                'abc.h',
                'def.h',
                'def/abc.h',
                'def/def.h'
            ],
            system_header_files = [],
            expected_errors = [
                {
                    'msg': "Expected empty line between include directives",
                    'line': '2'
                }
            ])
    def test_config_header_not_at_the_top(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include "abc.h"',
                '#include "def.h"',
                '',
                '#include "config/config.h"',
                '',
                '#include "def/abc.h"',
                '#include "def/def.h"',
            ],
            main_file = 'src.cpp',
            project_header_files = [
                'config/config.h',
                'abc.h',
                'def.h',
                'def/abc.h',
                'def/def.h'
            ],
            system_header_files = [],
            expected_errors = [
                {
                    'msg': "Expected config include directive: 'config/config.h', not 'abc.h'",
                    'line': '1'
                },
                {
                    'msg': "Expected empty line between include directives",
                    'line': '2'
                }
            ])
    def test_matching_header_at_the_top(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include "src.h"',
                '',
                '#include "abc.h"',
                '#include "def.h"',
                '',
                '#include "def/abc.h"',
                '#include "def/def.h"',
            ],
            main_file = 'src.cpp',
            project_header_files = [
                'src.h',
                'abc.h',
                'def.h',
                'def/abc.h',
                'def/def.h'
            ],
            system_header_files = [],
            expected_errors = [])
    def test_matching_header_in_one_block(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include "src.h"',
                '#include "abc.h"',
                '#include "def.h"',
                '',
                '#include "def/abc.h"',
                '#include "def/def.h"',
            ],
            main_file = 'src.cpp',
            project_header_files = [
                'src.h',
                'abc.h',
                'def.h',
                'def/abc.h',
                'def/def.h'
            ],
            system_header_files = [],
            expected_errors = [
                {
                    'msg': "Expected empty line between include directives",
                    'line': '2'
                }
            ])
    def test_matching_header_and_config_file(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include "src.h"',
                '',
                '#include "config.h"',
                '',
                '#include "abc.h"',
                '#include "def.h"',
                '',
                '#include "def/abc.h"',
                '#include "def/def.h"',
            ],
            main_file = 'src.cpp',
            project_header_files = [
                'config.h',
                'src.h',
                'abc.h',
                'def.h',
                'def/abc.h',
                'def/def.h'
            ],
            system_header_files = [],
            expected_errors = [])
    def test_matching_header_not_at_the_top(self):
        self.assert_colobot_lint_result_with_project_headers(
            main_file_lines = [
                '#include "abc.h"',
                '#include "src.h"',
                '#include "def.h"',
                '',
                '#include "def/abc.h"',
                '#include "def/def.h"',
            ],
            main_file = 'src.cpp',
            project_header_files = [
                'src.h',
                'abc.h',
                'def.h',
                'def/abc.h',
                'def/def.h'
            ],
            system_header_files = [],
            expected_errors = [
                {
                    'msg': "Expected first include directive to be matching header file: 'src.h', not 'abc.h'",
                    'line': '1'
                },
                {
                    'msg': "Expected empty line between include directives",
                    'line': '2'
                },
                {
                    'msg': "Broken alphabetical ordering, expected 'def.h', not 'src.h'",
                    'line': '2'
                }
            ])
    def test_base_class_header_at_the_top(self):
        self.assert_colobot_lint_result_with_project_headers_and_fake_header_source(
            main_file_lines = [
                '#include "def/base.h"',
                '',
                '#include "abc/abc.h"',
                '#include "abc/def.h"',
                '',
                '#include "def/abc.h"',
                '#include "def/def.h"',
                '',
                'class Derived : public Base {};'
            ],
            main_file = 'def/src.h',
            project_headers = {
                'abc/abc.h': [],
                'abc/def.h': [],
                'def/abc.h': [],
                'def/def.h': [],
                'def/base.h': [
                    'class Base {};'
                ]
            },
            system_header_files = [],
            expected_errors = [])
    def test_base_class_header_in_one_block(self):
        self.assert_colobot_lint_result_with_project_headers_and_fake_header_source(
            main_file_lines = [
                '#include "def/base.h"',
                '#include "abc/abc.h"',
                '#include "abc/def.h"',
                '',
                '#include "def/abc.h"',
                '#include "def/def.h"',
                '',
                'class Derived : public Base {};'
            ],
            main_file = 'def/src.h',
            project_headers = {
                'abc/abc.h': [],
                'abc/def.h': [],
                'def/abc.h': [],
                'def/def.h': [],
                'def/base.h': [
                    'class Base {};'
                ]
            },
            system_header_files = [],
            expected_errors = [
                {
                    'id': 'include style',
                    'severity': 'style',
                    'msg': "Expected empty line between include directives",
                    'line': '2'
                }
            ])
    def test_base_class_header_and_config_file(self):
        self.assert_colobot_lint_result_with_project_headers_and_fake_header_source(
            main_file_lines = [
                '#include "def/base.h"',
                '',
                '#include "config/config.h"',
                '',
                '#include "abc/abc.h"',
                '#include "abc/def.h"',
                '',
                '#include "def/abc.h"',
                '#include "def/def.h"',
                '',
                '#include <system_header.h>',
                '',
                'class Derived : public Base {};'
            ],
            main_file = 'def/src.h',
            project_headers = {
                'config/config.h': [],
                'abc/abc.h': [],
                'abc/def.h': [],
                'def/abc.h': [],
                'def/def.h': [],
                'def/base.h': [
                    'class Base {};'
                ]
            },
            system_header_files = [
                'system_header.h'
            ],
            expected_errors = [])
    def test_template_base_class_is_ignored(self):
        self.assert_colobot_lint_result_with_project_headers_and_fake_header_source(
            main_file_lines = [
                '#include "def/src.h"'
            ],
            main_file = 'def/src.cpp',
            project_headers = {
                'def/src.h': [
                    'template<typename T>',
                    'class Base',
                    '{',
                    '    T a;',
                    '};',
                    'template<typename T>'
                    'class Derived : public Base<T>',
                    '{',
                    '};',
                    'Derived<int> d;'
                ],
            },
            system_header_files = [],
            expected_errors = [])
| |
# Copyright (c) 2004 Gavin E. Crooks <gec@compbio.berkeley.edu>
#
# This software is distributed under the MIT Open Source License.
# <http://www.opensource.org/licenses/mit-license.html>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""Custom extensions to OptionParse for parsing command line options."""
from __future__ import print_function
# FIXME: Docstring
# TODO: Add profiling option
# DeOptionParser :
#
# http://docs.python.org/lib/module-optparse.html
#
# Random_options :
# Set random generator and seed. Use options.random as
# source of random numbers
# Copyright :
# print copyright information
# Documentation :
# print extended document information
#
# Additional file_in and file_out types
import sys
from copy import copy
from optparse import Option
from optparse import OptionParser
from optparse import IndentedHelpFormatter
from optparse import OptionValueError
import random
def _copyright_callback(option, opt, value, parser):
if option or opt or value or parser: pass # Shut up lint checker
print(parser.copyright)
sys.exit()
def _doc_callback(option, opt, value, parser):
if option or opt or value or parser: pass # Shut up lint checker
print(parser.long_description)
sys.exit()
class DeHelpFormatter(IndentedHelpFormatter):
    """Help formatter with wider help columns and space-separated metavars."""
    def __init__(self,
                 indent_increment=2,
                 max_help_position=32,
                 width=78,
                 short_first=1):
        IndentedHelpFormatter.__init__(
            self, indent_increment, max_help_position,
            width, short_first)

    def format_option_strings(self, option):
        """Return the option strings joined with their metavariable, if any."""
        short_opts = list(option._short_opts)
        long_opts = list(option._long_opts)
        if option.takes_value():
            metavar = option.metavar or option.dest.upper()
            long_opts = ["%s %s" % (lopt, metavar) for lopt in long_opts]
        # Pad options without a short form so columns line up.
        if not short_opts:
            short_opts = [" "]
        if self.short_first:
            ordered = short_opts + long_opts
        else:
            ordered = long_opts + short_opts
        return " ".join(ordered)
def _check_file_in(option, opt, value):
if option or opt or value : pass # Shut up lint checker
try:
return open(value, "r")
except IOError:
raise OptionValueError(
"option %s: cannot open file: %s" % (opt, value) )
def _check_file_out(option, opt, value):
if option or opt or value : pass # Shut up lint checker
try:
return open(value, "w+")
except IOError:
raise OptionValueError(
"option %s: cannot open file: %s" % (opt, value) )
def _check_boolean(option, opt, value) :
if option or opt or value : pass # Shut up lint checker
v = value.lower()
choices = {'no': False, 'false':False, '0': False,
'yes': True, 'true': True, '1':True }
try:
return choices[v]
except KeyError:
raise OptionValueError(
"option %s: invalid choice: '%s' " \
"(choose from 'yes' or 'no', 'true' or 'false')" % (opt, value))
def _check_dict(option, opt, value) :
if option or opt or value : pass # Shut up lint checker
v = value.lower()
choices = option.choices
try:
return choices[v]
except KeyError:
raise OptionValueError(
"option %s: invalid choice: '%s' " \
"(choose from '%s')" % (opt, value, "', '".join(choices)))
class DeOption(Option):
    """Option subclass adding four types: file_in, file_out, boolean and dict.

    The "dict" type requires a dictionary of choices on the option; its
    validation replaces the standard choice check in CHECK_METHODS.
    """
    TYPES = Option.TYPES + ("file_in","file_out", "boolean", "dict")
    TYPE_CHECKER = copy(Option.TYPE_CHECKER)
    TYPE_CHECKER["file_in"] = _check_file_in
    TYPE_CHECKER["file_out"] = _check_file_out
    TYPE_CHECKER["boolean"] = _check_boolean
    TYPE_CHECKER["dict"] = _check_dict
    # Default when no choices dictionary is supplied for a "dict" option.
    choices = None

    def _new_check_choice(self):
        # Validate the choices dictionary ourselves for "dict" options;
        # delegate every other type to the standard optparse check.
        if self.type == "dict":
            if self.choices is None:
                raise OptionValueError(
                    "must supply a dictionary of choices for type 'dict'")
            elif not isinstance(self.choices, dict):
                raise OptionValueError(
                    "choices must be a dictionary ('%s' supplied)"
                    % str(type(self.choices)).split("'")[1])
            return
        self._check_choice()

    # Have to override _check_choices so that we can parse
    # a dict through to check_dict.
    # BUG FIX: the list must be copied -- assigning
    # "CHECK_METHODS = Option.CHECK_METHODS" aliased the base class's list,
    # so the item assignment below mutated Option.CHECK_METHODS in place and
    # changed the behaviour of every plain Option in the process.
    CHECK_METHODS = copy(Option.CHECK_METHODS)
    CHECK_METHODS[2] = _new_check_choice
class DeOptionParser(OptionParser):
    """OptionParser extension with optional common options.

    Depending on constructor flags it adds: --doc (prints long_description),
    --copyright (prints copyright), -q/-v verbosity flags, and
    --seed/--generator random number options. When random options are enabled,
    parse_args() attaches a seeded random.Random instance as options.random.
    """
    def __init__(self,
                 usage=None,
                 option_list=None,
                 option_class=DeOption,
                 version=None,
                 conflict_handler="error",
                 description=None,
                 long_description = None,
                 formatter=DeHelpFormatter(),
                 add_help_option=True,
                 prog=None,
                 copyright=None,
                 add_verbose_options=True,
                 add_random_options=False
                 ):
        OptionParser.__init__(self,
                              usage,
                              option_list,
                              option_class,
                              version,
                              conflict_handler,
                              description,
                              formatter,
                              add_help_option,
                              prog )
        if long_description :
            self.long_description = long_description
            self.add_option("--doc",
                            action="callback",
                            callback=_doc_callback,
                            help="Detailed documentation")
        if copyright :
            self.copyright = copyright
            self.add_option("--copyright",
                            action="callback",
                            callback=_copyright_callback,
                            help="")
        if add_verbose_options :
            # NOTE(review): "--quite" looks like a typo for "--quiet", but
            # renaming it would change the public command line interface, so
            # it is left as-is.
            self.add_option("-q", "--quite",
                            action="store_false",
                            dest="verbose",
                            default=False,
                            help="Run quietly (default)")
            self.add_option("-v", "--verbose",
                            action="store_true",
                            dest="verbose",
                            default=False,
                            help="Verbose output (Not quite)")
        self.random_options = False
        if add_random_options :
            self.random_options = True
            self.add_option("--seed",
                            action="store",
                            type = "int",
                            dest="random_seed",
                            help="Initial seed for pseudo-random number generator. (default: System time)",
                            metavar="INTEGER" )
            # NOTE(review): random.WichmannHill was removed in Python 3; this
            # choice only works on Python 2 era interpreters.
            self.add_option("--generator",
                            action="store",
                            dest="random_generator",
                            default="MersenneTwister",
                            help="Select MersenneTwister (default) or WichmannHill pseudo-random number generator",
                            metavar="TYPE" )

    def parse_args(self, args, values=None):
        """Parse arguments; additionally build options.random when enabled."""
        (options, args) = OptionParser.parse_args(self, args, values)
        if self.random_options :
            if options.random_generator is None or options.random_generator == "MersenneTwister" :
                r = random.Random()
            elif options.random_generator == "WichmannHill" :
                r = random.WichmannHill()
            else :
                self.error("Acceptable generators are MersenneTwister (default) or WichmannHill")
            # BUG FIX: compare against None rather than testing truthiness --
            # the old "if options.random_seed:" silently ignored an explicit
            # seed of 0 and fell back to system-time seeding.
            if options.random_seed is not None :
                r.seed(options.random_seed)
            options.__dict__["random"] = r
        return (options, args)
| |
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module that helps to triage Commit Queue failures."""
from __future__ import print_function
import ConfigParser
import glob
import os
import pprint
from chromite.cbuildbot import cbuildbot_config
from chromite.cbuildbot import failures_lib
from chromite.cbuildbot import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import gerrit
from chromite.lib import git
from chromite.lib import osutils
from chromite.lib import patch as cros_patch
from chromite.lib import portage_util
def GetRelevantOverlaysForConfig(config, build_root):
  """Returns the set of overlays relevant to |config|.

  Args:
    config: A cbuildbot build config object (its |boards| are examined).
    build_root: Path to the build root.

  Returns:
    A set of overlay paths.
  """
  relevant_overlays = set()
  for board in config.boards:
    relevant_overlays.update(
        portage_util.FindOverlays(constants.BOTH_OVERLAYS, board, build_root))
  return relevant_overlays
def _GetAffectedImmediateSubdirs(change, git_repo):
"""Gets the set of immediate subdirs affected by |change|.
Args:
change: GitRepoPatch to examine.
git_repo: Path to checkout of git repository.
Returns:
A set of absolute paths to modified subdirectories of |git_repo|.
"""
return set([os.path.join(git_repo, path.split(os.path.sep)[0])
for path in change.GetDiffStatus(git_repo)])
def _GetCommonAffectedSubdir(change, git_repo):
  """Gets the longest common path of changes in |change|.

  Args:
    change: GitRepoPatch to examine.
    git_repo: Path to checkout of git repository.

  Returns:
    An absolute path in |git_repo|.
  """
  affected = []
  for path in change.GetDiffStatus(git_repo):
    affected.append(os.path.join(git_repo, path))
  return cros_build_lib.GetCommonPathPrefix(affected)
def GetAffectedOverlays(change, manifest, all_overlays):
  """Get the set of overlays affected by a given change.

  Args:
    change: The GerritPatch instance to look at.
    manifest: A ManifestCheckout instance representing our build directory.
    all_overlays: The set of all valid overlays.

  Returns:
    The set of overlays affected by the specified |change|. If the change
    affected something other than an overlay, return None.
  """
  checkout = change.GetCheckout(manifest, strict=False)
  if not checkout:
    # No checkout for this change, so it cannot be attributed to overlays.
    return None

  repo_path = checkout.GetPath(absolute=True)

  # Case 1: the whole git repository is itself an overlay.
  # Example: src/private-overlays/overlay-x86-zgb-private
  if repo_path in all_overlays:
    return set([repo_path])

  # Case 2: every top-level subdirectory touched by the change is an
  # overlay. Example: src/overlays/overlay-x86-zgb
  touched_subdirs = _GetAffectedImmediateSubdirs(change, repo_path)
  if touched_subdirs.issubset(all_overlays):
    return touched_subdirs

  # The change touched something that is not an overlay.
  return None
def GetAffectedPackagesForOverlayChange(change, manifest, overlays):
  """Get the set of packages affected by the overlay |change|.

  Args:
    change: The GerritPatch instance that modifies an overlay.
    manifest: A ManifestCheckout instance representing our build directory.
    overlays: List of overlay paths.

  Returns:
    The set of packages affected by the specified |change|. E.g.
    {'chromeos-base/chromite-0.0.1-r1258'}. If the change affects
    something other than packages, return None.
  """
  checkout = change.GetCheckout(manifest, strict=False)
  if not checkout:
    # No checkout available; we cannot attribute the change to packages.
    return None

  repo_path = checkout.GetPath(absolute=True)
  packages = set()
  for rel_path in change.GetDiffStatus(repo_path):
    # Walk up from the modified file looking for a directory containing
    # an ebuild; that directory is the package directory.
    search_start = os.path.join(repo_path, rel_path)
    ebuild_pattern = osutils.FindInPathParents(
        '*.ebuild', search_start, test_func=glob.glob, end_path=repo_path)
    if not ebuild_pattern:
      # This file is not inside any package directory, so |change|
      # affects something other than packages.
      return None
    # Resolve the repo_path/../*.ebuild pattern to a real ebuild path.
    ebuild = glob.glob(ebuild_pattern)[0]
    # A valid package directory sits exactly two levels below an overlay:
    # <overlay>/<category>/<package>/<pkg>.ebuild.
    overlay_dir = os.path.sep.join(ebuild.split(os.path.sep)[:-3])
    if overlay_dir not in overlays:
      return None
    category, pkg_name, _ = portage_util.SplitEbuildPath(ebuild)
    packages.add('%s/%s' % (category, pkg_name))
  return packages
def _GetOptionFromConfigFile(config_path, section, option):
  """Get |option| from |section| in |config_path|.

  Args:
    config_path: Filename to look at.
    section: Section header name.
    option: Option name.

  Returns:
    The value of the option, or None when the section/option is absent.
  """
  parser = ConfigParser.SafeConfigParser()
  # read() silently ignores a missing file, which is what we want: a
  # repository without a config file simply has no options set.
  parser.read(config_path)
  if parser.has_option(section, option):
    return parser.get(section, option)
  return None
def _GetConfigFileForChange(change, checkout_path):
  """Gets the path of the config file for |change|.

  This function takes into account the files that are modified by |change| to
  determine the commit queue config file within |checkout_path| that should be
  used for this change. The config file used is the one in the common ancestor
  directory to all changed files, or the nearest parent directory. See
  http://chromium.org/chromium-os/build/bypassing-tests-on-a-per-project-basis

  Args:
    change: Change to examine, as a GitRepoPatch object.
    checkout_path: Full absolute path to a checkout of the repository that
      |change| applies to.

  Returns:
    Path to the config file to be read for |change|. The returned path will
    be within |checkout_path|. If no config files in common subdirectories
    were found, a config file path in the root of the checkout will be
    returned, in which case the file is not guaranteed to exist.
  """
  search_dir = _GetCommonAffectedSubdir(change, checkout_path)
  while True:
    candidate = os.path.join(search_dir, constants.CQ_CONFIG_FILENAME)
    # Stop at the first directory that has a config file, or once we have
    # climbed up to (or above) the checkout root.
    if os.path.isfile(candidate) or checkout_path.startswith(search_dir):
      return candidate
    # Guard against walking past the filesystem root, which would make
    # os.path.dirname() loop forever.
    assert search_dir not in ('/', '')
    search_dir = os.path.dirname(search_dir)
def GetOptionForChange(build_root, change, section, option):
  """Get |option| from |section| in the config file for |change|.

  Args:
    build_root: The root of the checkout.
    change: Change to examine, as a GitRepoPatch object.
    section: Section header name.
    option: Option name.

  Returns:
    The value of the option, or None when the change has no checkout.
  """
  manifest = git.ManifestCheckout.Cached(build_root)
  checkout = change.GetCheckout(manifest)
  if not checkout:
    # Without a checkout we cannot locate a config file for the change.
    return None
  checkout_dir = checkout.GetPath(absolute=True)
  config_path = _GetConfigFileForChange(change, checkout_dir)
  return _GetOptionFromConfigFile(config_path, section, option)
def GetStagesToIgnoreForChange(build_root, change):
  """Get a list of stages that the CQ should ignore for a given |change|.

  The list of stage name prefixes to ignore for each project is specified in a
  config file inside the project, named COMMIT-QUEUE.ini. The file would look
  like this:

  [GENERAL]
    ignored-stages: HWTest VMTest

  The CQ will submit changes to the given project even if the listed stages
  failed. These strings are stage name prefixes, meaning that "HWTest" would
  match any HWTest stage (e.g. "HWTest [bvt]" or "HWTest [foo]")

  Args:
    build_root: The root of the checkout.
    change: Change to examine, as a PatchQuery object.

  Returns:
    A list of stages to ignore for the given |change|.
  """
  try:
    raw_value = GetOptionForChange(build_root, change, 'GENERAL',
                                   'ignored-stages')
  except ConfigParser.Error:
    # A malformed config file means nothing can safely be ignored.
    logging.error('%s has malformed config file', change, exc_info=True)
    raw_value = None
  return raw_value.split() if raw_value else []
class CategorizeChanges(object):
  """A collection of methods to help categorize GerritPatch changes.

  This class is mainly used on a build slave to categorize changes
  applied in the build.
  """

  @classmethod
  def ClassifyOverlayChanges(cls, changes, config, build_root, manifest,
                             packages_under_test):
    """Classifies overlay changes in |changes|.

    Args:
      changes: The list or set of GerritPatch instances.
      config: The cbuildbot config.
      build_root: Path to the build root.
      manifest: A ManifestCheckout instance representing our build directory.
      packages_under_test: A list of packages names included in the build
        without version/revision (e.g. ['chromeos-base/chromite']). If None,
        don't try to map overlay changes to packages.

    Returns:
      A (overlay_changes, irrelevant_overlay_changes) tuple; overlay_changes
      is a subset of |changes| that have modified one or more overlays, and
      irrelevant_overlay_changes is a subset of overlay_changes which are
      irrelevant to |config|.
    """
    # Overlays visible to this build (may exclude e.g. internal overlays
    # on an external build).
    visible_overlays = set(portage_util.FindOverlays(config.overlays, None,
                                                     build_root))
    # The overlays relevant to this build.
    relevant_overlays = GetRelevantOverlaysForConfig(config, build_root)

    overlay_changes = set()
    irrelevant_overlay_changes = set()
    for change in changes:
      affected_overlays = GetAffectedOverlays(change, manifest,
                                              visible_overlays)
      if affected_overlays is not None:
        # The change modifies an overlay.
        overlay_changes.add(change)
        if not any(x in relevant_overlays for x in affected_overlays):
          # The change touched an irrelevant overlay.
          irrelevant_overlay_changes.add(change)
          continue

        if packages_under_test:
          # If the change modifies packages that are not part of this
          # build, they are considered irrelevant too.
          packages = GetAffectedPackagesForOverlayChange(
              change, manifest, visible_overlays)
          if packages:
            logging.info('%s affects packages %s',
                         cros_patch.GetChangesAsString([change]),
                         ', '.join(packages))
            if not any(x in packages_under_test for x in packages):
              irrelevant_overlay_changes.add(change)

    return overlay_changes, irrelevant_overlay_changes

  @classmethod
  def ClassifyWorkOnChanges(cls, changes, config, build_root,
                            manifest, packages_under_test):
    """Classifies WorkOn package changes in |changes|.

    Args:
      changes: The list or set of GerritPatch instances.
      config: The cbuildbot config.
      build_root: Path to the build root.
      manifest: A ManifestCheckout instance representing our build directory.
      packages_under_test: A list of packages names included in the build.
        (e.g. ['chromeos-base/chromite-0.0.1-r1258']).

    Returns:
      A (workon_changes, irrelevant_workon_changes) tuple; workon_changes
      is a subset of |changes| that have modified workon packages, and
      irrelevant_workon_changes is a subset of workon_changes which are
      irrelevant to |config|.
    """
    workon_changes = set()
    irrelevant_workon_changes = set()
    # Maps (project, branch) -> list of workon packages for that repo.
    workon_dict = portage_util.BuildFullWorkonPackageDictionary(
        build_root, config.overlays, manifest)

    pp = pprint.PrettyPrinter(indent=2)
    logging.info('(project, branch) to workon package mapping:\n %s',
                 pp.pformat(workon_dict))
    logging.info('packages under test\n: %s', pp.pformat(packages_under_test))

    for change in changes:
      packages = workon_dict.get((change.project, change.tracking_branch))
      if packages:
        # The CL modifies a workon package.
        workon_changes.add(change)
        if all(x not in packages_under_test for x in packages):
          irrelevant_workon_changes.add(change)

    return workon_changes, irrelevant_workon_changes

  @classmethod
  def _FilterProjectsInManifestByGroup(cls, manifest, groups):
    """Filters projects in |manifest| by |groups|.

    Args:
      manifest: A git.Manifest instance.
      groups: A list of groups to filter.

    Returns:
      A set of (project, branch) tuples where each tuple is associated
      with at least one group in |groups|.
    """
    results = set()
    for project, checkout_list in manifest.checkouts_by_name.iteritems():
      for checkout in checkout_list:
        if any(x in checkout['groups'] for x in groups):
          branch = git.StripRefs(checkout['tracking_branch'])
          results.add((project, branch))

    return results

  @classmethod
  def GetChangesToBuildTools(cls, changes, manifest):
    """Returns the changes associated with buildtools projects.

    Args:
      changes: The list or set of GerritPatch instances.
      manifest: A git.Manifest instance.

    Returns:
      A subset of |changes| to projects of "buildtools" group.
    """
    buildtool_set = cls._FilterProjectsInManifestByGroup(
        manifest, ['buildtools'])
    return set([x for x in changes if (x.project, x.tracking_branch)
                in buildtool_set])

  @classmethod
  def GetIrrelevantChanges(cls, changes, config, build_root, manifest,
                           packages_under_test):
    """Determine changes irrelevant to build |config|.

    This method determine a set of changes that are irrelevant to the
    build |config|. The general rule of thumb is that if we are unsure
    whether a change is relevant, consider it relevant.

    Args:
      changes: The list or set of GerritPatch instances.
      config: The cbuildbot config.
      build_root: Path to the build root.
      manifest: A ManifestCheckout instance representing our build directory.
      packages_under_test: A list of packages that were tested in this build.

    Returns:
      A subset of |changes| which are irrelevant to |config|.
    """
    untriaged_changes = set(changes)
    irrelevant_changes = set()

    # Changes that modify projects used in building are always relevant.
    untriaged_changes -= cls.GetChangesToBuildTools(changes, manifest)

    if packages_under_test is not None:
      # Strip the version of the package in packages_under_test.
      cpv_list = [portage_util.SplitCPV(x) for x in packages_under_test]
      packages_under_test = ['%s/%s' % (x.category, x.package) for x in
                             cpv_list]

    # Handles overlay changes.
    # ClassifyOverlayChanges only handles overlays visible to this
    # build. For example, an external build may not be able to view
    # the internal overlays. However, in that case, the internal changes
    # have already been filtered out in CommitQueueSyncStage, and are
    # not included in |changes|.
    overlay_changes, irrelevant_overlay_changes = cls.ClassifyOverlayChanges(
        untriaged_changes, config, build_root, manifest, packages_under_test)
    untriaged_changes -= overlay_changes
    irrelevant_changes |= irrelevant_overlay_changes

    # Handles workon package changes.
    if packages_under_test is not None:
      try:
        workon_changes, irrelevant_workon_changes = cls.ClassifyWorkOnChanges(
            untriaged_changes, config, build_root, manifest,
            packages_under_test)
      except Exception as e:
        # Ignore the exception if we cannot categorize workon
        # changes. We will conservatively assume the changes are
        # relevant.
        logging.warning('Unable to categorize cros workon changes: %s', e)
      else:
        untriaged_changes -= workon_changes
        irrelevant_changes |= irrelevant_workon_changes

    return irrelevant_changes
class CalculateSuspects(object):
  """Diagnose the cause for a given set of failures."""

  @classmethod
  def GetBlamedChanges(cls, changes):
    """Returns the changes that have been manually blamed.

    Args:
      changes: List of GerritPatch changes.

    Returns:
      A list of |changes| that were marked verified: -1 or
      code-review: -2.
    """
    # Load the latest info about whether the changes were vetoed, in case they
    # were vetoed in the middle of a cbuildbot run. That said, be careful not to
    # return info about newer patchsets.
    reloaded_changes = gerrit.GetGerritPatchInfoWithPatchQueries(changes)
    return [x for x, y in zip(changes, reloaded_changes) if y.WasVetoed()]

  @classmethod
  def _FindPackageBuildFailureSuspects(cls, changes, messages, sanity):
    """Figure out what CLs are at fault for a set of build failures.

    Args:
      changes: A list of cros_patch.GerritPatch instances to consider.
      messages: A list of failure messages. We will only look at the ones of
        type BuildFailureMessage.
      sanity: The sanity checker builder passed and the tree was open when
        the build started.

    Returns:
      A set of changes suspected of causing the package build failures.
    """
    suspects = set()
    for message in messages:
      if message:
        suspects.update(
            message.FindPackageBuildFailureSuspects(changes, sanity))
      elif sanity:
        # A None message means the builder did not report what failed;
        # conservatively blame every change.
        suspects.update(changes)
    return suspects

  @classmethod
  def FilterChangesForInfraFail(cls, changes):
    """Returns a list of changes responsible for infra failures.

    Args:
      changes: A list of GerritPatch changes to filter.

    Returns:
      The subset of |changes| belonging to infra projects.
    """
    # Chromite changes could cause infra failures.
    return [x for x in changes if x.project in constants.INFRA_PROJECTS]

  @classmethod
  def _MatchesFailureType(cls, messages, fail_type, strict=True):
    """Returns True if all failures are instances of |fail_type|.

    Args:
      messages: A list of BuildFailureMessage or NoneType objects
        from the failed slaves.
      fail_type: The exception class to look for.
      strict: If False, treat NoneType message as a match.

    Returns:
      True if all objects in |messages| are non-None and all failures are
      instances of |fail_type|.
    """
    return ((not strict or all(messages)) and
            all(x.MatchesFailureType(fail_type) for x in messages if x))

  @classmethod
  def OnlyLabFailures(cls, messages, no_stat):
    """Determine if the cause of build failure was lab failure.

    Args:
      messages: A list of BuildFailureMessage or NoneType objects
        from the failed slaves.
      no_stat: A list of builders which failed prematurely without reporting
        status.

    Returns:
      True if the build failed purely due to lab failures.
    """
    # If any builder failed prematurely, lab failure was not the only cause.
    return (not no_stat and
            cls._MatchesFailureType(messages, failures_lib.TestLabFailure))

  @classmethod
  def OnlyInfraFailures(cls, messages, no_stat):
    """Determine if the cause of build failure was infrastructure failure.

    Args:
      messages: A list of BuildFailureMessage or NoneType objects
        from the failed slaves.
      no_stat: A list of builders which failed prematurely without reporting
        status.

    Returns:
      True if the build failed purely due to infrastructure failures.
    """
    # "Failed to report status" and "NoneType" messages are considered
    # infra failures.
    return ((not messages and no_stat) or
            cls._MatchesFailureType(
                messages, failures_lib.InfrastructureFailure, strict=False))

  @classmethod
  def FindSuspects(cls, changes, messages, infra_fail=False, lab_fail=False,
                   sanity=True):
    """Find out what changes probably caused our failure.

    In cases where there were no internal failures, we can assume that the
    external failures are at fault. Otherwise, this function just defers to
    _FindPackageBuildFailureSuspects and GetBlamedChanges as needed.
    If the failures don't match either case, just fail everything.

    Args:
      changes: A list of cros_patch.GerritPatch instances to consider.
      messages: A list of build failure messages, of type
        BuildFailureMessage or of type NoneType.
      infra_fail: The build failed purely due to infrastructure failures.
      lab_fail: The build failed purely due to test lab infrastructure
        failures.
      sanity: The sanity checker builder passed and the tree was open when
        the build started.

    Returns:
      A set of changes as suspects.
    """
    bad_changes = cls.GetBlamedChanges(changes)
    if bad_changes:
      # If there are changes that have been set verified=-1 or
      # code-review=-2, these changes are the ONLY suspects of the
      # failed build.
      logging.warning('Detected that some changes have been blamed for '
                      'the build failure. Only these CLs will be rejected: %s',
                      cros_patch.GetChangesAsString(bad_changes))
      return set(bad_changes)
    elif lab_fail:
      logging.warning('Detected that the build failed purely due to HW '
                      'Test Lab failure(s). Will not reject any changes')
      return set()
    elif infra_fail:
      # The non-lab infrastructure errors might have been caused
      # by chromite changes.
      logging.warning(
          'Detected that the build failed due to non-lab infrastructure '
          'issue(s). Will only reject chromite changes')
      return set(cls.FilterChangesForInfraFail(changes))

    return cls._FindPackageBuildFailureSuspects(changes, messages, sanity)

  @classmethod
  def GetResponsibleOverlays(cls, build_root, messages):
    """Get the set of overlays that could have caused failures.

    This loops through the set of builders that failed in a given run and
    finds what overlays could have been responsible for the failure.

    Args:
      build_root: Build root directory.
      messages: A list of build failure messages from supporting builders.
        These must be BuildFailureMessage objects or NoneType objects.

    Returns:
      The set of overlays that could have caused the failures. If we can't
      determine what overlays are responsible, returns None.
    """
    responsible_overlays = set()
    for message in messages:
      if message is None:
        # A builder that did not report its failure; we can't narrow down
        # the responsible overlays.
        return None
      bot_id = message.builder
      config = cbuildbot_config.GetConfig().get(bot_id)
      if not config:
        # Unknown builder config; can't attribute the failure.
        return None
      responsible_overlays.update(
          GetRelevantOverlaysForConfig(config, build_root))
    return responsible_overlays

  @classmethod
  def FilterOutInnocentChanges(cls, build_root, changes, messages):
    """Filter out innocent changes based on failure messages.

    Args:
      build_root: Build root directory.
      changes: GitRepoPatches that might be guilty.
      messages: A list of build failure messages from supporting builders.
        These must be BuildFailureMessage objects or NoneType objects.

    Returns:
      A list of the changes that we could not prove innocent.
    """
    # If there were no internal failures, only kick out external changes.
    # (Still, fail all changes if we received any None messages.)
    candidates = changes
    if all(messages) and not any(message.internal for message in messages):
      candidates = [change for change in changes if not change.internal]
    return cls.FilterOutInnocentOverlayChanges(build_root, candidates, messages)

  @classmethod
  def FilterOutInnocentOverlayChanges(cls, build_root, changes, messages):
    """Filter out innocent overlay changes based on failure messages.

    It is not possible to break a x86-generic builder via a change to an
    unrelated overlay (e.g. amd64-generic). Filter out changes that are
    known to be innocent.

    Args:
      build_root: Build root directory.
      changes: GitRepoPatches that might be guilty.
      messages: A list of build failure messages from supporting builders.
        These must be BuildFailureMessage objects or NoneType objects.

    Returns:
      A list of the changes that we could not prove innocent.
    """
    all_overlays = set(portage_util.FindOverlays(
        constants.BOTH_OVERLAYS, None, build_root))
    responsible_overlays = cls.GetResponsibleOverlays(build_root, messages)
    if responsible_overlays is None:
      # Can't tell which overlays were responsible; keep every change.
      return changes
    manifest = git.ManifestCheckout.Cached(build_root)
    candidates = []
    for change in changes:
      overlays = GetAffectedOverlays(change, manifest, all_overlays)
      # Keep the change unless it only touched overlays outside the
      # responsible set.
      if overlays is None or overlays.issubset(responsible_overlays):
        candidates.append(change)
    return candidates

  @classmethod
  def _CanIgnoreFailures(cls, messages, change, build_root):
    """Examine whether we can ignore the failures for |change|.

    Examine the |messages| to see if we are allowed to ignore
    the failures based on the per-repository settings in
    COMMIT-QUEUE.ini.

    Args:
      messages: A list of BuildFailureMessage from the failed slaves.
      change: A GerritPatch instance to examine.
      build_root: Build root directory.

    Returns:
      True if we can ignore the failures; False otherwise.
    """
    # Some repositories may opt to ignore certain stage failures.
    failing_stages = set()
    if any(x.GetFailingStages() is None for x in messages):
      # If there are no tracebacks, that means that the builder
      # did not report its status properly. We don't know what
      # stages failed and cannot safely ignore any stage.
      return False

    for message in messages:
      failing_stages.update(message.GetFailingStages())

    ignored_stages = GetStagesToIgnoreForChange(build_root, change)
    if ignored_stages and failing_stages.issubset(ignored_stages):
      return True

    return False

  @classmethod
  def GetFullyVerifiedChanges(cls, changes, changes_by_config, failing,
                              inflight, no_stat, messages, build_root):
    """Examines build failures and returns a set of fully verified changes.

    A change is fully verified if all the build configs relevant to
    this change have either passed or failed in a manner that can be
    safely ignored by the change.

    Args:
      changes: A list of GerritPatch instances to examine.
      changes_by_config: A dictionary of relevant changes indexed by the
        config names.
      failing: Names of the builders that failed.
      inflight: Names of the builders that timed out.
      no_stat: Set of builder names of slave builders that had status None.
      messages: A list of BuildFailureMessage or NoneType objects from
        the failed slaves.
      build_root: Build root directory.

    Returns:
      A set of fully verified changes.
    """
    changes = set(changes)
    no_stat = set(no_stat)
    failing = set(failing)
    inflight = set(inflight)

    fully_verified = set()
    all_tested_changes = set()
    for tested_changes in changes_by_config.itervalues():
      all_tested_changes.update(tested_changes)

    untested_changes = changes - all_tested_changes
    if untested_changes:
      # Some board overlay changes were not tested by CQ at all.
      logging.info('These changes were not tested by any slaves, '
                   'so they will be submitted: %s',
                   cros_patch.GetChangesAsString(untested_changes))
      fully_verified.update(untested_changes)

    for change in all_tested_changes:
      # If all relevant configs associated with a change passed, the
      # change is fully verified.
      relevant_configs = [k for k, v in changes_by_config.iteritems() if
                          change in v]
      # A config that never reported or is still running cannot verify
      # the change.
      if any(x in set.union(no_stat, inflight) for x in relevant_configs):
        continue

      failed_configs = [x for x in relevant_configs if x in failing]
      if not failed_configs:
        logging.info('All the %s relevant config(s) for change %s passed, so '
                     'it will be submitted.', len(relevant_configs),
                     cros_patch.GetChangesAsString([change]))
        fully_verified.add(change)
      else:
        # Examine the failures and see if we can safely ignore them
        # for the change.
        failed_messages = [x for x in messages if x.builder in failed_configs]
        if cls._CanIgnoreFailures(failed_messages, change, build_root):
          logging.info('All failures of relevant configs for change %s are '
                       'ignorable by this change, so it will be submitted.',
                       cros_patch.GetChangesAsString([change]))
          fully_verified.add(change)

    return fully_verified
| |
# coding: utf-8
from datetime import date, datetime
from typing import List, Dict, Type
from openapi_server.models.base_model_ import Model
from openapi_server import util
class MultibranchPipeline(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, display_name: str=None, estimated_duration_in_millis: int=None, latest_run: str=None, name: str=None, organization: str=None, weather_score: int=None, branch_names: List[str]=None, number_of_failing_branches: int=None, number_of_failing_pull_requests: int=None, number_of_successful_branches: int=None, number_of_successful_pull_requests: int=None, total_number_of_branches: int=None, total_number_of_pull_requests: int=None, _class: str=None):
"""MultibranchPipeline - a model defined in OpenAPI
:param display_name: The display_name of this MultibranchPipeline.
:param estimated_duration_in_millis: The estimated_duration_in_millis of this MultibranchPipeline.
:param latest_run: The latest_run of this MultibranchPipeline.
:param name: The name of this MultibranchPipeline.
:param organization: The organization of this MultibranchPipeline.
:param weather_score: The weather_score of this MultibranchPipeline.
:param branch_names: The branch_names of this MultibranchPipeline.
:param number_of_failing_branches: The number_of_failing_branches of this MultibranchPipeline.
:param number_of_failing_pull_requests: The number_of_failing_pull_requests of this MultibranchPipeline.
:param number_of_successful_branches: The number_of_successful_branches of this MultibranchPipeline.
:param number_of_successful_pull_requests: The number_of_successful_pull_requests of this MultibranchPipeline.
:param total_number_of_branches: The total_number_of_branches of this MultibranchPipeline.
:param total_number_of_pull_requests: The total_number_of_pull_requests of this MultibranchPipeline.
:param _class: The _class of this MultibranchPipeline.
"""
self.openapi_types = {
'display_name': str,
'estimated_duration_in_millis': int,
'latest_run': str,
'name': str,
'organization': str,
'weather_score': int,
'branch_names': List[str],
'number_of_failing_branches': int,
'number_of_failing_pull_requests': int,
'number_of_successful_branches': int,
'number_of_successful_pull_requests': int,
'total_number_of_branches': int,
'total_number_of_pull_requests': int,
'_class': str
}
self.attribute_map = {
'display_name': 'displayName',
'estimated_duration_in_millis': 'estimatedDurationInMillis',
'latest_run': 'latestRun',
'name': 'name',
'organization': 'organization',
'weather_score': 'weatherScore',
'branch_names': 'branchNames',
'number_of_failing_branches': 'numberOfFailingBranches',
'number_of_failing_pull_requests': 'numberOfFailingPullRequests',
'number_of_successful_branches': 'numberOfSuccessfulBranches',
'number_of_successful_pull_requests': 'numberOfSuccessfulPullRequests',
'total_number_of_branches': 'totalNumberOfBranches',
'total_number_of_pull_requests': 'totalNumberOfPullRequests',
'_class': '_class'
}
self._display_name = display_name
self._estimated_duration_in_millis = estimated_duration_in_millis
self._latest_run = latest_run
self._name = name
self._organization = organization
self._weather_score = weather_score
self._branch_names = branch_names
self._number_of_failing_branches = number_of_failing_branches
self._number_of_failing_pull_requests = number_of_failing_pull_requests
self._number_of_successful_branches = number_of_successful_branches
self._number_of_successful_pull_requests = number_of_successful_pull_requests
self._total_number_of_branches = total_number_of_branches
self._total_number_of_pull_requests = total_number_of_pull_requests
self.__class = _class
@classmethod
def from_dict(cls, dikt: dict) -> 'MultibranchPipeline':
"""Returns the dict as a model
:param dikt: A dict.
:return: The MultibranchPipeline of this MultibranchPipeline.
"""
return util.deserialize_model(dikt, cls)
@property
def display_name(self):
"""Gets the display_name of this MultibranchPipeline.
:return: The display_name of this MultibranchPipeline.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this MultibranchPipeline.
:param display_name: The display_name of this MultibranchPipeline.
:type display_name: str
"""
self._display_name = display_name
@property
def estimated_duration_in_millis(self):
"""Gets the estimated_duration_in_millis of this MultibranchPipeline.
:return: The estimated_duration_in_millis of this MultibranchPipeline.
:rtype: int
"""
return self._estimated_duration_in_millis
@estimated_duration_in_millis.setter
def estimated_duration_in_millis(self, estimated_duration_in_millis):
"""Sets the estimated_duration_in_millis of this MultibranchPipeline.
:param estimated_duration_in_millis: The estimated_duration_in_millis of this MultibranchPipeline.
:type estimated_duration_in_millis: int
"""
self._estimated_duration_in_millis = estimated_duration_in_millis
@property
def latest_run(self):
"""Gets the latest_run of this MultibranchPipeline.
:return: The latest_run of this MultibranchPipeline.
:rtype: str
"""
return self._latest_run
@latest_run.setter
def latest_run(self, latest_run):
"""Sets the latest_run of this MultibranchPipeline.
:param latest_run: The latest_run of this MultibranchPipeline.
:type latest_run: str
"""
self._latest_run = latest_run
@property
def name(self):
"""Gets the name of this MultibranchPipeline.
:return: The name of this MultibranchPipeline.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this MultibranchPipeline.
:param name: The name of this MultibranchPipeline.
:type name: str
"""
self._name = name
@property
def organization(self):
"""Gets the organization of this MultibranchPipeline.
:return: The organization of this MultibranchPipeline.
:rtype: str
"""
return self._organization
@organization.setter
def organization(self, organization):
"""Sets the organization of this MultibranchPipeline.
:param organization: The organization of this MultibranchPipeline.
:type organization: str
"""
self._organization = organization
@property
def weather_score(self):
"""Gets the weather_score of this MultibranchPipeline.
:return: The weather_score of this MultibranchPipeline.
:rtype: int
"""
return self._weather_score
@weather_score.setter
def weather_score(self, weather_score):
"""Sets the weather_score of this MultibranchPipeline.
:param weather_score: The weather_score of this MultibranchPipeline.
:type weather_score: int
"""
self._weather_score = weather_score
@property
def branch_names(self):
"""Gets the branch_names of this MultibranchPipeline.
:return: The branch_names of this MultibranchPipeline.
:rtype: List[str]
"""
return self._branch_names
@branch_names.setter
def branch_names(self, branch_names):
"""Sets the branch_names of this MultibranchPipeline.
:param branch_names: The branch_names of this MultibranchPipeline.
:type branch_names: List[str]
"""
self._branch_names = branch_names
@property
def number_of_failing_branches(self):
"""Gets the number_of_failing_branches of this MultibranchPipeline.
:return: The number_of_failing_branches of this MultibranchPipeline.
:rtype: int
"""
return self._number_of_failing_branches
@number_of_failing_branches.setter
def number_of_failing_branches(self, number_of_failing_branches):
"""Sets the number_of_failing_branches of this MultibranchPipeline.
:param number_of_failing_branches: The number_of_failing_branches of this MultibranchPipeline.
:type number_of_failing_branches: int
"""
self._number_of_failing_branches = number_of_failing_branches
@property
def number_of_failing_pull_requests(self):
"""Gets the number_of_failing_pull_requests of this MultibranchPipeline.
:return: The number_of_failing_pull_requests of this MultibranchPipeline.
:rtype: int
"""
return self._number_of_failing_pull_requests
@number_of_failing_pull_requests.setter
def number_of_failing_pull_requests(self, number_of_failing_pull_requests):
"""Sets the number_of_failing_pull_requests of this MultibranchPipeline.
:param number_of_failing_pull_requests: The number_of_failing_pull_requests of this MultibranchPipeline.
:type number_of_failing_pull_requests: int
"""
self._number_of_failing_pull_requests = number_of_failing_pull_requests
@property
def number_of_successful_branches(self):
"""Gets the number_of_successful_branches of this MultibranchPipeline.
:return: The number_of_successful_branches of this MultibranchPipeline.
:rtype: int
"""
return self._number_of_successful_branches
@number_of_successful_branches.setter
def number_of_successful_branches(self, number_of_successful_branches):
"""Sets the number_of_successful_branches of this MultibranchPipeline.
:param number_of_successful_branches: The number_of_successful_branches of this MultibranchPipeline.
:type number_of_successful_branches: int
"""
self._number_of_successful_branches = number_of_successful_branches
    @property
    def number_of_successful_pull_requests(self):
        """Gets the number_of_successful_pull_requests of this MultibranchPipeline.

        :return: The number_of_successful_pull_requests of this MultibranchPipeline.
        :rtype: int
        """
        return self._number_of_successful_pull_requests

    @number_of_successful_pull_requests.setter
    def number_of_successful_pull_requests(self, number_of_successful_pull_requests):
        """Sets the number_of_successful_pull_requests of this MultibranchPipeline.

        :param number_of_successful_pull_requests: The number_of_successful_pull_requests of this MultibranchPipeline.
        :type number_of_successful_pull_requests: int
        """
        self._number_of_successful_pull_requests = number_of_successful_pull_requests
    @property
    def total_number_of_branches(self):
        """Gets the total_number_of_branches of this MultibranchPipeline.

        :return: The total_number_of_branches of this MultibranchPipeline.
        :rtype: int
        """
        return self._total_number_of_branches

    @total_number_of_branches.setter
    def total_number_of_branches(self, total_number_of_branches):
        """Sets the total_number_of_branches of this MultibranchPipeline.

        :param total_number_of_branches: The total_number_of_branches of this MultibranchPipeline.
        :type total_number_of_branches: int
        """
        self._total_number_of_branches = total_number_of_branches
    @property
    def total_number_of_pull_requests(self):
        """Gets the total_number_of_pull_requests of this MultibranchPipeline.

        :return: The total_number_of_pull_requests of this MultibranchPipeline.
        :rtype: int
        """
        return self._total_number_of_pull_requests

    @total_number_of_pull_requests.setter
    def total_number_of_pull_requests(self, total_number_of_pull_requests):
        """Sets the total_number_of_pull_requests of this MultibranchPipeline.

        :param total_number_of_pull_requests: The total_number_of_pull_requests of this MultibranchPipeline.
        :type total_number_of_pull_requests: int
        """
        self._total_number_of_pull_requests = total_number_of_pull_requests
    @property
    def _class(self):
        """Gets the _class of this MultibranchPipeline.

        :return: The _class of this MultibranchPipeline.
        :rtype: str
        """
        # Backing field is self.__class (name-mangled) so it cannot collide
        # with the `_class` property name itself.
        return self.__class

    @_class.setter
    def _class(self, _class):
        """Sets the _class of this MultibranchPipeline.

        :param _class: The _class of this MultibranchPipeline.
        :type _class: str
        """
        self.__class = _class
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
import contextlib
import math
import sys
import unittest
from paddle.fluid.executor import Executor
# Configuration for the WMT'14 toy seq2seq smoke test.
dict_size = 30000
source_dict_dim = target_dict_dim = dict_size
# NOTE(review): fetches the WMT14 dictionaries at import time (network/disk
# side effect).
src_dict, trg_dict = paddle.dataset.wmt14.get_dict(dict_size)
hidden_dim = 32
embedding_dim = 16
batch_size = 10
max_length = 50  # NOTE(review): not referenced in visible code — confirm before removing
topk_size = 50  # NOTE(review): not referenced in visible code — confirm before removing
encoder_size = decoder_size = hidden_dim
IS_SPARSE = True  # NOTE(review): not referenced in visible code — confirm before removing
USE_PEEPHOLES = False
def bi_lstm_encoder(input_seq, hidden_size):
    """Bidirectional LSTM encoder.

    Runs one forward and one reversed dynamic LSTM over `input_seq`
    (each preceded by its own fc projection) and returns the pair
    (last forward step, first backward step).
    """
    def _lstm_pass(is_reverse):
        # Project the input once per direction; dynamic_lstm expects 4*size.
        projection = fluid.layers.fc(input=input_seq,
                                     size=hidden_size * 4,
                                     bias_attr=True)
        hidden, _ = fluid.layers.dynamic_lstm(
            input=projection,
            size=hidden_size * 4,
            is_reverse=is_reverse,
            use_peepholes=USE_PEEPHOLES)
        return hidden

    forward = _lstm_pass(False)
    backward = _lstm_pass(True)

    forward_last = fluid.layers.sequence_last_step(input=forward)
    backward_first = fluid.layers.sequence_first_step(input=backward)
    return forward_last, backward_first
# FIXME(peterzhang2029): Replace this function with the lstm_unit_op.
def lstm_step(x_t, hidden_t_prev, cell_t_prev, size):
    """One hand-built LSTM cell step; returns (hidden_t, cell_t)."""
    def linear(inputs):
        # Fused affine projection of [h_{t-1}, x_t] with bias.
        return fluid.layers.fc(input=inputs, size=size, bias_attr=True)

    gate_input = [hidden_t_prev, x_t]
    forget_gate = fluid.layers.sigmoid(x=linear(gate_input))
    input_gate = fluid.layers.sigmoid(x=linear(gate_input))
    output_gate = fluid.layers.sigmoid(x=linear(gate_input))
    cell_tilde = fluid.layers.tanh(x=linear(gate_input))

    # c_t = f * c_{t-1} + i * c~
    kept_cell = fluid.layers.elementwise_mul(x=forget_gate, y=cell_t_prev)
    new_cell = fluid.layers.elementwise_mul(x=input_gate, y=cell_tilde)
    cell_t = fluid.layers.sums(input=[kept_cell, new_cell])

    # h_t = o * tanh(c_t)
    hidden_t = fluid.layers.elementwise_mul(
        x=output_gate, y=fluid.layers.tanh(x=cell_t))
    return hidden_t, cell_t
def lstm_decoder_without_attention(target_embedding, decoder_boot, context,
                                   decoder_size):
    """Decoder: a DynamicRNN of manual lstm_step calls conditioned on `context`.

    :param target_embedding: embedded target sequence (teacher forcing)
    :param decoder_boot: initial hidden state projected from the encoder
    :param context: fixed encoder vector concatenated onto every step input
    :param decoder_size: decoder hidden size
    :return: per-step softmax over the target vocabulary
    """
    rnn = fluid.layers.DynamicRNN()

    # Zero-valued initial cell state with the same batch shape as decoder_boot.
    cell_init = fluid.layers.fill_constant_batch_size_like(
        input=decoder_boot,
        value=0.0,
        shape=[-1, decoder_size],
        dtype='float32')
    cell_init.stop_gradient = False

    with rnn.block():
        current_word = rnn.step_input(target_embedding)
        # static_input: the same context tensor is visible at every time step.
        context = rnn.static_input(context)

        hidden_mem = rnn.memory(init=decoder_boot, need_reorder=True)
        cell_mem = rnn.memory(init=cell_init)
        decoder_inputs = fluid.layers.concat(
            input=[context, current_word], axis=1)
        h, c = lstm_step(decoder_inputs, hidden_mem, cell_mem, decoder_size)
        rnn.update_memory(hidden_mem, h)
        rnn.update_memory(cell_mem, c)
        out = fluid.layers.fc(input=h,
                              size=target_dict_dim,
                              bias_attr=True,
                              act='softmax')
        rnn.output(out)
    return rnn()
def seq_to_seq_net():
    """Construct a seq2seq network.

    Encoder: bidirectional LSTM over the source sentence.
    Decoder: LSTM without attention, bootstrapped from the backward state.
    :return: (average cross-entropy cost, per-step prediction)
    """
    src_word_idx = fluid.layers.data(
        name='source_sequence', shape=[1], dtype='int64', lod_level=1)

    src_embedding = fluid.layers.embedding(
        input=src_word_idx,
        size=[source_dict_dim, embedding_dim],
        dtype='float32')

    src_forward_last, src_backward_first = bi_lstm_encoder(
        input_seq=src_embedding, hidden_size=encoder_size)

    # Fixed-size sentence representation fed to every decoder step.
    encoded_vector = fluid.layers.concat(
        input=[src_forward_last, src_backward_first], axis=1)

    # Initial decoder hidden state, projected from the backward encoder state.
    decoder_boot = fluid.layers.fc(input=src_backward_first,
                                   size=decoder_size,
                                   bias_attr=False,
                                   act='tanh')

    trg_word_idx = fluid.layers.data(
        name='target_sequence', shape=[1], dtype='int64', lod_level=1)

    trg_embedding = fluid.layers.embedding(
        input=trg_word_idx,
        size=[target_dict_dim, embedding_dim],
        dtype='float32')

    prediction = lstm_decoder_without_attention(trg_embedding, decoder_boot,
                                                encoded_vector, decoder_size)
    label = fluid.layers.data(
        name='label_sequence', shape=[1], dtype='int64', lod_level=1)
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_cost = fluid.layers.mean(cost)

    return avg_cost, prediction
def train(use_cuda, save_dirname=None):
    """Train the seq2seq net for a few batches and save an inference model.

    Deliberately stops after batch 3 (of up to 2 passes) — this is a smoke
    test, not a convergence run.
    """
    [avg_cost, prediction] = seq_to_seq_net()

    optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4)
    optimizer.minimize(avg_cost)

    train_data = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.wmt14.train(dict_size), buf_size=1000),
        batch_size=batch_size)

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = Executor(place)
    exe.run(framework.default_startup_program())

    # Feed vars are looked up by name in the default main program.
    feed_order = ['source_sequence', 'target_sequence', 'label_sequence']
    feed_list = [
        framework.default_main_program().global_block().var(var_name)
        for var_name in feed_order
    ]
    feeder = fluid.DataFeeder(feed_list, place)

    batch_id = 0
    for pass_id in range(2):
        for data in train_data():
            outs = exe.run(framework.default_main_program(),
                           feed=feeder.feed(data),
                           fetch_list=[avg_cost])

            avg_cost_val = np.array(outs[0])
            print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) +
                  " avg_cost=" + str(avg_cost_val))
            # Diverging loss should fail the test loudly.
            if math.isnan(float(avg_cost_val[0])):
                sys.exit("got NaN loss, training failed.")
            # Early exit: a handful of batches is enough for the smoke test.
            if batch_id > 3:
                if save_dirname is not None:
                    fluid.io.save_inference_model(
                        save_dirname, ['source_sequence',
                                       'target_sequence'], [prediction], exe)
                return

            batch_id += 1
def infer(use_cuda, save_dirname=None):
    """Load the saved inference model and run it on tiny random LoD inputs."""
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be feeded
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        # Setup input by creating LoDTensor to represent sequence of words.
        # Here each word is the basic element of the LoDTensor and the shape of
        # each word (base_shape) should be [1] since it is simply an index to
        # look up for the corresponding word vector.
        # Suppose the recursive_sequence_lengths info is set to [[4, 6]],
        # which has only one level of detail. Then the created LoDTensor will have only
        # one higher level structure (sequence of words, or sentence) than the basic
        # element (word). Hence the LoDTensor will hold data for two sentences of
        # length 4 and 6, respectively.
        # Note that recursive_sequence_lengths should be a list of lists.
        recursive_seq_lens = [[4, 6]]
        base_shape = [1]
        # The range of random integers is [low, high]
        word_data = fluid.create_random_int_lodtensor(
            recursive_seq_lens, base_shape, place, low=0, high=1)
        trg_word = fluid.create_random_int_lodtensor(
            recursive_seq_lens, base_shape, place, low=0, high=1)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        assert feed_target_names[0] == 'source_sequence'
        assert feed_target_names[1] == 'target_sequence'
        results = exe.run(inference_program,
                          feed={
                              feed_target_names[0]: word_data,
                              feed_target_names[1]: trg_word,
                          },
                          fetch_list=fetch_targets,
                          return_numpy=False)
        print(results[0].recursive_sequence_lengths())
        np_data = np.array(results[0])
        print("Inference shape: ", np_data.shape)
        print("Inference results: ", np_data)
def main(use_cuda):
    """Train the model briefly, save it, then run inference on the result."""
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        # CUDA requested but this build has no GPU support: skip silently.
        return

    # Directory for saving the trained model
    save_dirname = "rnn_encoder_decoder.inference.model"

    train(use_cuda, save_dirname)
    infer(use_cuda, save_dirname)
class TestRnnEncoderDecoder(unittest.TestCase):
    """Smoke tests: run main() on CUDA and CPU inside fresh program scopes."""

    def test_cuda(self):
        with self.scope_prog_guard():
            main(use_cuda=True)

    def test_cpu(self):
        with self.scope_prog_guard():
            main(use_cuda=False)

    @contextlib.contextmanager
    def scope_prog_guard(self):
        # Isolate each test in its own scope and programs so global state
        # from one run cannot leak into the next.
        prog = fluid.Program()
        startup_prog = fluid.Program()
        scope = fluid.core.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(prog, startup_prog):
                yield
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| |
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
# Build the manager REST client once at import time; use TLS (trusting any
# certificate) when the agent environment requests HTTPS to the manager.
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
    client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
    client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
    """Coerce every key and value of *envDict* to ``str`` in place.

    Environment dictionaries handed to subprocesses must contain plain
    strings, but callers may supply ints or other types for either side.

    :param envDict: dict mutated in place; non-string keys are replaced by
        their string form.
    """
    # Snapshot the items first: popping entries while iterating the live
    # dict view raises RuntimeError on Python 3 (and is fragile in general).
    for key, value in list(envDict.items()):
        envDict[str(key)] = str(envDict.pop(key))
def get_attribute_user(ctx):
    """Resolve the SSH user: the 'user' attribute, else the agent's user."""
    if not get_attribute(ctx, 'user'):
        # No explicit user attribute: fall back to the Cloudify agent config.
        return get_attribute(ctx, 'cloudify_agent')['user']
    return get_attribute(ctx, 'user')
def get_attribute_key(ctx):
    """Resolve the SSH key: the 'key' attribute, else the agent's key."""
    if not get_attribute(ctx, 'key'):
        # No explicit key attribute: fall back to the Cloudify agent config.
        return get_attribute(ctx, 'cloudify_agent')['key']
    return get_attribute(ctx, 'key')
def get_host(entity):
    """Return the target of this entity's contained_in relationship, or None."""
    relationships = entity.instance.relationships
    if not relationships:
        return None
    for rel in relationships:
        if 'cloudify.relationships.contained_in' in rel.type_hierarchy:
            return rel.target
    return None
def has_attribute_mapping(entity, attribute_name):
    """Tell whether a usable '_a4c_att_' mapping exists for attribute_name.

    A mapping that merely points back to SELF/<same attribute> is ignored,
    since following it would recurse forever.
    """
    ctx.logger.info('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
    mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
    if mapping_configuration is None:
        return False
    parameters = mapping_configuration['parameters']
    return not (parameters[0] == 'SELF' and parameters[1] == attribute_name)
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
    """Follow the '_a4c_att_' mapping for attribute_name and fetch its value.

    SELF mappings re-read another attribute on the same entity; TARGET
    mappings walk the first matching relationship and read from its target.
    Returns "" when the mapping cannot be resolved.
    """
    # This is where attribute mapping is defined in the cloudify type.
    mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
    ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    parameters = mapping_configuration['parameters']
    if parameters[0] == 'SELF':
        return data_retriever_function(entity, parameters[1])
    if parameters[0] == 'TARGET' and entity.instance.relationships:
        for relationship in entity.instance.relationships:
            if parameters[1] in relationship.type_hierarchy:
                return data_retriever_function(relationship.target, parameters[2])
    return ""
def get_nested_attribute(entity, attribute_names):
    """Resolve a nested attribute path.

    The first name is resolved via get_attribute; the remaining names are
    looked up as dict keys. Returns "" as soon as a level resolves to None.
    """
    value = get_attribute(entity, attribute_names[0])
    for attribute_name in attribute_names[1:]:
        if value is None:
            return ""
        value = value.get(attribute_name, None)
    return value
def _all_instances_get_nested_attribute(entity, attribute_names):
    # Stub: per-instance nested-attribute resolution is not implemented;
    # callers must tolerate a None result.
    return None
def get_attribute(entity, attribute_name):
    """Resolve attribute_name on entity.

    Resolution order: '_a4c_att_' mapping, instance runtime properties,
    node properties, then (recursively) the containing host entity.
    Returns "" when nothing is found.
    """
    if has_attribute_mapping(entity, attribute_name):
        # First check if any mapping exists for the attribute
        mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
        ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
        return mapped_value
    # No mapping exists, try to get the attribute directly from the entity
    attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
    if attribute_value is not None:
        ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
        return attribute_value
    # Attribute retrieval fails, fall back to property
    property_value = entity.node.properties.get(attribute_name, None)
    if property_value is not None:
        return property_value
    # Property retrieval fails, fall back to host instance
    host = get_host(entity)
    if host is not None:
        ctx.logger.info('Attribute not found {0} go up to the parent node {1}'.format(attribute_name, host.node.id))
        return get_attribute(host, attribute_name)
    # Nothing is found
    return ""
def _all_instances_get_attribute(entity, attribute_name):
    """Collect attribute_name from every instance of entity's node via REST.

    :return: {instance_id + '_': value} for every instance where the value
        resolves; instances with no value are skipped.
    """
    result_map = {}
    # get all instances data using cfy rest client
    # we have to get the node using the rest client with node_instance.node_id
    # then we will have the relationships
    node = client.nodes.get(ctx.deployment.id, entity.node.id)
    all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
    for node_instance in all_node_instances:
        prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
        if prop_value is not None:
            ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
                                                                                                                  node_instance.id))
            result_map[node_instance.id + '_'] = prop_value
    return result_map
def get_property(entity, property_name):
    """Look up property_name on the node, climbing to the host when absent."""
    property_value = entity.node.properties.get(property_name, None)
    if property_value is not None:
        ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
        return property_value
    # Not on this node: delegate to the containing host, if any.
    host = get_host(entity)
    if host is None:
        return ""
    ctx.logger.info('Property not found {0} go up to the parent node {1}'.format(property_name, host.node.id))
    return get_property(host, property_name)
def get_instance_list(node_id):
    """Comma-separated ids of all instances of node_id in this deployment."""
    all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
    return ','.join(node_instance.id for node_instance in all_node_instances)
def get_host_node_name(instance):
    """Node id of the instance's containing host, or None when not contained."""
    hosts = (rel.target.node.id
             for rel in instance.relationships
             if 'cloudify.relationships.contained_in' in rel.type_hierarchy)
    return next(hosts, None)
def __get_relationship(node, target_name, relationship_type):
    """Find the relationship dict on node matching target and type, else None."""
    for candidate in node.relationships:
        same_target = candidate.get('target_id') == target_name
        if same_target and relationship_type in candidate.get('type_hierarchy'):
            return candidate
    return None
def __has_attribute_mapping(node, attribute_name):
    """REST-data variant of has_attribute_mapping (works on a REST node).

    A mapping that points back to SELF/<same attribute> counts as absent.
    """
    ctx.logger.info('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, node.properties))
    mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
    if mapping_configuration is None:
        return False
    parameters = mapping_configuration['parameters']
    return not (parameters[0] == 'SELF' and parameters[1] == attribute_name)
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
    """REST-data variant of process_attribute_mapping (node + node_instance).

    Returns None (not "") when the mapping cannot be resolved.
    """
    # This is where attribute mapping is defined in the cloudify type
    mapping_configuration = node.properties['_a4c_att_' + attribute_name]
    ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
    # Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET
    if mapping_configuration['parameters'][0] == 'SELF':
        return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
    elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
        for rel in node_instance.relationships:
            # Resolve the full relationship dict (with type_hierarchy) from the node data.
            relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
            if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
                target_instance = client.node_instances.get(rel.get('target_id'))
                target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
                return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
    return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
    """Resolve attribute_name for one instance using REST data.

    Order: attribute mapping, the instance's runtime properties, then the
    containing parent instance (recursively). Returns None when unresolved.
    """
    if __has_attribute_mapping(node, attribute_name):
        return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
    attribute_value = node_instance.runtime_properties.get(attribute_name, None)
    if attribute_value is not None:
        return attribute_value
    elif node_instance.relationships:
        for rel in node_instance.relationships:
            # on rel we have target_name, target_id (instanceId), type
            relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
            if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
                # Climb to the containing parent instance and retry there.
                parent_instance = client.node_instances.get(rel.get('target_id'))
                parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
                return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
        return None
    else:
        return None
# Build the environment passed to the wrapped operation script.
env_map = {}
env_map['NODE'] = ctx.node.id
env_map['INSTANCE'] = ctx.instance.id
env_map['INSTANCES'] = get_instance_list(ctx.node.id)
env_map['HOST'] = get_host_node_name(ctx.instance)
env_map['A4C_EXECUTION_HOST'] = get_attribute(ctx, 'ip_address')
env_map['A4C_EXECUTION_USER'] = get_attribute_user(ctx)
env_map['A4C_EXECUTION_KEY'] = get_attribute_key(ctx)
env_map['FS_MOUNT_PATH'] = r'/var/cbs1'
env_map['PARTITION_NAME'] = get_attribute(ctx, 'partition_name')
# Expose each sibling instance's partition name as <instanceId>_PARTITION_NAME.
other_instances_map = _all_instances_get_attribute(ctx, 'partition_name')
if other_instances_map is not None:
    for other_instances_key in other_instances_map:
        env_map[other_instances_key + 'PARTITION_NAME'] = other_instances_map[other_instances_key]

# Operation-level env vars (inputs.process.env) extend/override the defaults.
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
    ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
    env_map.update(inputs['process']['env'])
def parse_output(output):
    """Parse a wrapped script's stdout into named outputs plus a result line.

    Lines of the form ``EXPECTED_OUTPUT_<NAME>=<value>`` are collected into
    the ``outputs`` dict; by convention, the last remaining line is the
    result of the operation.

    :param output: full standard output of the wrapped script
    :return: {'last_output': str or None, 'outputs': dict}
    """
    # Raw string avoids invalid-escape warnings on '\w'; compiled once,
    # outside the loop.
    pattern = re.compile(r'EXPECTED_OUTPUT_(\w+)=(.*)')
    # by convention, the last output is the result of the operation
    last_output = None
    outputs = {}
    for line in output.splitlines():
        match = pattern.match(line)
        if match is None:
            last_output = line
        else:
            outputs[match.group(1)] = match.group(2)
    return {'last_output': last_output, 'outputs': outputs}
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
    """Run a script in a subprocess, stream its output, and parse results.

    :param script_path: path to the (already downloaded) script; chmod'ed here
    :param process: dict whose optional 'env' entry extends the subprocess env
    :param outputNames: ';'-separated expected output names, or None
    :param command_prefix: optional command prefix (e.g. an interpreter)
    :param cwd: working directory for the subprocess
    :return: parse_output() result for the script's stdout
    :raises NonRecoverableError: when the script exits with a non-zero code
    """
    os.chmod(script_path, 0755)
    on_posix = 'posix' in sys.builtin_module_names

    env = os.environ.copy()
    process_env = process.get('env', {})
    env.update(process_env)

    if outputNames is not None:
        env['EXPECTED_OUTPUTS'] = outputNames
        # The wrapper echoes EXPECTED_OUTPUT_<NAME>=<value> lines so that
        # parse_output can pick the declared outputs out of stdout.
        if platform.system() == 'Windows':
            wrapper_path = ctx.download_resource("scriptWrapper.bat")
        else:
            wrapper_path = ctx.download_resource("scriptWrapper.sh")
        os.chmod(wrapper_path, 0755)
        command = '{0} {1}'.format(wrapper_path, script_path)
    else:
        command = script_path

    if command_prefix is not None:
        command = "{0} {1}".format(command_prefix, command)

    ctx.logger.info('Executing: {0} in env {1}'.format(command, env))

    # NOTE(review): rebinds the `process` parameter to the Popen handle.
    process = subprocess.Popen(command,
                               shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               env=env,
                               cwd=cwd,
                               bufsize=1,
                               close_fds=on_posix)

    return_code = None

    # Drain stdout/stderr on background threads to avoid pipe-buffer deadlock.
    stdout_consumer = OutputConsumer(process.stdout)
    stderr_consumer = OutputConsumer(process.stderr)

    while True:
        return_code = process.poll()
        if return_code is not None:
            break
        time.sleep(0.1)

    stdout_consumer.join()
    stderr_consumer.join()

    parsed_output = parse_output(stdout_consumer.buffer.getvalue())
    if outputNames is not None:
        outputNameList = outputNames.split(';')
        for outputName in outputNameList:
            ctx.logger.info('Ouput name: {0} value : {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))

    if return_code != 0:
        error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
                                                                                                                             stdout_consumer.buffer.getvalue(),
                                                                                                                             stderr_consumer.buffer.getvalue())
        error_message = str(unicode(error_message, errors='ignore'))
        ctx.logger.error(error_message)
        raise NonRecoverableError(error_message)
    else:
        ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
                                                                                                         stderr_consumer.buffer.getvalue())
        ok_message = str(unicode(ok_message, errors='ignore'))
        ctx.logger.info(ok_message)

    return parsed_output
class OutputConsumer(object):
    """Drains a subprocess stream into an in-memory buffer on a daemon thread."""

    def __init__(self, out):
        self.out = out
        self.buffer = StringIO()
        self.consumer = threading.Thread(target=self.consume_output)
        self.consumer.daemon = True
        self.consumer.start()

    def consume_output(self):
        # NOTE(review): the b'' EOF sentinel matches readline() on Python 2
        # byte streams; on Python 3 text streams EOF is '' and this would
        # never terminate — confirm before porting.
        for line in iter(self.out.readline, b''):
            self.buffer.write(line)
        self.out.close()

    def join(self):
        self.consumer.join()
# Run the wrapped mount.sh implementation artifact with the assembled env.
new_script_process = {'env': env_map}
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('_a4c_impl_artifact/LinuxFileSystem1/tosca.interfaces.node.lifecycle.Standard/start/mount.sh'), new_script_process, operationOutputNames)
outputs = parsed_output['outputs'].items()
for k,v in outputs:
    ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
    # Persist each operation output under its TOSCA operation-output key.
    ctx.instance.runtime_properties['_a4c_OO:tosca.interfaces.node.lifecycle.Standard:start:{0}'.format(k)] = v
ctx.instance.runtime_properties['partition_name'] = get_attribute(ctx, '_a4c_OO:tosca.interfaces.relationship.Configure:pre_configure_source:PARTITION_NAME')
ctx.instance.update()
| |
#!/bin/env python
"""
The MIT License
Copyright (c) 2010 The Chicago Tribune & Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from multiprocessing import Pool
import os
import re
import socket
import time
import urllib
import urllib2
import base64
import csv
import sys
import random
import ssl
import boto
import boto.ec2
import paramiko
# Roster file persisting the current swarm (username, key, zone, instance ids).
STATE_FILENAME = os.path.expanduser('~/.bees')
# Utilities
def _read_server_list():
    """Load the swarm roster from STATE_FILENAME.

    :return: (username, key_name, zone, instance_ids); all None when no
        roster file exists.
    """
    instance_ids = []

    if not os.path.isfile(STATE_FILENAME):
        return (None, None, None, None)

    with open(STATE_FILENAME, 'r') as f:
        username = f.readline().strip()
        key_name = f.readline().strip()
        zone = f.readline().strip()
        # Remaining lines are instance ids, one per line; skip blanks.
        text = f.read()
        instance_ids = [i for i in text.split('\n') if i != '']

        print 'Read %i bees from the roster.' % len(instance_ids)

    return (username, key_name, zone, instance_ids)
def _write_server_list(username, key_name, zone, instances):
    """Persist the swarm roster: three header lines, then one instance id per line."""
    with open(STATE_FILENAME, 'w') as f:
        for header_field in (username, key_name, zone):
            f.write('%s\n' % header_field)
        f.write('\n'.join([instance.id for instance in instances]))
def _delete_server_list():
    """Remove the roster file; the swarm is forgotten locally."""
    os.remove(STATE_FILENAME)
def _get_pem_path(key):
    """Absolute path of the private key file ~/.ssh/<key>.pem."""
    return os.path.expanduser('~/.ssh/' + key + '.pem')
def _get_region(zone):
    """Map an availability zone name to its region name.

    GovCloud zone strings are used as-is; otherwise the trailing zone
    letter is dropped (e.g. "us-east-1d" -> "us-east-1").
    """
    if 'gov' in zone:
        return zone
    return zone[:-1]
def _get_security_group_ids(connection, security_group_names, subnet):
    """Resolve security group names to their ids.

    EC2 cannot look up VPC security groups by name, so fetch them all and
    match locally. With no subnet, only EC2-classic groups (no vpc_id)
    match; with a subnet, only VPC-attached groups match.

    :param connection: boto EC2 connection
    :param security_group_names: iterable of group names to resolve
    :param subnet: subnet id, or None for EC2-classic
    :return: list of matching security group ids
    """
    ids = []
    # Since we cannot get security groups in a vpc by name, we get all
    # security groups and parse them by name later.
    security_groups = connection.get_all_security_groups()

    for group in security_groups:
        for name in security_group_names:
            if group.name != name:
                continue
            # PEP 8: compare against None with `is`/`is not`, not ==/!=.
            if subnet is None:
                if group.vpc_id is None:
                    ids.append(group.id)
            elif group.vpc_id is not None:
                ids.append(group.id)

    return ids
# Methods
def up(count, group, zone, image_id, instance_type, username, key_name, subnet, bid = None):
    """
    Startup the load testing server.

    Reuses instances recorded in the roster when the username/key/zone
    match; otherwise tears the old swarm down first. With `bid` set, spot
    instances are requested instead of on-demand ones.
    """
    existing_username, existing_key_name, existing_zone, instance_ids = _read_server_list()
    count = int(count)
    if existing_username == username and existing_key_name == key_name and existing_zone == zone:
        # User, key and zone match existing values and instance ids are found on state file
        if count <= len(instance_ids):
            # Count is less than the amount of existing instances. No need to create new ones.
            print 'Bees are already assembled and awaiting orders.'
            return
        else:
            # Count is greater than the amount of existing instances. Need to create the only the extra instances.
            count -= len(instance_ids)
    elif instance_ids:
        # Instances found on state file but user, key and/or zone not matching existing value.
        # State file only stores one user/key/zone config combination so instances are unusable.
        print 'Taking down {} unusable bees.'.format(len(instance_ids))
        # Redirect prints in down() to devnull to avoid duplicate messages
        _redirect_stdout('/dev/null', down)
        # down() deletes existing state file so _read_server_list() returns a blank state
        existing_username, existing_key_name, existing_zone, instance_ids = _read_server_list()

    pem_path = _get_pem_path(key_name)

    if not os.path.isfile(pem_path):
        print 'Warning. No key file found for %s. You will need to add this key to your SSH agent to connect.' % pem_path

    print 'Connecting to the hive.'

    try:
        ec2_connection = boto.ec2.connect_to_region(_get_region(zone))
    except boto.exception.NoAuthHandlerFound as e:
        print "Authenciation config error, perhaps you do not have a ~/.boto file with correct permissions?"
        print e.message
        return e
    except Exception as e:
        print "Unknown error occured:"
        print e.message
        return e

    if ec2_connection == None:
        raise Exception("Invalid zone specified? Unable to connect to region using zone name")

    if bid:
        print 'Attempting to call up %i spot bees, this can take a while...' % count

        spot_requests = ec2_connection.request_spot_instances(
            image_id=image_id,
            price=bid,
            count=count,
            key_name=key_name,
            security_groups=[group] if subnet is None else _get_security_group_ids(ec2_connection, [group], subnet),
            instance_type=instance_type,
            placement=None if 'gov' in zone else zone,
            subnet_id=subnet)

        # it can take a few seconds before the spot requests are fully processed
        time.sleep(5)

        instances = _wait_for_spot_request_fulfillment(ec2_connection, spot_requests)
    else:
        print 'Attempting to call up %i bees.' % count

        try:
            reservation = ec2_connection.run_instances(
                image_id=image_id,
                min_count=count,
                max_count=count,
                key_name=key_name,
                security_groups=[group] if subnet is None else _get_security_group_ids(ec2_connection, [group], subnet),
                instance_type=instance_type,
                placement=None if 'gov' in zone else zone,
                subnet_id=subnet)
        except boto.exception.EC2ResponseError as e:
            print "Unable to call bees:", e.message
            return e

        instances = reservation.instances

    if instance_ids:
        # Merge the still-usable pre-existing instances into the new swarm.
        existing_reservations = ec2_connection.get_all_instances(instance_ids=instance_ids)
        existing_instances = [r.instances[0] for r in existing_reservations]
        # NOTE(review): relies on Python 2's eager map() for the side effect.
        map(instances.append, existing_instances)

    print 'Waiting for bees to load their machine guns...'

    instance_ids = instance_ids or []

    # Poll each pending instance until it reports 'running'.
    for instance in filter(lambda i: i.state == 'pending', instances):
        instance.update()
        while instance.state != 'running':
            print '.'
            time.sleep(5)
            instance.update()

        instance_ids.append(instance.id)
        print 'Bee %s is ready for the attack.' % instance.id

    ec2_connection.create_tags(instance_ids, { "Name": "a bee!" })

    _write_server_list(username, key_name, zone, instances)

    print 'The swarm has assembled %i bees.' % len(instances)
def report():
    """
    Report the status of the load testing servers.
    """
    username, key_name, zone, instance_ids = _read_server_list()

    if not instance_ids:
        print 'No bees have been mobilized.'
        return

    ec2_connection = boto.ec2.connect_to_region(_get_region(zone))

    reservations = ec2_connection.get_all_instances(instance_ids=instance_ids)

    # One reservation may hold several instances; flatten them first.
    instances = []
    for reservation in reservations:
        instances.extend(reservation.instances)

    for instance in instances:
        print 'Bee %s: %s @ %s' % (instance.id, instance.state, instance.ip_address)
def down():
    """
    Shutdown the load testing server.
    """
    username, key_name, zone, instance_ids = _read_server_list()

    if not instance_ids:
        print 'No bees have been mobilized.'
        return

    print 'Connecting to the hive.'

    ec2_connection = boto.ec2.connect_to_region(_get_region(zone))

    print 'Calling off the swarm.'

    terminated_instance_ids = ec2_connection.terminate_instances(
        instance_ids=instance_ids)

    print 'Stood down %i bees.' % len(terminated_instance_ids)

    # Swarm is gone: forget the local roster too.
    _delete_server_list()
def _wait_for_spot_request_fulfillment(conn, requests, fulfilled_requests = []):
"""
Wait until all spot requests are fulfilled.
Once all spot requests are fulfilled, return a list of corresponding spot instances.
"""
if len(requests) == 0:
reservations = conn.get_all_instances(instance_ids = [r.instance_id for r in fulfilled_requests])
return [r.instances[0] for r in reservations]
else:
time.sleep(10)
print '.'
requests = conn.get_all_spot_instance_requests(request_ids=[req.id for req in requests])
for req in requests:
if req.status.code == 'fulfilled':
fulfilled_requests.append(req)
print "spot bee `{}` joined the swarm.".format(req.instance_id)
return _wait_for_spot_request_fulfillment(conn, [r for r in requests if r not in fulfilled_requests], fulfilled_requests)
def _attack(params):
"""
Test the target URL with requests.
Intended for use with multiprocessing.
"""
print 'Bee %i is joining the swarm.' % params['i']
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
pem_path = params.get('key_name') and _get_pem_path(params['key_name']) or None
if not os.path.isfile(pem_path):
client.load_system_host_keys()
client.connect(params['instance_name'], username=params['username'])
else:
client.connect(
params['instance_name'],
username=params['username'],
key_filename=pem_path)
print 'Bee %i is firing her machine gun. Bang bang!' % params['i']
options = ''
if params['headers'] is not '':
for h in params['headers'].split(';'):
if h != '':
options += ' -H "%s"' % h.strip()
stdin, stdout, stderr = client.exec_command('mktemp')
params['csv_filename'] = stdout.read().strip()
if params['csv_filename']:
options += ' -e %(csv_filename)s' % params
else:
print 'Bee %i lost sight of the target (connection timed out creating csv_filename).' % params['i']
return None
if params['post_file']:
pem_file_path=_get_pem_path(params['key_name'])
os.system("scp -q -o 'StrictHostKeyChecking=no' -i %s %s %s@%s:/tmp/honeycomb" % (pem_file_path, params['post_file'], params['username'], params['instance_name']))
options += ' -T "%(mime_type)s; charset=UTF-8" -p /tmp/honeycomb' % params
if params['keep_alive']:
options += ' -k'
if params['cookies'] is not '':
options += ' -H \"Cookie: %s;sessionid=NotARealSessionID;\"' % params['cookies']
else:
options += ' -C \"sessionid=NotARealSessionID\"'
if params['basic_auth'] is not '':
options += ' -A %s' % params['basic_auth']
params['options'] = options
benchmark_command = 'ab -v 3 -r -n %(num_requests)s -c %(concurrent_requests)s %(options)s "%(url)s"' % params
stdin, stdout, stderr = client.exec_command(benchmark_command)
response = {}
ab_results = stdout.read()
ms_per_request_search = re.search('Time\ per\ request:\s+([0-9.]+)\ \[ms\]\ \(mean\)', ab_results)
if not ms_per_request_search:
print 'Bee %i lost sight of the target (connection timed out running ab).' % params['i']
return None
requests_per_second_search = re.search('Requests\ per\ second:\s+([0-9.]+)\ \[#\/sec\]\ \(mean\)', ab_results)
failed_requests = re.search('Failed\ requests:\s+([0-9.]+)', ab_results)
response['failed_requests_connect'] = 0
response['failed_requests_receive'] = 0
response['failed_requests_length'] = 0
response['failed_requests_exceptions'] = 0
if float(failed_requests.group(1)) > 0:
failed_requests_detail = re.search('(Connect: [0-9.]+, Receive: [0-9.]+, Length: [0-9.]+, Exceptions: [0-9.]+)', ab_results)
if failed_requests_detail:
response['failed_requests_connect'] = float(re.search('Connect:\s+([0-9.]+)', failed_requests_detail.group(0)).group(1))
response['failed_requests_receive'] = float(re.search('Receive:\s+([0-9.]+)', failed_requests_detail.group(0)).group(1))
response['failed_requests_length'] = float(re.search('Length:\s+([0-9.]+)', failed_requests_detail.group(0)).group(1))
response['failed_requests_exceptions'] = float(re.search('Exceptions:\s+([0-9.]+)', failed_requests_detail.group(0)).group(1))
complete_requests_search = re.search('Complete\ requests:\s+([0-9]+)', ab_results)
response['number_of_200s'] = len(re.findall('HTTP/1.1\ 2[0-9][0-9]', ab_results))
response['number_of_300s'] = len(re.findall('HTTP/1.1\ 3[0-9][0-9]', ab_results))
response['number_of_400s'] = len(re.findall('HTTP/1.1\ 4[0-9][0-9]', ab_results))
response['number_of_500s'] = len(re.findall('HTTP/1.1\ 5[0-9][0-9]', ab_results))
response['ms_per_request'] = float(ms_per_request_search.group(1))
response['requests_per_second'] = float(requests_per_second_search.group(1))
response['failed_requests'] = float(failed_requests.group(1))
response['complete_requests'] = float(complete_requests_search.group(1))
stdin, stdout, stderr = client.exec_command('cat %(csv_filename)s' % params)
response['request_time_cdf'] = []
for row in csv.DictReader(stdout):
row["Time in ms"] = float(row["Time in ms"])
response['request_time_cdf'].append(row)
if not response['request_time_cdf']:
print 'Bee %i lost sight of the target (connection timed out reading csv).' % params['i']
return None
print 'Bee %i is out of ammo.' % params['i']
client.close()
return response
except socket.error, e:
return e
def _summarize_results(results, params, csv_filename):
    """
    Aggregate per-bee attack results into a single summary dict.

    ``results`` holds, per bee: a result dict, None (timeout) or a
    socket.error (connection failure). Totals are computed over the bees
    that completed; optional tpr/rps bounds (read from params[0]) drive the
    'performance_accepted' verdict. When ``csv_filename`` is truthy, the
    aggregated response-time CDF is also written to disk.
    """
    summarized_results = dict()
    summarized_results['timeout_bees'] = [r for r in results if r is None]
    summarized_results['exception_bees'] = [r for r in results if type(r) == socket.error]
    summarized_results['complete_bees'] = [r for r in results if r is not None and type(r) != socket.error]
    summarized_results['timeout_bees_params'] = [p for r, p in zip(results, params) if r is None]
    summarized_results['exception_bees_params'] = [p for r, p in zip(results, params) if type(r) == socket.error]
    summarized_results['complete_bees_params'] = [p for r, p in zip(results, params) if r is not None and type(r) != socket.error]
    summarized_results['num_timeout_bees'] = len(summarized_results['timeout_bees'])
    summarized_results['num_exception_bees'] = len(summarized_results['exception_bees'])
    summarized_results['num_complete_bees'] = len(summarized_results['complete_bees'])
    complete_bees = summarized_results['complete_bees']
    # Sum each per-bee metric across the bees that completed.
    for result_key, total_key in [
            ('complete_requests', 'total_complete_requests'),
            ('failed_requests', 'total_failed_requests'),
            ('failed_requests_connect', 'total_failed_requests_connect'),
            ('failed_requests_receive', 'total_failed_requests_receive'),
            ('failed_requests_length', 'total_failed_requests_length'),
            ('failed_requests_exceptions', 'total_failed_requests_exceptions'),
            ('number_of_200s', 'total_number_of_200s'),
            ('number_of_300s', 'total_number_of_300s'),
            ('number_of_400s', 'total_number_of_400s'),
            ('number_of_500s', 'total_number_of_500s')]:
        summarized_results[total_key] = sum(r[result_key] for r in complete_bees)
    # NOTE: historically named "mean_requests", but this is the swarm-wide
    # sum of the per-bee requests/second figures.
    summarized_results['mean_requests'] = sum(r['requests_per_second'] for r in complete_bees)
    if summarized_results['num_complete_bees'] == 0:
        summarized_results['mean_response'] = "no bees are complete"
    else:
        summarized_results['mean_response'] = sum(r['ms_per_request'] for r in complete_bees) / summarized_results['num_complete_bees']
    summarized_results['tpr_bounds'] = params[0]['tpr']
    summarized_results['rps_bounds'] = params[0]['rps']
    if summarized_results['tpr_bounds'] is not None:
        if summarized_results['mean_response'] < summarized_results['tpr_bounds']:
            summarized_results['performance_accepted'] = True
        else:
            summarized_results['performance_accepted'] = False
    if summarized_results['rps_bounds'] is not None:
        # BUGFIX: accept when the rps bound is met AND the tpr check (when it
        # ran) did not already reject. The old expression
        # `... and accepted is True or None` was an operator-precedence bug
        # and raised KeyError when only an rps bound was supplied.
        if summarized_results['mean_requests'] > summarized_results['rps_bounds'] and summarized_results.get('performance_accepted') in (True, None):
            summarized_results['performance_accepted'] = True
        else:
            summarized_results['performance_accepted'] = False
    summarized_results['request_time_cdf'] = _get_request_time_cdf(summarized_results['total_complete_requests'], summarized_results['complete_bees'])
    if csv_filename:
        _create_request_time_cdf_csv(results, summarized_results['complete_bees_params'], summarized_results['request_time_cdf'], csv_filename)
    return summarized_results
def _create_request_time_cdf_csv(results, complete_bees_params, request_time_cdf, csv_filename):
if csv_filename:
with open(csv_filename, 'w') as stream:
writer = csv.writer(stream)
header = ["% faster than", "all bees [ms]"]
for p in complete_bees_params:
header.append("bee %(instance_id)s [ms]" % p)
writer.writerow(header)
for i in range(100):
row = [i, request_time_cdf[i]] if i < len(request_time_cdf) else [i,float("inf")]
for r in results:
if r is not None:
row.append(r['request_time_cdf'][i]["Time in ms"])
writer.writerow(row)
def _get_request_time_cdf(total_complete_requests, complete_bees):
# Recalculate the global cdf based on the csv files collected from
# ab. Can do this by sampling the request_time_cdfs for each of
# the completed bees in proportion to the number of
# complete_requests they have
n_final_sample = 100
sample_size = 100 * n_final_sample
n_per_bee = [int(r['complete_requests'] / total_complete_requests * sample_size)
for r in complete_bees]
sample_response_times = []
for n, r in zip(n_per_bee, complete_bees):
cdf = r['request_time_cdf']
for i in range(n):
j = int(random.random() * len(cdf))
sample_response_times.append(cdf[j]["Time in ms"])
sample_response_times.sort()
request_time_cdf = sample_response_times[0:sample_size:sample_size / n_final_sample]
return request_time_cdf
def _print_results(summarized_results):
    """
    Print summarized load-testing results.

    Expects the dict built by _summarize_results. Prints failure/timeout
    counts first, then totals, optional rps/tpr bounds, two percentile
    samples from the aggregated response-time CDF, and a closing verdict
    keyed off the mean response time.
    """
    # Bees recorded as socket.error never managed to connect at all.
    if summarized_results['exception_bees']:
        print ' %i of your bees didn\'t make it to the action. They might be taking a little longer than normal to find their machine guns, or may have been terminated without using "bees down".' % summarized_results['num_exception_bees']
    if summarized_results['timeout_bees']:
        print ' Target timed out without fully responding to %i bees.' % summarized_results['num_timeout_bees']
    # With zero completed bees none of the totals below are meaningful.
    if summarized_results['num_complete_bees'] == 0:
        print ' No bees completed the mission. Apparently your bees are peace-loving hippies.'
        return
    print ' Complete requests:\t\t%i' % summarized_results['total_complete_requests']
    print ' Failed requests:\t\t%i' % summarized_results['total_failed_requests']
    print ' connect:\t\t%i' % summarized_results['total_failed_requests_connect']
    print ' receive:\t\t%i' % summarized_results['total_failed_requests_receive']
    print ' length:\t\t%i' % summarized_results['total_failed_requests_length']
    print ' exceptions:\t\t%i' % summarized_results['total_failed_requests_exceptions']
    print ' Response Codes:'
    print ' 2xx:\t\t%i' % summarized_results['total_number_of_200s']
    print ' 3xx:\t\t%i' % summarized_results['total_number_of_300s']
    print ' 4xx:\t\t%i' % summarized_results['total_number_of_400s']
    print ' 5xx:\t\t%i' % summarized_results['total_number_of_500s']
    # NOTE(review): 'mean_requests' is computed as the SUM of the per-bee
    # requests/sec figures in _summarize_results, despite the label here.
    print ' Requests per second:\t%f [#/sec] (mean of bees)' % summarized_results['mean_requests']
    if 'rps_bounds' in summarized_results and summarized_results['rps_bounds'] is not None:
        print ' Requests per second:\t%f [#/sec] (upper bounds)' % summarized_results['rps_bounds']
    print ' Time per request:\t\t%f [ms] (mean of bees)' % summarized_results['mean_response']
    if 'tpr_bounds' in summarized_results and summarized_results['tpr_bounds'] is not None:
        print ' Time per request:\t\t%f [ms] (lower bounds)' % summarized_results['tpr_bounds']
    # Indices 49/89 = 50th/90th percentile of the 100-point aggregated CDF.
    print ' 50%% responses faster than:\t%f [ms]' % summarized_results['request_time_cdf'][49]
    print ' 90%% responses faster than:\t%f [ms]' % summarized_results['request_time_cdf'][89]
    if 'performance_accepted' in summarized_results:
        print ' Performance check:\t\t%s' % summarized_results['performance_accepted']
    # Tongue-in-cheek verdict, bucketed on mean response time in ms.
    if summarized_results['mean_response'] < 500:
        print 'Mission Assessment: Target crushed bee offensive.'
    elif summarized_results['mean_response'] < 1000:
        print 'Mission Assessment: Target successfully fended off the swarm.'
    elif summarized_results['mean_response'] < 1500:
        print 'Mission Assessment: Target wounded, but operational.'
    elif summarized_results['mean_response'] < 2000:
        print 'Mission Assessment: Target severely compromised.'
    else:
        print 'Mission Assessment: Swarm annihilated target.'
def attack(url, n, c, **options):
"""
Test the root url of this site.
"""
username, key_name, zone, instance_ids = _read_server_list()
headers = options.get('headers', '')
csv_filename = options.get("csv_filename", '')
cookies = options.get('cookies', '')
post_file = options.get('post_file', '')
keep_alive = options.get('keep_alive', False)
basic_auth = options.get('basic_auth', '')
if csv_filename:
try:
stream = open(csv_filename, 'w')
except IOError, e:
raise IOError("Specified csv_filename='%s' is not writable. Check permissions or specify a different filename and try again." % csv_filename)
if not instance_ids:
print 'No bees are ready to attack.'
return
print 'Connecting to the hive.'
ec2_connection = boto.ec2.connect_to_region(_get_region(zone))
print 'Assembling bees.'
reservations = ec2_connection.get_all_instances(instance_ids=instance_ids)
instances = []
for reservation in reservations:
instances.extend(reservation.instances)
instance_count = len(instances)
if n < instance_count * 2:
print 'bees: error: the total number of requests must be at least %d (2x num. instances)' % (instance_count * 2)
return
if c < instance_count:
print 'bees: error: the number of concurrent requests must be at least %d (num. instances)' % instance_count
return
if n < c:
print 'bees: error: the number of concurrent requests (%d) must be at most the same as number of requests (%d)' % (c, n)
return
requests_per_instance = int(float(n) / instance_count)
connections_per_instance = int(float(c) / instance_count)
print 'Each of %i bees will fire %s rounds, %s at a time.' % (instance_count, requests_per_instance, connections_per_instance)
params = []
for i, instance in enumerate(instances):
params.append({
'i': i,
'instance_id': instance.id,
'instance_name': instance.private_dns_name if instance.public_dns_name == "" else instance.public_dns_name,
'url': url,
'concurrent_requests': connections_per_instance,
'num_requests': requests_per_instance,
'username': username,
'key_name': key_name,
'headers': headers,
'cookies': cookies,
'post_file': options.get('post_file'),
'keep_alive': options.get('keep_alive'),
'mime_type': options.get('mime_type', ''),
'tpr': options.get('tpr'),
'rps': options.get('rps'),
'basic_auth': options.get('basic_auth')
})
print 'Stinging URL so it will be cached for the attack.'
request = urllib2.Request(url)
# Need to revisit to support all http verbs.
if post_file:
try:
with open(post_file, 'r') as content_file:
content = content_file.read()
request.add_data(content)
except IOError:
print 'bees: error: The post file you provided doesn\'t exist.'
return
if cookies is not '':
request.add_header('Cookie', cookies)
if basic_auth is not '':
authentication = base64.encodestring(basic_auth).replace('\n', '')
request.add_header('Authorization', 'Basic %s' % authentication)
# Ping url so it will be cached for testing
dict_headers = {}
if headers is not '':
dict_headers = headers = dict(j.split(':') for j in [i.strip() for i in headers.split(';') if i != ''])
for key, value in dict_headers.iteritems():
request.add_header(key, value)
if url.lower().startswith("https://") and hasattr(ssl, '_create_unverified_context'):
context = ssl._create_unverified_context()
response = urllib2.urlopen(request, context=context)
else:
response = urllib2.urlopen(request)
response.read()
print 'Organizing the swarm.'
# Spin up processes for connecting to EC2 instances
pool = Pool(len(params))
results = pool.map(_attack, params)
summarized_results = _summarize_results(results, params, csv_filename)
print 'Offensive complete.'
_print_results(summarized_results)
print 'The swarm is awaiting new orders.'
if 'performance_accepted' in summarized_results:
if summarized_results['performance_accepted'] is False:
print("Your targets performance tests did not meet our standard.")
sys.exit(1)
else:
print('Your targets performance tests meet our standards, the Queen sends her regards.')
sys.exit(0)
def _redirect_stdout(outfile, func, *args, **kwargs):
save_out = sys.stdout
with open(outfile, 'w') as redir_out:
sys.stdout = redir_out
func(*args, **kwargs)
sys.stdout = save_out
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import calendar
import datetime
import json
import uuid
import math
from collections import OrderedDict
from decimal import *
from datetime import timedelta
from aenum import Enum
from isodate import parse_duration, duration_isoformat
from gremlin_python import statics
from gremlin_python.statics import FloatType, FunctionType, IntType, LongType, TypeType, SingleByte, ByteBufferType, SingleChar
from gremlin_python.process.traversal import Binding, Bytecode, P, TextP, Traversal, Traverser, TraversalStrategy
from gremlin_python.structure.graph import Edge, Property, Vertex, VertexProperty, Path
from gremlin_python.structure.io.util import SymbolUtil
# When we fall back to a superclass's serializer, we iterate over this map.
# We want that iteration order to be consistent, so we use an OrderedDict,
# not a dict.
#
# Both registries are populated automatically by the GraphSONTypeType
# metaclass when a serializer class is defined: Python type -> serializer
# class, and GraphSON "@type" tag -> deserializer class respectively.
_serializers = OrderedDict()
_deserializers = {}
class GraphSONTypeType(type):
    """Metaclass that auto-registers GraphSON serializer/deserializer classes.

    Any subclass whose name does not start with an underscore is added to the
    module-level ``_serializers`` / ``_deserializers`` registries, keyed by
    its ``python_type`` / ``graphson_type`` attribute (when set).
    """

    def __new__(mcs, name, bases, dct):
        new_type = super(GraphSONTypeType, mcs).__new__(mcs, name, bases, dct)
        is_public = not name.startswith('_')
        if is_public and new_type.python_type:
            _serializers[new_type.python_type] = new_type
        if is_public and new_type.graphson_type:
            _deserializers[new_type.graphson_type] = new_type
        return new_type
class GraphSONUtil(object):
    """Helpers for building GraphSON type-tagged dict values."""

    TYPE_KEY = "@type"
    VALUE_KEY = "@value"

    @classmethod
    def typed_value(cls, type_name, value, prefix="g"):
        """Wrap ``value`` as ``{"@type": "<prefix>:<type_name>", "@value": value}``.

        A ``None`` value produces a type-only wrapper (no "@value" key).
        """
        wrapper = {cls.TYPE_KEY: cls.format_type(prefix, type_name)}
        if value is not None:
            wrapper[cls.VALUE_KEY] = value
        return wrapper

    @classmethod
    def format_type(cls, prefix, type_name):
        """Return the namespaced GraphSON type tag, e.g. ``g:Int32``."""
        return "{0}:{1}".format(prefix, type_name)
# Read/Write classes split to follow precedence of the Java API
class GraphSONWriter(object):
    """Serializes Python objects into GraphSON type-tagged structures."""

    def __init__(self, serializer_map=None):
        """
        :param serializer_map: map from Python type to serializer instance implementing `dictify`
        """
        self.serializers = _serializers.copy()
        if serializer_map:
            self.serializers.update(serializer_map)

    def write_object(self, objectData):
        """Serialize ``objectData`` to a compact GraphSON JSON string."""
        return json.dumps(self.to_dict(objectData), separators=(',', ':'))

    def to_dict(self, obj):
        """
        Encodes python objects in GraphSON type-tagged dict values
        """
        try:
            return self.serializers[type(obj)].dictify(obj, self)
        except KeyError:
            # No exact-type serializer; fall back to the first superclass match.
            for candidate_type, candidate in self.serializers.items():
                if isinstance(obj, candidate_type):
                    return candidate.dictify(obj, self)
        # list and map are treated as normal json objs (could be isolated serializers)
        if isinstance(obj, (list, set)):
            return [self.to_dict(item) for item in obj]
        if isinstance(obj, dict):
            return dict((self.to_dict(key), self.to_dict(val)) for key, val in obj.items())
        return obj
class GraphSONReader(object):
    """Deserializes GraphSON type-tagged structures into Python objects."""

    def __init__(self, deserializer_map=None):
        """
        :param deserializer_map: map from GraphSON type tag to deserializer instance implementing `objectify`
        """
        self.deserializers = _deserializers.copy()
        if deserializer_map:
            self.deserializers.update(deserializer_map)

    def read_object(self, json_data):
        """Parse a GraphSON JSON string and unpack it into Python objects."""
        return self.to_object(json.loads(json_data))

    def to_object(self, obj):
        """
        Unpacks GraphSON type-tagged dict values into objects mapped in self.deserializers
        """
        if isinstance(obj, list):
            return [self.to_object(item) for item in obj]
        if isinstance(obj, dict):
            try:
                deserializer = self.deserializers[obj[GraphSONUtil.TYPE_KEY]]
                return deserializer.objectify(obj[GraphSONUtil.VALUE_KEY], self)
            except KeyError:
                pass
            # Untagged (or unknown-tag) maps decode as plain dicts.
            return dict((self.to_object(k), self.to_object(v)) for k, v in obj.items())
        return obj
class _GraphSONTypeIO(object, metaclass=GraphSONTypeType):
    """Abstract base for GraphSON serializers/deserializers.

    The GraphSONTypeType metaclass registers every non-underscore subclass
    under its python_type and/or graphson_type attribute.
    """
    # Python type handled when serializing (None: not a serializer).
    python_type = None
    # GraphSON "@type" tag handled when deserializing (None: not one).
    graphson_type = None
    def dictify(self, obj, writer):
        """Serialize ``obj``; implemented by subclasses."""
        raise NotImplementedError()
    def objectify(self, d, reader):
        """Deserialize GraphSON value ``d``; implemented by subclasses."""
        raise NotImplementedError()
class _BytecodeSerializer(_GraphSONTypeIO):
    """Shared serialization logic for Bytecode and Traversal objects."""

    @classmethod
    def _dictify_instructions(cls, instructions, writer):
        # Each instruction becomes [name, serialized-arg, serialized-arg, ...].
        serialized = []
        for instruction in instructions:
            entry = [instruction[0]]
            entry.extend(writer.to_dict(argument) for argument in instruction[1:])
            serialized.append(entry)
        return serialized

    @classmethod
    def dictify(cls, bytecode, writer):
        """Serialize ``bytecode`` (or a Traversal's bytecode) as Bytecode."""
        if isinstance(bytecode, Traversal):
            bytecode = bytecode.bytecode
        payload = {}
        if bytecode.source_instructions:
            payload["source"] = cls._dictify_instructions(bytecode.source_instructions, writer)
        if bytecode.step_instructions:
            payload["step"] = cls._dictify_instructions(bytecode.step_instructions, writer)
        return GraphSONUtil.typed_value("Bytecode", payload)
class TraversalSerializer(_BytecodeSerializer):
    # Traversals serialize through their underlying bytecode (see base class).
    python_type = Traversal
class BytecodeSerializer(_BytecodeSerializer):
    # Bytecode objects use the shared _BytecodeSerializer.dictify directly.
    python_type = Bytecode
class VertexSerializer(_GraphSONTypeIO):
    """Serializes Vertex instances to g:Vertex (id and label only)."""
    python_type = Vertex
    graphson_type = "g:Vertex"
    @classmethod
    def dictify(cls, vertex, writer):
        return GraphSONUtil.typed_value("Vertex", {"id": writer.to_dict(vertex.id),
                                                   "label": writer.to_dict(vertex.label)})
class EdgeSerializer(_GraphSONTypeIO):
    """Serializes Edge instances to g:Edge, with in/out vertex references."""

    python_type = Edge
    graphson_type = "g:Edge"

    @classmethod
    def dictify(cls, edge, writer):
        payload = {"id": writer.to_dict(edge.id),
                   "outV": writer.to_dict(edge.outV.id),
                   "outVLabel": writer.to_dict(edge.outV.label),
                   "label": writer.to_dict(edge.label),
                   "inV": writer.to_dict(edge.inV.id),
                   "inVLabel": writer.to_dict(edge.inV.label)}
        return GraphSONUtil.typed_value("Edge", payload)
class VertexPropertySerializer(_GraphSONTypeIO):
    """Serializes VertexProperty instances, including the owning vertex id."""
    python_type = VertexProperty
    graphson_type = "g:VertexProperty"
    @classmethod
    def dictify(cls, vertex_property, writer):
        return GraphSONUtil.typed_value("VertexProperty", {"id": writer.to_dict(vertex_property.id),
                                                           "label": writer.to_dict(vertex_property.label),
                                                           "value": writer.to_dict(vertex_property.value),
                                                           "vertex": writer.to_dict(vertex_property.vertex.id)})
class PropertySerializer(_GraphSONTypeIO):
    """Serializes Property instances to g:Property values."""

    python_type = Property
    graphson_type = "g:Property"

    @classmethod
    def dictify(cls, property, writer):
        element_dict = writer.to_dict(property.element)
        if element_dict is not None:
            # Trim the owning element down to a bare reference: drop labels,
            # nested properties and values from its "@value" payload.
            value_dict = element_dict["@value"]
            for redundant_key in ("outVLabel", "inVLabel", "properties", "value"):
                value_dict.pop(redundant_key, None)
        return GraphSONUtil.typed_value("Property", {"key": writer.to_dict(property.key),
                                                     "value": writer.to_dict(property.value),
                                                     "element": element_dict})
class TraversalStrategySerializer(_GraphSONTypeIO):
    """Serializes TraversalStrategy instances, typed by their strategy_name."""
    python_type = TraversalStrategy
    @classmethod
    def dictify(cls, strategy, writer):
        return GraphSONUtil.typed_value(strategy.strategy_name, writer.to_dict(strategy.configuration))
class TraverserIO(_GraphSONTypeIO):
    """Maps Traverser <-> g:Traverser (carried value plus bulk count)."""

    python_type = Traverser
    graphson_type = "g:Traverser"

    @classmethod
    def dictify(cls, traverser, writer):
        payload = {"value": writer.to_dict(traverser.object),
                   "bulk": writer.to_dict(traverser.bulk)}
        return GraphSONUtil.typed_value("Traverser", payload)

    @classmethod
    def objectify(cls, d, reader):
        carried_value = reader.to_object(d["value"])
        return Traverser(carried_value, reader.to_object(d["bulk"]))
class EnumSerializer(_GraphSONTypeIO):
    """Serializes aenum.Enum members; type name and member name are camelCased."""
    python_type = Enum
    @classmethod
    def dictify(cls, enum, _):
        return GraphSONUtil.typed_value(SymbolUtil.to_camel_case(type(enum).__name__),
                                        SymbolUtil.to_camel_case(str(enum.name)))
class PSerializer(_GraphSONTypeIO):
    """Serializes P predicates; two-operand predicates carry a value pair."""

    python_type = P

    @classmethod
    def dictify(cls, p, writer):
        if p.other is None:
            predicate_value = writer.to_dict(p.value)
        else:
            predicate_value = [writer.to_dict(p.value), writer.to_dict(p.other)]
        return GraphSONUtil.typed_value("P", {"predicate": p.operator,
                                              "value": predicate_value})
class TextPSerializer(_GraphSONTypeIO):
    """Serializes TextP predicates; mirrors PSerializer with a TextP tag."""

    python_type = TextP

    @classmethod
    def dictify(cls, p, writer):
        if p.other is None:
            predicate_value = writer.to_dict(p.value)
        else:
            predicate_value = [writer.to_dict(p.value), writer.to_dict(p.other)]
        return GraphSONUtil.typed_value("TextP", {"predicate": p.operator,
                                                  "value": predicate_value})
class BindingSerializer(_GraphSONTypeIO):
    """Serializes Binding (key plus serialized bound value) to g:Binding."""
    python_type = Binding
    @classmethod
    def dictify(cls, binding, writer):
        out = {"key": binding.key,
               "value": writer.to_dict(binding.value)}
        return GraphSONUtil.typed_value("Binding", out)
class LambdaSerializer(_GraphSONTypeIO):
    """Serializes Python callables to GraphSON Lambda values.

    The callable is invoked; it must return either the lambda script string
    itself or a (script, language) pair.
    """

    python_type = FunctionType

    @classmethod
    def dictify(cls, lambda_object, writer):
        lambda_result = lambda_object()
        if isinstance(lambda_result, str):
            script = lambda_result
            language = statics.default_lambda_language
        else:
            script = lambda_result[0]
            language = lambda_result[1]
        out = {"script": script,
               "language": language}
        if language == "gremlin-groovy" and "->" in script:
            # if the user has explicitly added parameters to the groovy closure then we can easily detect one or two
            # arg lambdas - if we can't detect 1 or 2 then we just go with "unknown"
            closure_params = script[0:script.find("->")]
            out["arguments"] = 2 if "," in closure_params else 1
        else:
            out["arguments"] = -1
        return GraphSONUtil.typed_value("Lambda", out)
class TypeSerializer(_GraphSONTypeIO):
    """Serializes a Python type by instantiating it and serializing the instance."""
    python_type = TypeType
    @classmethod
    def dictify(cls, typ, writer):
        return writer.to_dict(typ())
class UUIDIO(_GraphSONTypeIO):
    """Maps uuid.UUID <-> g:UUID via the canonical string form."""
    python_type = uuid.UUID
    graphson_type = "g:UUID"
    graphson_base_type = "UUID"
    @classmethod
    def dictify(cls, obj, writer):
        return GraphSONUtil.typed_value(cls.graphson_base_type, str(obj))
    @classmethod
    def objectify(cls, d, reader):
        # uuid.UUID parses the canonical string back into a UUID.
        return cls.python_type(d)
class DateIO(_GraphSONTypeIO):
    """Maps datetime.datetime <-> g:Date (wire format: epoch milliseconds)."""
    python_type = datetime.datetime
    graphson_type = "g:Date"
    graphson_base_type = "Date"
    @classmethod
    def dictify(cls, obj, writer):
        try:
            # Epoch seconds from the UTC time tuple, plus sub-second
            # precision when the object carries a microsecond field.
            timestamp_seconds = calendar.timegm(obj.utctimetuple())
            pts = timestamp_seconds * 1e3 + getattr(obj, 'microsecond', 0) / 1e3
        except AttributeError:
            # Objects without utctimetuple() (e.g. datetime.date) fall back
            # to the plain time tuple at day resolution.
            pts = calendar.timegm(obj.timetuple()) * 1e3
        ts = int(round(pts))
        return GraphSONUtil.typed_value(cls.graphson_base_type, ts)
    @classmethod
    def objectify(cls, ts, reader):
        # Python timestamp expects seconds
        return datetime.datetime.utcfromtimestamp(ts / 1000.0)
# Based on current implementation, this class must always be declared before FloatIO.
# Seems pretty fragile for future maintainers. Maybe look into this.
class TimestampIO(_GraphSONTypeIO):
    """A timestamp in Python is type float (statics.timestamp); the wire
    format is integer epoch milliseconds."""
    python_type = statics.timestamp
    graphson_type = "g:Timestamp"
    graphson_base_type = "Timestamp"
    @classmethod
    def dictify(cls, obj, writer):
        # Java timestamp expects milliseconds integer
        # Have to use int because of legacy Python
        ts = int(round(obj * 1000))
        return GraphSONUtil.typed_value(cls.graphson_base_type, ts)
    @classmethod
    def objectify(cls, ts, reader):
        # Python timestamp expects seconds
        return cls.python_type(ts / 1000.0)
class _NumberIO(_GraphSONTypeIO):
    """Shared numeric serializer: wraps the value under graphson_base_type.

    Subclasses must define ``graphson_base_type`` (and usually
    ``python_type``/``graphson_type``); this base itself is never registered
    because its name starts with an underscore.
    """
    @classmethod
    def dictify(cls, n, writer):
        if isinstance(n, bool):  # because isinstance(False, int) and isinstance(True, int)
            return n
        return GraphSONUtil.typed_value(cls.graphson_base_type, n)
    @classmethod
    def objectify(cls, v, _):
        # Coerce the wire value via the subclass's python_type constructor.
        return cls.python_type(v)
class FloatIO(_NumberIO):
    """Maps Python float <-> g:Float; NaN/Infinity are spelled as strings."""

    python_type = FloatType
    graphson_type = "g:Float"
    graphson_base_type = "Float"

    @classmethod
    def dictify(cls, n, writer):
        # Booleans pass through untyped (bool is an int subclass).
        if isinstance(n, bool):
            return n
        if math.isnan(n):
            wire_value = "NaN"
        elif math.isinf(n):
            wire_value = "Infinity" if n > 0 else "-Infinity"
        else:
            wire_value = n
        return GraphSONUtil.typed_value(cls.graphson_base_type, wire_value)

    @classmethod
    def objectify(cls, v, _):
        if isinstance(v, str):
            specials = {'NaN': float('nan'), 'Infinity': float('inf'), '-Infinity': float('-inf')}
            if v in specials:
                return specials[v]
        return cls.python_type(v)
class BigDecimalIO(_NumberIO):
    """Maps decimal.Decimal <-> gx:BigDecimal (finite values carried as strings)."""

    python_type = Decimal
    graphson_type = "gx:BigDecimal"
    graphson_base_type = "BigDecimal"

    @classmethod
    def dictify(cls, n, writer):
        # Booleans pass through untyped (bool is an int subclass).
        if isinstance(n, bool):
            return n
        if math.isnan(n):
            wire_value = "NaN"
        elif math.isinf(n):
            wire_value = "Infinity" if n > 0 else "-Infinity"
        else:
            wire_value = str(n)
        return GraphSONUtil.typed_value(cls.graphson_base_type, wire_value, "gx")

    @classmethod
    def objectify(cls, v, _):
        if isinstance(v, str):
            specials = {'NaN': Decimal('nan'), 'Infinity': Decimal('inf'), '-Infinity': Decimal('-inf')}
            if v in specials:
                return specials[v]
        return Decimal(v)
class DoubleIO(FloatIO):
    """Maps g:Double; identical handling to FloatIO (Python has one float type)."""
    graphson_type = "g:Double"
    graphson_base_type = "Double"
class Int64IO(_NumberIO):
    """Maps long integers <-> g:Int64, widening to gx:BigInteger when needed."""

    python_type = LongType
    graphson_type = "g:Int64"
    graphson_base_type = "Int64"

    @classmethod
    def dictify(cls, n, writer):
        # Booleans pass through untyped (bool is an int subclass).
        if isinstance(n, bool):
            return n
        # Values outside Java's signed 64-bit range become gx:BigInteger.
        if n < -9223372036854775808 or n > 9223372036854775807:
            return GraphSONUtil.typed_value("BigInteger", str(n), "gx")
        return GraphSONUtil.typed_value(cls.graphson_base_type, n)
class BigIntegerIO(Int64IO):
    """Reads gx:BigInteger; writing reuses Int64IO's range-based widening."""
    graphson_type = "gx:BigInteger"
class Int32IO(Int64IO):
    """Maps Python int <-> g:Int32, widening to g:Int64 or gx:BigInteger."""

    python_type = IntType
    graphson_type = "g:Int32"
    graphson_base_type = "Int32"

    @classmethod
    def dictify(cls, n, writer):
        # Booleans pass through untyped (bool is an int subclass).
        if isinstance(n, bool):
            return n
        # Widen by range: beyond 64-bit -> BigInteger, beyond 32-bit -> Int64.
        if n < -9223372036854775808 or n > 9223372036854775807:
            return GraphSONUtil.typed_value("BigInteger", str(n), "gx")
        if n < -2147483648 or n > 2147483647:
            return GraphSONUtil.typed_value("Int64", n)
        return GraphSONUtil.typed_value(cls.graphson_base_type, n)
class ByteIO(_NumberIO):
    """Maps SingleByte <-> gx:Byte."""
    python_type = SingleByte
    graphson_type = "gx:Byte"
    graphson_base_type = "Byte"
    @classmethod
    def dictify(cls, n, writer):
        if isinstance(n, bool):  # because isinstance(False, int) and isinstance(True, int)
            return n
        return GraphSONUtil.typed_value(cls.graphson_base_type, n, "gx")
    @classmethod
    def objectify(cls, v, _):
        # int.__new__ builds the SingleByte without calling SingleByte.__new__
        # (presumably skipping its input validation -- confirm in statics).
        return int.__new__(SingleByte, v)
class ByteBufferIO(_GraphSONTypeIO):
    """Maps ByteBufferType <-> gx:ByteBuffer (payload carried as text)."""
    python_type = ByteBufferType
    graphson_type = "gx:ByteBuffer"
    graphson_base_type = "ByteBuffer"
    @classmethod
    def dictify(cls, n, writer):
        # Each item of the buffer becomes one character of the payload string.
        return GraphSONUtil.typed_value(cls.graphson_base_type, "".join(chr(x) for x in n), "gx")
    @classmethod
    def objectify(cls, v, _):
        # assumes the payload decodes as utf8 -- TODO confirm wire contract
        return cls.python_type(v, "utf8")
class CharIO(_GraphSONTypeIO):
    """Maps SingleChar <-> gx:Char."""
    python_type = SingleChar
    graphson_type = "gx:Char"
    graphson_base_type = "Char"
    @classmethod
    def dictify(cls, n, writer):
        return GraphSONUtil.typed_value(cls.graphson_base_type, n, "gx")
    @classmethod
    def objectify(cls, v, _):
        # str.__new__ builds the SingleChar without calling SingleChar.__new__
        # (presumably skipping its input validation -- confirm in statics).
        return str.__new__(SingleChar, v)
class DurationIO(_GraphSONTypeIO):
    """Serializes timedelta values as GraphSON "gx:Duration"."""
    python_type = timedelta
    graphson_type = "gx:Duration"
    graphson_base_type = "Duration"
    @classmethod
    def dictify(cls, n, writer):
        # timedelta -> ISO-8601 duration string (via isodate's duration_isoformat)
        return GraphSONUtil.typed_value(cls.graphson_base_type, duration_isoformat(n), "gx")
    @classmethod
    def objectify(cls, v, _):
        # ISO-8601 duration string -> timedelta
        return parse_duration(v)
class VertexDeserializer(_GraphSONTypeIO):
    """Reads a GraphSON "g:Vertex" dict into a Vertex element."""

    graphson_type = "g:Vertex"

    @classmethod
    def objectify(cls, d, reader):
        vertex_id = reader.to_object(d["id"])
        label = d.get("label", "vertex")
        return Vertex(vertex_id, label)
class EdgeDeserializer(_GraphSONTypeIO):
    """Reads a GraphSON "g:Edge" dict into an Edge element."""

    graphson_type = "g:Edge"

    @classmethod
    def objectify(cls, d, reader):
        edge_id = reader.to_object(d["id"])
        out_vertex = Vertex(reader.to_object(d["outV"]), d.get("outVLabel", "vertex"))
        label = d.get("label", "edge")
        in_vertex = Vertex(reader.to_object(d["inV"]), d.get("inVLabel", "vertex"))
        return Edge(edge_id, out_vertex, label, in_vertex)
class VertexPropertyDeserializer(_GraphSONTypeIO):
    """Reads a GraphSON "g:VertexProperty" dict into a VertexProperty."""

    graphson_type = "g:VertexProperty"

    @classmethod
    def objectify(cls, d, reader):
        # The owning vertex is optional in the wire format.
        if "vertex" in d:
            vertex = Vertex(reader.to_object(d.get("vertex")))
        else:
            vertex = None
        return VertexProperty(reader.to_object(d["id"]),
                              d["label"],
                              reader.to_object(d["value"]),
                              vertex)
class PropertyDeserializer(_GraphSONTypeIO):
    """Reads a GraphSON "g:Property" dict into a Property."""

    graphson_type = "g:Property"

    @classmethod
    def objectify(cls, d, reader):
        # The owning element is optional in the wire format.
        if "element" in d:
            element = reader.to_object(d["element"])
        else:
            element = None
        return Property(d["key"], reader.to_object(d["value"]), element)
class PathDeserializer(_GraphSONTypeIO):
    """Reads a GraphSON "g:Path" dict into a Path of labels and objects."""

    graphson_type = "g:Path"

    @classmethod
    def objectify(cls, d, reader):
        labels = list(map(set, d["labels"]))
        objects = list(map(reader.to_object, d["objects"]))
        return Path(labels, objects)
class TraversalMetricsDeserializer(_GraphSONTypeIO):
    # "g:TraversalMetrics" has no dedicated Python type; the wrapped dict is
    # simply handed back to the reader for generic conversion.
    graphson_type = "g:TraversalMetrics"
    @classmethod
    def objectify(cls, d, reader):
        return reader.to_object(d)
class MetricsDeserializer(_GraphSONTypeIO):
    # "g:Metrics" has no dedicated Python type; the wrapped dict is simply
    # handed back to the reader for generic conversion.
    graphson_type = "g:Metrics"
    @classmethod
    def objectify(cls, d, reader):
        return reader.to_object(d)
| |
import collections
from ..error import Error
from ..language import ast
def is_input_type(type):
    """Return True if *type*, unwrapped of List/NonNull, is usable as input."""
    return isinstance(
        get_named_type(type),
        (GraphQLScalarType, GraphQLEnumType, GraphQLInputObjectType),
    )
def is_composite_type(type):
    """Return True if *type*, unwrapped of List/NonNull, has sub-selections."""
    return isinstance(
        get_named_type(type),
        (GraphQLObjectType, GraphQLInterfaceType, GraphQLUnionType),
    )
def is_leaf_type(type):
    """Return True if *type*, unwrapped of List/NonNull, is a leaf type."""
    return isinstance(
        get_named_type(type),
        (GraphQLScalarType, GraphQLEnumType),
    )
def get_named_type(type):
    """Strip all List/NonNull wrappers and return the underlying named type."""
    while isinstance(type, (GraphQLList, GraphQLNonNull)):
        type = type.of_type
    return type
def get_nullable_type(type):
    """Return *type* with a single outer NonNull wrapper removed, if any."""
    return type.of_type if isinstance(type, GraphQLNonNull) else type
class GraphQLType(object):
    """Base class for all GraphQL types; subclasses provide ``name``."""

    def __str__(self):
        return self.name

    def is_same_type(self, other):
        """True only for the exact same class carrying the same name."""
        if self.__class__ is not other.__class__:
            return False
        return self.name == other.name
class GraphQLScalarType(GraphQLType):
    """Scalar Type Definition

    The leaf values of any request and input values to arguments are
    Scalars (or Enums), defined by a name and a series of coercion
    functions used to ensure validity.

    Example:

        def coerce_odd(value):
            if value % 2 == 1:
                return value
            return None

        OddType = GraphQLScalarType(name='Odd', serialize=coerce_odd)
    """
    def __init__(self, name, description=None, serialize=None, parse_value=None, parse_literal=None):
        assert name, 'Type must be named.'
        assert callable(serialize)
        # Input coercion is optional, but must be supplied as a pair.
        if parse_value or parse_literal:
            assert callable(parse_value) and callable(parse_literal)
        self.name = name
        self.description = description
        self._serialize = serialize
        self._parse_value = parse_value
        self._parse_literal = parse_literal

    def serialize(self, value):
        # Coerce an internal value to its serialized (output) form.
        return self._serialize(value)

    def parse_value(self, value):
        # Coerce an input value; None when no parser was supplied.
        return self._parse_value(value) if self._parse_value else None

    def parse_literal(self, value_ast):
        # Coerce an AST literal; None when no parser was supplied.
        return self._parse_literal(value_ast) if self._parse_literal else None

    def __str__(self):
        return self.name
class GraphQLObjectType(GraphQLType):
    """Object Type Definition

    Almost all of the GraphQL types you define will be object types.
    Object types have a name, but most importantly describe their fields.

    Example:

        AddressType = GraphQLObjectType('Address', {
            'street': GraphQLField(GraphQLString),
            'number': GraphQLField(GraphQLInt),
            'formatted': GraphQLField(GraphQLString,
                resolver=lambda obj, *_: obj.number + ' ' + obj.street),
        })

    When two types need to refer to each other, or a type needs to refer to
    itself in a field, you can use a callable to supply the fields lazily:

        PersonType = GraphQLObjectType('Person', lambda: {
            'name': GraphQLField(GraphQLString),
            'bestFriend': GraphQLField(PersonType)
        })
    """
    def __init__(self, name, fields, interfaces=None, is_type_of=None, description=None):
        assert name, 'Type must be named.'
        self.name = name
        self.description = description
        # fields may be a dict or a zero-argument callable returning one;
        # resolution is deferred to get_fields() so mutually-recursive
        # type definitions work.
        self._fields = fields
        self._field_map = None
        self._interfaces = interfaces or []
        self._is_type_of = is_type_of
        # Side effect: registers this type on each declared interface's
        # _impls list so interfaces can enumerate their possible types.
        add_impl_to_interfaces(self)
    def get_fields(self):
        # Resolve and cache the field map on first access.
        if self._field_map is None:
            self._field_map = define_field_map(self._fields)
        return self._field_map
    def get_interfaces(self):
        return self._interfaces
    def is_type_of(self, value):
        # Returns None (not False) when no predicate was supplied; callers
        # such as get_type_of() treat None as "cannot determine".
        if self._is_type_of:
            return self._is_type_of(value)
def define_field_map(fields):
    """Materialize a field dict (calling it first when given as a thunk)
    and stamp each field object with its own name."""
    if callable(fields):
        fields = fields()
    for name, field in fields.items():
        field.name = name
    return fields
def add_impl_to_interfaces(impl):
    """Register *impl* on every interface it declares, so each interface
    can later enumerate its possible object types."""
    for interface in impl.get_interfaces():
        interface._impls.append(impl)
class GraphQLField(object):
    """One output field: its type, arguments, resolver and metadata."""

    def __init__(self, type, args=None, resolver=None,
                 deprecation_reason=None, description=None):
        self.type = type
        self.args = []
        # Arguments learn their own name from the dict key they were
        # registered under.
        for arg_name, arg in (args or {}).items():
            arg.name = arg_name
            self.args.append(arg)
        self.resolver = resolver
        self.deprecation_reason = deprecation_reason
        self.description = description
class GraphQLArgument(object):
    """One field argument: its type plus an optional default and docs."""

    def __init__(self, type, default_value=None, description=None):
        self.type = type
        self.default_value = default_value
        self.description = description
class GraphQLInterfaceType(GraphQLType):
    """Interface Type Definition

    When a field can return one of a heterogeneous set of types, an
    Interface type describes what types are possible, what fields are in
    common across all types, and a function to determine which type is
    actually used when the field is resolved.

    Example:

        EntityType = GraphQLInterfaceType(
            name='Entity',
            fields={
                'name': GraphQLField(GraphQLString),
            })
    """
    def __init__(self, name, fields=None, resolve_type=None, description=None):
        assert name, 'Type must be named.'
        self.name = name
        self.description = description
        self._fields = fields or {}
        self._resolver = resolve_type
        # Populated by add_impl_to_interfaces() as object types declare us.
        self._impls = []
        self._field_map = None
        self._possible_type_names = None
    def get_fields(self):
        # Resolve and cache the field map on first access.
        if self._field_map is None:
            self._field_map = define_field_map(self._fields)
        return self._field_map
    def get_possible_types(self):
        return self._impls
    def is_possible_type(self, type):
        # Cached name-set membership check. NOTE(review): the cache is never
        # invalidated, so implementations registered after the first call
        # are not seen here.
        if self._possible_type_names is None:
            self._possible_type_names = set(
                t.name for t in self.get_possible_types()
            )
        return type.name in self._possible_type_names
    def resolve_type(self, value):
        # Prefer the user-supplied resolver; otherwise poll each possible
        # type's is_type_of() via get_type_of().
        if self._resolver:
            return self._resolver(value)
        return get_type_of(value, self)
def get_type_of(value, abstract_type):
    """Resolve the concrete object type for *value* by polling each of
    *abstract_type*'s possible types; raise Error when a type returns None
    (i.e. cannot answer). Returns None when no type claims the value."""
    for possible_type in abstract_type.get_possible_types():
        answer = possible_type.is_type_of(value)
        if answer is None:
            raise Error(
                'Non-Object Type {} does not implement resolve_type and '
                'Object Type {} does not implement is_type_of. '
                'There is no way to determine if a value is of this type.'
                .format(abstract_type.name, possible_type.name)
            )
        if answer:
            return possible_type
class GraphQLUnionType(GraphQLType):
    """Union Type Definition

    When a field can return one of a heterogeneous set of types, a Union
    type describes what types are possible, and provides a function to
    determine which type is actually used when the field is resolved.

    Example:

        class PetType(GraphQLUnionType):
            name = 'Pet'
            types = [DogType, CatType]

            def resolve_type(self, value):
                if isinstance(value, Dog):
                    return DogType()
                if isinstance(value, Cat):
                    return CatType()
    """
    def __init__(self, name, types=None, resolve_type=None, description=None):
        assert name, 'Type must be named.'
        self.name = name
        self.description = description
        assert types, \
            'Must provide types for Union {}.'.format(name)
        self._possible_type_names = None
        # Unions may only contain object types; reject anything else up front.
        non_obj_types = [t for t in types
                         if not isinstance(t, GraphQLObjectType)]
        if non_obj_types:
            raise Error(
                'Union {} may only contain object types, it cannot '
                'contain: {}.'.format(
                    self.name,
                    ', '.join(str(t) for t in non_obj_types)
                )
            )
        self._types = types
        self._resolve_type = resolve_type
    def get_possible_types(self):
        return self._types
    def is_possible_type(self, type):
        # Cached name-set membership check, built on first use.
        if self._possible_type_names is None:
            self._possible_type_names = set(
                t.name for t in self.get_possible_types()
            )
        return type.name in self._possible_type_names
    def resolve_type(self, value):
        # Prefer the user-supplied resolver; otherwise poll each possible
        # type's is_type_of() via get_type_of().
        if self._resolve_type:
            return self._resolve_type(value)
        return get_type_of(value, self)
try:
    # collections' ABC aliases were deprecated in Python 3.3 and removed in
    # Python 3.10; prefer collections.abc, with a Python 2 fallback.
    from collections.abc import Hashable as _Hashable
except ImportError:  # pragma: no cover - Python 2
    from collections import Hashable as _Hashable


class GraphQLEnumType(GraphQLType):
    """Enum Type Definition

    Some leaf values of requests and input values are Enums. GraphQL
    serializes Enum values as strings, however internally Enums can be
    represented by any kind of type, often integers.

    Example:

        RGBType = GraphQLEnumType('RGB', {
            'RED': 0,
            'GREEN': 1,
            'BLUE': 2,
        })

    Note: If a value is not provided in a definition, the name of the enum
    value will be used as its internal value.
    """
    def __init__(self, name, values, description=None):
        self.name = name
        self.description = description
        # values: mapping of enum name -> GraphQLEnumValue (or raw value).
        self._values = values
        # Lazily-built caches; see the _get_* helpers below.
        self._value_map = None
        self._value_lookup = None
        self._name_lookup = None
    def get_values(self):
        """Return the name -> GraphQLEnumValue map, building it on first use."""
        if self._value_map is None:
            self._value_map = self._define_value_map()
        return self._value_map
    def serialize(self, value):
        """Map an internal value to its enum name, or None if unknown."""
        # Only hashable values can be looked up in the value map.
        if isinstance(value, _Hashable):
            enum_value = self._get_value_lookup().get(value)
            if enum_value:
                return enum_value.name
        return None
    def parse_value(self, value):
        """Map an input value to its enum name, or None if unknown."""
        if isinstance(value, _Hashable):
            enum_value = self._get_value_lookup().get(value)
            if enum_value:
                return enum_value.name
        return None
    def parse_literal(self, value_ast):
        """Map an EnumValue AST node to its internal value (None if unknown)."""
        if isinstance(value_ast, ast.EnumValue):
            enum_value = self._get_name_lookup().get(value_ast.value)
            if enum_value:
                return enum_value.value
    def _define_value_map(self):
        # Normalize raw values to GraphQLEnumValue instances, stamping each
        # with its name and defaulting the internal value to the name.
        value_map = {}
        for value_name, value in self._values.items():
            if not isinstance(value, GraphQLEnumValue):
                value = GraphQLEnumValue(value)
            value.name = value_name
            if value.value is None:
                value.value = value_name
            value_map[value_name] = value
        return value_map
    def _get_value_lookup(self):
        # internal value -> GraphQLEnumValue, built lazily.
        if self._value_lookup is None:
            lookup = {}
            for value_name, value in self.get_values().items():
                lookup[value.value] = value
            self._value_lookup = lookup
        return self._value_lookup
    def _get_name_lookup(self):
        # enum name -> GraphQLEnumValue, built lazily.
        if self._name_lookup is None:
            lookup = {}
            for value_name, value in self.get_values().items():
                lookup[value.name] = value
            self._name_lookup = lookup
        return self._name_lookup
class GraphQLEnumValue(object):
    """One enum member: its internal value plus optional metadata."""

    def __init__(self, value=None, deprecation_reason=None, description=None):
        self.value = value
        self.deprecation_reason = deprecation_reason
        self.description = description
class GraphQLInputObjectType(GraphQLType):
    """Input Object Type Definition

    An input object defines a structured collection of fields which may be
    supplied to a field argument. Using `NonNull` will ensure that a value
    must be provided by the query.

    Example:

        NonNullFloat = GraphQLNonNull(GraphQLFloat())

        class GeoPoint(GraphQLInputObjectType):
            name = 'GeoPoint'
            fields = {
                'lat': GraphQLInputObjectField(NonNullFloat),
                'lon': GraphQLInputObjectField(NonNullFloat),
                'alt': GraphQLInputObjectField(GraphQLFloat(),
                    default_value=0)
            }
    """
    def __init__(self, name, fields, description=None):
        assert name, 'Type must be named.'
        self.name = name
        self.description = description
        # fields may be a dict or a thunk; resolved lazily in get_fields().
        self._fields = fields
        self._field_map = None

    def get_fields(self):
        field_map = self._field_map
        if field_map is None:
            field_map = self._field_map = define_field_map(self._fields)
        return field_map
class GraphQLInputObjectField(object):
    """One input-object field: its type plus optional default and docs."""

    def __init__(self, type, default_value=None, description=None):
        self.type = type
        self.default_value = default_value
        self.description = description
class GraphQLList(GraphQLType):
    """List Modifier

    A list is a kind of type marker: a wrapping type which points to another
    type. Lists are often created within the context of defining the fields
    of an object type.

    Example:

        class PersonType(GraphQLObjectType):
            name = 'Person'

            def get_fields(self):
                return {
                    'parents': GraphQLField(GraphQLList(PersonType())),
                    'children': GraphQLField(GraphQLList(PersonType())),
                }
    """
    def __init__(self, type):
        self.of_type = type

    def __str__(self):
        return '[%s]' % self.of_type

    def is_same_type(self, other):
        # Two lists match when they wrap matching element types.
        return (isinstance(other, GraphQLList)
                and self.of_type.is_same_type(other.of_type))
class GraphQLNonNull(GraphQLType):
    """Non-Null Modifier

    A non-null is a kind of type marker: a wrapping type which points to
    another type. Non-null types enforce that their values are never null
    and can ensure an error is raised if this ever occurs during a request.
    Useful for fields where you can make a strong guarantee on
    non-nullability — e.g. a database row id.

    Example:

        class RowType(GraphQLObjectType):
            name = 'Row'
            fields = {
                'id': GraphQLField(type=GraphQLNonNull(GraphQLString()))
            }

    Note: the enforcement of non-nullability occurs within the executor.
    """
    def __init__(self, type):
        # Double-wrapping in NonNull is meaningless and rejected.
        assert not isinstance(type, GraphQLNonNull), \
            'Cannot nest NonNull inside NonNull.'
        self.of_type = type

    def __str__(self):
        return '%s!' % self.of_type

    def is_same_type(self, other):
        return (isinstance(other, GraphQLNonNull)
                and self.of_type.is_same_type(other.of_type))
| |
"""Shared utilities for different supported platforms."""
import asyncio
from datetime import datetime, timedelta
from http import HTTPStatus
import logging
import aiohttp
import async_timeout
from buienradar.buienradar import parse_data
from buienradar.constants import (
ATTRIBUTION,
CONDITION,
CONTENT,
DATA,
FORECAST,
HUMIDITY,
MESSAGE,
PRESSURE,
STATIONNAME,
STATUS_CODE,
SUCCESS,
TEMPERATURE,
VISIBILITY,
WINDAZIMUTH,
WINDSPEED,
)
from buienradar.urls import JSON_FEED_URL, json_precipitation_forecast_url
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util import dt as dt_util
from .const import SCHEDULE_NOK, SCHEDULE_OK
__all__ = ["BrData"]
_LOGGER = logging.getLogger(__name__)
# Log at WARN level after WARN_THRESHOLD consecutive failures; below the
# threshold, log at DEBUG level so transient fetch errors don't spam the log.
# (Previously expressed as a bare module-level string literal, which is a
# discarded no-op expression rather than documentation.)
WARN_THRESHOLD = 4
def threshold_log(count: int, *args, **kwargs) -> None:
    """Log at warn level after WARN_THRESHOLD failures, debug otherwise."""
    log = _LOGGER.warning if count >= WARN_THRESHOLD else _LOGGER.debug
    log(*args, **kwargs)
class BrData:
    """Fetch buienradar weather and rain data and update the states.

    Polls the buienradar JSON feeds, parses them, notifies the registered
    devices, and re-schedules itself on the Home Assistant event loop.
    """

    # Initialize to warn immediately if the first call fails.
    load_error_count: int = WARN_THRESHOLD
    rain_error_count: int = WARN_THRESHOLD

    def __init__(self, hass, coordinates, timeframe, devices):
        """Initialize the data object.

        coordinates: mapping with CONF_LATITUDE / CONF_LONGITUDE keys.
        timeframe: minutes of precipitation forecast to request.
        devices: entities notified after each successful update.
        """
        self.devices = devices
        self.data = {}
        self.hass = hass
        self.coordinates = coordinates
        self.timeframe = timeframe

    async def update_devices(self):
        """Notify all registered devices/sensors of the new data."""
        if not self.devices:
            return
        for dev in self.devices:
            dev.data_updated(self.data)

    async def schedule_update(self, minute=1):
        """Schedule an update after `minute` minutes."""
        _LOGGER.debug("Scheduling next update in %s minutes", minute)
        nxt = dt_util.utcnow() + timedelta(minutes=minute)
        async_track_point_in_utc_time(self.hass, self.async_update, nxt)

    async def get_data(self, url):
        """Load data from the specified url.

        Returns a dict with SUCCESS/MESSAGE keys and, when a response was
        received, STATUS_CODE/CONTENT; network errors are captured in
        MESSAGE rather than raised.
        """
        _LOGGER.debug("Calling url: %s", url)
        result = {SUCCESS: False, MESSAGE: None}
        resp = None
        try:
            websession = async_get_clientsession(self.hass)
            # NOTE(review): synchronous `with` on async_timeout is only
            # supported by older async_timeout releases — confirm the
            # pinned version before upgrading.
            with async_timeout.timeout(10):
                resp = await websession.get(url)
                result[STATUS_CODE] = resp.status
                result[CONTENT] = await resp.text()
                if resp.status == HTTPStatus.OK:
                    result[SUCCESS] = True
                else:
                    result[MESSAGE] = "Got http statuscode: %d" % (resp.status)
            return result
        except (asyncio.TimeoutError, aiohttp.ClientError) as err:
            result[MESSAGE] = str(err)
            return result
        finally:
            if resp is not None:
                await resp.release()

    async def async_update(self, *_):
        """Update the data from buienradar and re-schedule the next run."""
        content = await self.get_data(JSON_FEED_URL)
        if content.get(SUCCESS) is not True:
            # Unable to get the data; warn only after repeated failures.
            self.load_error_count += 1
            threshold_log(
                self.load_error_count,
                "Unable to retrieve json data from Buienradar (Msg: %s, status: %s)",
                content.get(MESSAGE),
                content.get(STATUS_CODE),
            )
            # schedule new call
            await self.schedule_update(SCHEDULE_NOK)
            return
        self.load_error_count = 0

        # NOTE(review): this comment claimed rounding prevents unnecessary
        # redirects/calls, but no rounding is actually performed here —
        # confirm whether round() was intended.
        lat = self.coordinates[CONF_LATITUDE]
        lon = self.coordinates[CONF_LONGITUDE]

        rainurl = json_precipitation_forecast_url(lat, lon)
        raincontent = await self.get_data(rainurl)
        if raincontent.get(SUCCESS) is not True:
            self.rain_error_count += 1
            threshold_log(
                self.rain_error_count,
                "Unable to retrieve rain data from Buienradar (Msg: %s, status: %s)",
                raincontent.get(MESSAGE),
                raincontent.get(STATUS_CODE),
            )
            # schedule new call
            await self.schedule_update(SCHEDULE_NOK)
            return
        self.rain_error_count = 0

        result = parse_data(
            content.get(CONTENT),
            raincontent.get(CONTENT),
            self.coordinates[CONF_LATITUDE],
            self.coordinates[CONF_LONGITUDE],
            self.timeframe,
            False,
        )
        _LOGGER.debug("Buienradar parsed data: %s", result)
        if result.get(SUCCESS) is not True:
            # Suppress the warning during the 00:00 hour — presumably the
            # feed is known to be unparseable around midnight; confirm.
            if datetime.now().hour > 0:
                _LOGGER.warning(
                    "Unable to parse data from Buienradar. (Msg: %s)",
                    result.get(MESSAGE),
                )
            await self.schedule_update(SCHEDULE_NOK)
            return
        self.data = result.get(DATA)
        await self.update_devices()
        await self.schedule_update(SCHEDULE_OK)

    def _float_or_none(self, key):
        """Return self.data[key] as float, or None when absent/invalid."""
        try:
            return float(self.data.get(key))
        except (ValueError, TypeError):
            return None

    def _int_or_none(self, key):
        """Return self.data[key] as int, or None when absent/invalid."""
        try:
            return int(self.data.get(key))
        except (ValueError, TypeError):
            return None

    @property
    def attribution(self):
        """Return the attribution."""
        return self.data.get(ATTRIBUTION)

    @property
    def stationname(self):
        """Return the name of the selected weatherstation."""
        return self.data.get(STATIONNAME)

    @property
    def condition(self):
        """Return the condition."""
        return self.data.get(CONDITION)

    @property
    def temperature(self):
        """Return the temperature, or None."""
        return self._float_or_none(TEMPERATURE)

    @property
    def pressure(self):
        """Return the pressure, or None."""
        return self._float_or_none(PRESSURE)

    @property
    def humidity(self):
        """Return the humidity, or None."""
        return self._int_or_none(HUMIDITY)

    @property
    def visibility(self):
        """Return the visibility, or None."""
        return self._int_or_none(VISIBILITY)

    @property
    def wind_speed(self):
        """Return the windspeed, or None."""
        return self._float_or_none(WINDSPEED)

    @property
    def wind_bearing(self):
        """Return the wind bearing, or None."""
        return self._int_or_none(WINDAZIMUTH)

    @property
    def forecast(self):
        """Return the forecast data."""
        return self.data.get(FORECAST)
| |
##########################################################################
#
# Copyright (c) 2013-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import inspect
import random
import os
import imath
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class ImageTransformTest( GafferImageTest.ImageTestCase ) :
fileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checker.exr" )
path = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/" )
def testNoPassThrough( self ) :
# We don't want a perfect unfiltered pass-through when the
# transform is the identity. This is because it can cause
# conspicuous jumps when an animated transform happens to
# pass through the identity matrix on a particular frame.
r = GafferImage.ImageReader()
r["fileName"].setValue( self.fileName )
t = GafferImage.ImageTransform()
t["in"].setInput( r["out"] )
t["filter"].setValue( "blackman-harris" )
self.assertNotEqual( GafferImage.ImageAlgo.imageHash( t["out"] ), GafferImage.ImageAlgo.imageHash( t["in"] ) )
self.assertNotEqual( GafferImage.ImageAlgo.image( t["out"] ), GafferImage.ImageAlgo.image( t["in"] ) )
def testTilesWithSameInputTiles( self ) :
# This particular transform (along with many others) has output tiles
# which share the exact same set of input tiles affecting their result.
# This revealed a bug in ImageTransform::hashChannelData() whereby the
# tile origin wasn't being hashed in to break the hashes for these output
# tiles apart.
r = GafferImage.ImageReader()
r["fileName"].setValue( os.path.join( self.path, "rgb.100x100.exr" ) )
t = GafferImage.ImageTransform()
t["in"].setInput( r["out"] )
t["transform"]["rotate"].setValue( -1. )
t["transform"]["scale"].setValue( imath.V2f( 1.5, 1. ) )
r2 = GafferImage.ImageReader()
r2["fileName"].setValue( os.path.join( self.path, "knownTransformBug.exr" ) )
self.assertImagesEqual( t["out"], r2["out"], ignoreMetadata = True, maxDifference = 0.05 )
def testImageHash( self ) :
r = GafferImage.ImageReader()
r["fileName"].setValue( self.fileName )
t = GafferImage.ImageTransform()
previousHash = GafferImage.ImageAlgo.imageHash( t["out"] )
for plug in t["transform"].children() :
if isinstance( plug, Gaffer.FloatPlug ) :
plug.setValue( 1 )
else :
plug.setValue( imath.V2f( 2 ) )
hash = GafferImage.ImageAlgo.imageHash( t["out"] )
self.assertNotEqual( hash, previousHash )
t["invert"].setValue( True )
invertHash = GafferImage.ImageAlgo.imageHash( t["out"] )
t["invert"].setValue( False )
self.assertNotEqual( invertHash, hash )
previousHash = hash
def testDirtyPropagation( self ) :
r = GafferImage.ImageReader()
r["fileName"].setValue( self.fileName )
t = GafferImage.ImageTransform()
t["in"].setInput( r["out"] )
for plug in t["transform"].children() :
cs = GafferTest.CapturingSlot( t.plugDirtiedSignal() )
if isinstance( plug, Gaffer.FloatPlug ) :
plug.setValue( 1 )
else :
plug.setValue( imath.V2f( 2 ) )
dirtiedPlugs = { x[0] for x in cs }
self.assertTrue( t["out"]["dataWindow"] in dirtiedPlugs )
self.assertTrue( t["out"]["channelData"] in dirtiedPlugs )
self.assertTrue( t["out"] in dirtiedPlugs )
self.assertFalse( t["out"]["format"] in dirtiedPlugs )
self.assertFalse( t["out"]["metadata"] in dirtiedPlugs )
self.assertFalse( t["out"]["channelNames"] in dirtiedPlugs )
def testOutputFormat( self ) :
r = GafferImage.ImageReader()
r["fileName"].setValue( self.fileName )
t = GafferImage.ImageTransform()
t["in"].setInput( r["out"] )
t["transform"]["translate"].setValue( imath.V2f( 2., 2. ) )
self.assertEqual( t["out"]["format"].hash(), r["out"]["format"].hash() )
def testNonFlatThrows( self ) :
transform = GafferImage.ImageTransform()
transform["transform"]["translate"].setValue( imath.V2f( 20., 20.5 ) )
self.assertRaisesDeepNotSupported( transform )
def testDisabled( self ) :
r = GafferImage.ImageReader()
r["fileName"].setValue( self.fileName )
t = GafferImage.ImageTransform()
t["in"].setInput( r["out"] )
t["transform"]["translate"].setValue( imath.V2f( 2., 2. ) )
t["transform"]["rotate"].setValue( 90 )
t["enabled"].setValue( True )
self.assertNotEqual( GafferImage.ImageAlgo.imageHash( r["out"] ), GafferImage.ImageAlgo.imageHash( t["out"] ) )
t["enabled"].setValue( False )
self.assertEqual( GafferImage.ImageAlgo.imageHash( r["out"] ), GafferImage.ImageAlgo.imageHash( t["out"] ) )
def testPassThrough( self ) :
c = GafferImage.Constant()
t = GafferImage.ImageTransform()
t["in"].setInput( c["out"] )
t["transform"]["translate"].setValue( imath.V2f( 1, 0 ) )
self.assertEqual( t["out"]["metadata"].hash(), c["out"]["metadata"].hash() )
self.assertEqual( t["out"]["format"].hash(), c["out"]["format"].hash() )
self.assertEqual( t["out"]["channelNames"].hash(), c["out"]["channelNames"].hash() )
self.assertEqual( t["out"]["metadata"].getValue(), c["out"]["metadata"].getValue() )
self.assertEqual( t["out"]["format"].getValue(), c["out"]["format"].getValue() )
self.assertEqual( t["out"]["channelNames"].getValue(), c["out"]["channelNames"].getValue() )
def testCopyPaste( self ) :
script = Gaffer.ScriptNode()
script["t"] = GafferImage.ImageTransform()
script.execute( script.serialise( filter = Gaffer.StandardSet( [ script["t"] ] ) ) )
def testAffects( self ) :
c = GafferImage.Constant()
t = GafferImage.ImageTransform()
t["in"].setInput( c["out"] )
cs = GafferTest.CapturingSlot( t.plugDirtiedSignal() )
c["color"]["r"].setValue( .25 )
self.assertTrue( t["out"]["channelData"] in set( s[0] for s in cs ) )
def testInternalPlugsNotSerialised( self ) :
s = Gaffer.ScriptNode()
s["t"] = GafferImage.ImageTransform()
ss = s.serialise()
for name in s["t"].keys() :
self.assertFalse( '\"{}\"'.format( name ) in ss )
def testRotate360( self ) :
# Check that a 360 degree rotation gives us the
# same image back, regardless of pivot.
r = GafferImage.ImageReader()
r["fileName"].setValue( self.fileName )
t = GafferImage.ImageTransform()
t["in"].setInput( r["out"] )
t["transform"]["rotate"].setValue( 360 )
for i in range( 0, 100 ) :
# Check that the ImageTransform isn't being smart and treating
# this as a no-op. If the ImageTransform gets smart we'll need
# to adjust our test to force it to do an actual resampling of
# the image, since that's what we want to test.
self.assertNotEqual(
r["out"].channelDataHash( "R", imath.V2i( 0 ) ),
t["out"].channelDataHash( "R", imath.V2i( 0 ) )
)
# Check that the rotated image is basically the same as the input.
t["transform"]["pivot"].setValue(
imath.V2f( random.uniform( -100, 100 ), random.uniform( -100, 100 ) ),
)
self.assertImagesEqual( r["out"], t["out"], maxDifference = 0.0001, ignoreDataWindow = True )
def testSubpixelTranslate( self ) :
# This checks we can do subpixel translations properly - at one
# time a bug in Resample prevented this.
# Use a Constant and a Crop to make a vertical line.
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( 100, 100 ) )
constant["color"].setValue( imath.Color4f( 1 ) )
crop = GafferImage.Crop()
crop["in"].setInput( constant["out"] )
crop["affectDataWindow"].setValue( True )
crop["affectDisplayWindow"].setValue( False )
crop["area"].setValue( imath.Box2i( imath.V2i( 10, 0 ), imath.V2i( 11, 100 ) ) )
# Check it's where we expect
transform = GafferImage.ImageTransform()
transform["in"].setInput( crop["out"] )
transform["filter"].setValue( "rifman" )
def sample( position ) :
sampler = GafferImage.Sampler(
transform["out"],
"R",
imath.Box2i( position, position + imath.V2i( 1 ) )
)
return sampler.sample( position.x, position.y )
self.assertEqual( sample( imath.V2i( 9, 10 ) ), 0 )
self.assertEqual( sample( imath.V2i( 10, 10 ) ), 1 )
self.assertEqual( sample( imath.V2i( 11, 10 ) ), 0 )
# Move it a tiiny bit, and check it has moved
# a tiiny bit.
transform["transform"]["translate"]["x"].setValue( 0.1 )
self.assertEqual( sample( imath.V2i( 9, 10 ) ), 0 )
self.assertGreater( sample( imath.V2i( 10, 10 ) ), 0.9 )
self.assertGreater( sample( imath.V2i( 11, 10 ) ), 0.09 )
def testNegativeScale( self ) :
r = GafferImage.ImageReader()
r["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checker2x2.exr" ) )
t = GafferImage.ImageTransform()
t["in"].setInput( r["out"] )
t["transform"]["pivot"].setValue( imath.V2f( 1 ) )
t["transform"]["scale"]["x"].setValue( -1 )
sampler = GafferImage.Sampler(
t["out"],
"R",
t["out"]["dataWindow"].getValue()
)
self.assertEqual( sampler.sample( 0, 0 ), 0 )
self.assertEqual( sampler.sample( 1, 0 ), 1 )
self.assertEqual( sampler.sample( 0, 1 ), 1 )
self.assertEqual( sampler.sample( 1, 1 ), 0 )
def testRotateEmptyDataWindow( self ) :
r = GafferImage.ImageReader()
self.assertEqual( r["out"]["dataWindow"].getValue(), imath.Box2i() )
t = GafferImage.ImageTransform()
t["in"].setInput( r["out"] )
t["transform"]["rotate"].setValue( 1 )
self.assertEqual( t["out"]["dataWindow"].getValue(), imath.Box2i() )
def testInvertTransform( self ):
r = GafferImage.ImageReader()
r["fileName"].setValue( self.fileName )
t = GafferImage.ImageTransform()
t["in"].setInput( r["out"] )
t["transform"]["rotate"].setValue( 45 )
tInv = GafferImage.ImageTransform()
tInv["in"].setInput( t["out"] )
tInv["transform"]["rotate"].setValue( 45 )
tInv["invert"].setValue( True )
self.assertNotEqual(
r["out"].channelDataHash( "R", imath.V2i( 0 ) ),
t["out"].channelDataHash( "R", imath.V2i( 0 ) )
)
self.assertImagesEqual( r["out"], tInv["out"], maxDifference = 0.5, ignoreDataWindow=True )
@GafferTest.TestRunner.PerformanceTestMethod()
def testRotationPerformance( self ) :
checker = GafferImage.Checkerboard()
checker["format"].setValue( GafferImage.Format( 3000, 3000 ) )
transform = GafferImage.ImageTransform()
transform["in"].setInput( checker["out"] )
transform["transform"]["pivot"].setValue( imath.V2f( 1500 ) )
transform["transform"]["rotate"].setValue( 2.5 )
with GafferTest.TestRunner.PerformanceScope() :
GafferImageTest.processTiles( transform["out"] )
@GafferTest.TestRunner.PerformanceTestMethod()
def testTranslationPerformance( self ) :
checker = GafferImage.Checkerboard()
checker["format"].setValue( GafferImage.Format( 3000, 3000 ) )
transform = GafferImage.ImageTransform()
transform["in"].setInput( checker["out"] )
transform["transform"]["translate"].setValue( imath.V2f( 2.2 ) )
with GafferTest.TestRunner.PerformanceScope() :
GafferImageTest.processTiles( transform["out"] )
@GafferTest.TestRunner.PerformanceTestMethod()
def testDownsizingPerformance( self ) :
checker = GafferImage.Checkerboard()
checker["format"].setValue( GafferImage.Format( 3000, 3000 ) )
transform = GafferImage.ImageTransform()
transform["in"].setInput( checker["out"] )
transform["transform"]["scale"].setValue( imath.V2f( 0.1 ) )
with GafferTest.TestRunner.PerformanceScope() :
GafferImageTest.processTiles( transform["out"] )
@GafferTest.TestRunner.PerformanceTestMethod()
def testUpsizingPerformance( self ) :
    # Time a 3x enlargement of a medium checkerboard.
    source = GafferImage.Checkerboard()
    source["format"].setValue( GafferImage.Format( 1000, 1000 ) )

    enlarge = GafferImage.ImageTransform()
    enlarge["in"].setInput( source["out"] )
    enlarge["transform"]["scale"].setValue( imath.V2f( 3 ) )

    with GafferTest.TestRunner.PerformanceScope() :
        GafferImageTest.processTiles( enlarge["out"] )
@GafferTest.TestRunner.PerformanceTestMethod()
def testRotationAndScalingPerformance( self ) :
    # Time a combined rotate + scale about the image centre.
    source = GafferImage.Checkerboard()
    source["format"].setValue( GafferImage.Format( 3000, 3000 ) )

    combined = GafferImage.ImageTransform()
    combined["in"].setInput( source["out"] )
    combined["transform"]["pivot"].setValue( imath.V2f( 1500 ) )
    combined["transform"]["rotate"].setValue( 2.5 )
    combined["transform"]["scale"].setValue( imath.V2f( 0.75 ) )

    with GafferTest.TestRunner.PerformanceScope() :
        GafferImageTest.processTiles( combined["out"] )
@GafferTest.TestRunner.PerformanceTestMethod()
def testConcatenationPerformance1( self ) :
    # Two concatenated transforms : a rotation feeding a translation.
    source = GafferImage.Checkerboard()
    source["format"].setValue( GafferImage.Format( 3000, 3000 ) )

    rotation = GafferImage.ImageTransform( "Transform1" )
    rotation["in"].setInput( source["out"] )
    rotation["transform"]["pivot"].setValue( imath.V2f( 1500 ) )
    rotation["transform"]["rotate"].setValue( 2.5 )

    translation = GafferImage.ImageTransform( "Transform2" )
    translation["in"].setInput( rotation["out"] )
    translation["transform"]["translate"].setValue( imath.V2f( 10 ) )

    with GafferTest.TestRunner.PerformanceScope() :
        GafferImageTest.processTiles( translation["out"] )
@GafferTest.TestRunner.PerformanceTestMethod()
def testConcatenationPerformance2( self ) :
    # As testConcatenationPerformance1, but the upstream transform
    # also scales, exercising a more general concatenated matrix.
    source = GafferImage.Checkerboard()
    source["format"].setValue( GafferImage.Format( 3000, 3000 ) )

    rotateScale = GafferImage.ImageTransform( "Transform1" )
    rotateScale["in"].setInput( source["out"] )
    rotateScale["transform"]["pivot"].setValue( imath.V2f( 1500 ) )
    rotateScale["transform"]["rotate"].setValue( 2.5 )
    rotateScale["transform"]["scale"].setValue( imath.V2f( 1.1 ) )

    translation = GafferImage.ImageTransform( "Transform2" )
    translation["in"].setInput( rotateScale["out"] )
    translation["transform"]["translate"].setValue( imath.V2f( 10 ) )

    with GafferTest.TestRunner.PerformanceScope() :
        GafferImageTest.processTiles( translation["out"] )
def testOutTransform( self ) :
    upstream = GafferImage.ImageTransform()
    downstream = GafferImage.ImageTransform()
    upstream["transform"]["scale"]["x"].setValue( .5 )
    downstream["transform"]["scale"]["x"].setValue( 2 )

    # Unconnected, the downstream node advertises its own (non-identity)
    # transform on the internal `__outTransform` plug.
    self.assertNotEqual( downstream["__outTransform"].getValue(), imath.M33f() )

    # Once concatenated, the combined 0.5 * 2 scale cancels to identity.
    downstream["in"].setInput( upstream["out"] )
    self.assertEqual( downstream["__outTransform"].getValue(), imath.M33f() )
def testNoContextLeakage( self ) :
    constant = GafferImage.Constant()

    first = GafferImage.ImageTransform()
    first["in"].setInput( constant["out"] )
    second = GafferImage.ImageTransform()
    second["in"].setInput( first["out"] )

    # Pulling on the end of the chain must only ever evaluate the source
    # with the standard image context variables - nothing extra may leak in.
    with Gaffer.ContextMonitor( root = constant ) as monitor :
        self.assertImagesEqual( second["out"], second["out"] )

    self.assertEqual(
        set( monitor.combinedStatistics().variableNames() ),
        { "frame", "framesPerSecond", "image:channelName", "image:tileOrigin" },
    )
def testMatrixPlugConnection( self ) :
    upstream = GafferImage.ImageTransform()
    downstream = GafferImage.ImageTransform()

    # Connecting the image plugs also wires up the internal matrix plugs...
    downstream["in"].setInput( upstream["out"] )
    self.assertTrue( downstream["__inTransform"].getInput() == upstream["__outTransform"] )

    # ...and disconnecting the images breaks the matrix connection again.
    downstream["in"].setInput( None )
    self.assertFalse( downstream["__inTransform"].getInput() == upstream["__outTransform"] )
def testMatrixConnectionNotSerialised( self ) :
    script = Gaffer.ScriptNode()
    script["t1"] = GafferImage.ImageTransform()
    script["t2"] = GafferImage.ImageTransform()
    script["t2"]["in"].setInput( script["t1"]["out"] )

    # Only the user-made `in` connection should appear in the serialisation;
    # the internal matrix connection is recreated automatically on load.
    self.assertEqual( script.serialise().count( "setInput" ), 1 )
def testConcatenation( self ) :
    """
    Checks that a concatenated chain of ImageTransforms produces
    (approximately) the same image as the equivalent non-concatenated
    chain, for a range of random transforms.
    """
    # Identical transformation chains, but one
    # with concatenation broken by a Blur node.
    #
    # checker
    # |
    # deleteChannels
    # /\
    # / \
    # tc1 t1
    # | |
    # tc2 blur
    # |
    # t2
    checker = GafferImage.Checkerboard()
    checker["format"].setValue( GafferImage.Format( 200, 200 ) )
    deleteChannels = GafferImage.DeleteChannels()
    deleteChannels["in"].setInput( checker["out"] )
    deleteChannels["channels"].setValue( "A" )
    tc1 = GafferImage.ImageTransform()
    tc1["in"].setInput( deleteChannels["out"] )
    tc1["filter"].setValue( "gaussian" )
    tc2 = GafferImage.ImageTransform()
    tc2["in"].setInput( tc1["out"] )
    tc2["filter"].setInput( tc1["filter"] )
    t1 = GafferImage.ImageTransform()
    t1["in"].setInput( deleteChannels["out"] )
    # The non-concatenated branch shares the exact same transform and
    # filter plugs, so both branches always describe the same operation.
    t1["transform"].setInput( tc1["transform"] )
    t1["filter"].setInput( tc1["filter"] )
    blur = GafferImage.Blur()
    blur["in"].setInput( t1["out"] )
    t2 = GafferImage.ImageTransform()
    t2["in"].setInput( blur["out"] )
    t2["transform"].setInput( tc2["transform"] )
    t2["filter"].setInput( tc1["filter"] )
    # The blur doesn't do anything except
    # break concatenation. Check that tc2
    # is practically identical to t2 for
    # a range of transforms.
    for i in range( 0, 10 ) :
        # Re-seed per iteration so each set of random transforms is
        # reproducible. NOTE : the order of the uniform() calls below
        # determines the values, so it must not be changed.
        random.seed( i )
        translate1 = imath.V2f( random.uniform( -100, 100 ), random.uniform( -100, 100 ) )
        rotate1 = random.uniform( -360, 360 )
        scale1 = imath.V2f( random.uniform( -2, 2 ), random.uniform( -2, 2 ) )
        tc1["transform"]["translate"].setValue( translate1 )
        tc1["transform"]["rotate"].setValue( rotate1 )
        tc1["transform"]["scale"].setValue( scale1 )
        translate2 = imath.V2f( random.uniform( -100, 100 ), random.uniform( -100, 100 ) )
        rotate2 = random.uniform( -360, 360 )
        scale2 = imath.V2f( random.uniform( -2, 2 ), random.uniform( -2, 2 ) )
        tc2["transform"]["translate"].setValue( translate2 )
        tc2["transform"]["rotate"].setValue( rotate2 )
        tc2["transform"]["scale"].setValue( scale2 )
        # The `maxDifference` here is surprisingly high, but visual checks
        # show that it is legitimate : differences in filtering are that great.
        # The threshold is still significantly lower than the differences between
        # checker tiles, so does guarantee that tiles aren't getting out of alignment.
        self.assertImagesEqual( tc2["out"], t2["out"], maxDifference = 0.11, ignoreDataWindow = True )
def testDisabledAndNonConcatenating( self ) :
    source = GafferImage.Checkerboard()
    source["format"].setValue( GafferImage.Format( 200, 200 ) )

    # Build a chain of three transforms, each translating 10 in x.
    upstream = source
    transforms = []
    for _ in range( 3 ) :
        node = GafferImage.ImageTransform()
        node["in"].setInput( upstream["out"] )
        node["transform"]["translate"]["x"].setValue( 10 )
        transforms.append( node )
        upstream = node
    t1, t2, t3 = transforms

    # All three translations apply : 3 * 10.
    self.assertEqual( t3["out"]["dataWindow"].getValue().min().x, 30 )

    # Turning concatenation off changes the implementation, not the result.
    t2["concatenate"].setValue( False )
    self.assertEqual( t3["out"]["dataWindow"].getValue().min().x, 30 )

    # Disabling the middle node removes its translation entirely.
    t2["enabled"].setValue( False )
    self.assertEqual( t3["out"]["dataWindow"].getValue().min().x, 20 )
# Standard unittest entry point when the file is run as a script.
if __name__ == "__main__":
    unittest.main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Dave Lapsley, Nicira Networks, Inc.
import mox
from oslo.config import cfg
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.openstack.common import log
from neutron.plugins.openvswitch.agent import ovs_neutron_agent
from neutron.plugins.openvswitch.common import constants
from neutron.tests import base
# Useful global dummy variables.
# Fixture ids shared by all tests below.
NET_UUID = '3faeebfe-5d37-11e1-a64b-000c29d5f0a7'
LS_ID = 42          # logical switch (segmentation) id
LV_ID = 42          # local VLAN id
LV_IDS = [42, 43]
VIF_ID = '404deaec-5d37-11e1-a64b-000c29d5f0a8'
VIF_MAC = '3c:09:24:1e:78:23'
OFPORT_NUM = 1
# A single dummy VIF port, and the local VLAN mappings built from it for
# each network type exercised by the tests (tunnel, flat and vlan).
VIF_PORT = ovs_lib.VifPort('port', OFPORT_NUM,
                           VIF_ID, VIF_MAC, 'switch')
VIF_PORTS = {VIF_ID: VIF_PORT}
LVM = ovs_neutron_agent.LocalVLANMapping(LV_ID, 'gre', None, LS_ID, VIF_PORTS)
LVM_FLAT = ovs_neutron_agent.LocalVLANMapping(
    LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS)
LVM_VLAN = ovs_neutron_agent.LocalVLANMapping(
    LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS)
TUN_OFPORTS = {constants.TYPE_GRE: {'ip1': '11', 'ip2': '12'}}
# OpenFlow match strings for broadcast/multicast vs unicast destinations.
BCAST_MAC = "01:00:00:00:00:00/01:00:00:00:00:00"
UCAST_MAC = "00:00:00:00:00:00/01:00:00:00:00:00"
class DummyPort:
    """Minimal stand-in for a port record; only the interface id is used."""
    def __init__(self, interface_id):
        self.interface_id = interface_id
class DummyVlanBinding:
    """Minimal stand-in for a network-to-VLAN binding record."""
    def __init__(self, network_id, vlan_id):
        self.network_id = network_id
        self.vlan_id = vlan_id
class TunnelTest(base.BaseTestCase):
    """
    Tests for OVSNeutronAgent in tunneling mode, driven by mox mocks.

    ``setUp`` records, in order, every OVS/ip_lib call the agent constructor
    is expected to make; each test then records its own additional
    expectations before ``ReplayAll``/``VerifyAll``. Because mox verifies
    recorded calls, the order of the expectation statements below is
    significant and must mirror the agent's start-up sequence.
    """

    def setUp(self):
        """Record the mock expectations for agent construction."""
        super(TunnelTest, self).setUp()
        cfg.CONF.set_override('rpc_backend',
                              'neutron.openstack.common.rpc.impl_fake')
        # report_interval 0 disables the periodic state-report loop.
        cfg.CONF.set_override('report_interval', 0, 'AGENT')
        self.mox = mox.Mox()
        self.addCleanup(self.mox.UnsetStubs)

        # Bridge names / ofport numbers used throughout the expectations.
        self.INT_BRIDGE = 'integration_bridge'
        self.TUN_BRIDGE = 'tunnel_bridge'
        self.MAP_TUN_BRIDGE = 'tunnel_bridge_mapping'
        self.NET_MAPPING = {'net1': self.MAP_TUN_BRIDGE}
        self.INT_OFPORT = 11111
        self.TUN_OFPORT = 22222
        self.MAP_TUN_OFPORT = 33333
        self.VETH_MTU = None

        # Mock veth pair joining the integration and physical bridges.
        self.inta = self.mox.CreateMock(ip_lib.IPDevice)
        self.intb = self.mox.CreateMock(ip_lib.IPDevice)
        self.inta.link = self.mox.CreateMock(ip_lib.IpLinkCommand)
        self.intb.link = self.mox.CreateMock(ip_lib.IpLinkCommand)

        # Every OVSBridge the agent constructs becomes a mock; creation
        # order below must match the agent's construction order.
        self.mox.StubOutClassWithMocks(ovs_lib, 'OVSBridge')

        # Integration bridge set-up.
        self.mock_int_bridge = ovs_lib.OVSBridge(self.INT_BRIDGE, 'sudo')
        self.mock_int_bridge.get_local_port_mac().AndReturn('000000000001')
        self.mock_int_bridge.delete_port('patch-tun')
        self.mock_int_bridge.remove_all_flows()
        self.mock_int_bridge.add_flow(priority=1, actions='normal')

        # Physical (mapping) bridge set-up.
        self.mock_map_tun_bridge = ovs_lib.OVSBridge(
            self.MAP_TUN_BRIDGE, 'sudo')
        self.mock_map_tun_bridge.br_name = self.MAP_TUN_BRIDGE
        self.mock_map_tun_bridge.remove_all_flows()
        self.mock_map_tun_bridge.add_flow(priority=1, actions='normal')

        # veth plumbing between integration and physical bridges.
        self.mock_int_bridge.delete_port('int-tunnel_bridge_mapping')
        self.mock_map_tun_bridge.delete_port('phy-tunnel_bridge_mapping')
        self.mock_int_bridge.add_port(self.inta)
        self.mock_map_tun_bridge.add_port(self.intb)
        self.inta.link.set_up()
        self.intb.link.set_up()
        self.mock_int_bridge.add_flow(priority=2, in_port=None, actions='drop')
        self.mock_map_tun_bridge.add_flow(
            priority=2, in_port=None, actions='drop')

        # Tunnel bridge set-up and patch ports to the integration bridge.
        self.mock_tun_bridge = ovs_lib.OVSBridge(self.TUN_BRIDGE, 'sudo')
        self.mock_tun_bridge.reset_bridge()
        self.mock_int_bridge.add_patch_port(
            'patch-tun', 'patch-int').AndReturn(self.TUN_OFPORT)
        self.mock_tun_bridge.add_patch_port(
            'patch-int', 'patch-tun').AndReturn(self.INT_OFPORT)
        self.mock_tun_bridge.remove_all_flows()

        # Base tunnel-bridge flow table wiring.
        self.mock_tun_bridge.add_flow(priority=1,
                                      in_port=self.INT_OFPORT,
                                      actions="resubmit(,%s)" %
                                      constants.PATCH_LV_TO_TUN)
        self.mock_tun_bridge.add_flow(priority=0, actions='drop')
        self.mock_tun_bridge.add_flow(table=constants.PATCH_LV_TO_TUN,
                                      dl_dst=UCAST_MAC,
                                      actions="resubmit(,%s)" %
                                      constants.UCAST_TO_TUN)
        self.mock_tun_bridge.add_flow(table=constants.PATCH_LV_TO_TUN,
                                      dl_dst=BCAST_MAC,
                                      actions="resubmit(,%s)" %
                                      constants.FLOOD_TO_TUN)
        # One default-drop table per supported tunnel type.
        for tunnel_type in constants.TUNNEL_NETWORK_TYPES:
            self.mock_tun_bridge.add_flow(
                table=constants.TUN_TABLE[tunnel_type],
                priority=0,
                actions="drop")
        # MAC-learning flow installed into the unicast table.
        learned_flow = ("table=%s,"
                        "priority=1,"
                        "hard_timeout=300,"
                        "NXM_OF_VLAN_TCI[0..11],"
                        "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],"
                        "load:0->NXM_OF_VLAN_TCI[],"
                        "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],"
                        "output:NXM_OF_IN_PORT[]" %
                        constants.UCAST_TO_TUN)
        self.mock_tun_bridge.add_flow(table=constants.LEARN_FROM_TUN,
                                      priority=1,
                                      actions="learn(%s),output:%s" %
                                      (learned_flow, self.INT_OFPORT))
        self.mock_tun_bridge.add_flow(table=constants.UCAST_TO_TUN,
                                      priority=0,
                                      actions="resubmit(,%s)" %
                                      constants.FLOOD_TO_TUN)
        self.mock_tun_bridge.add_flow(table=constants.FLOOD_TO_TUN,
                                      priority=0,
                                      actions="drop")

        # Device existence checks and stale veth clean-up.
        self.mox.StubOutWithMock(ip_lib, 'device_exists')
        ip_lib.device_exists('tunnel_bridge_mapping', 'sudo').AndReturn(True)
        ip_lib.device_exists(
            'int-tunnel_bridge_mapping', 'sudo').AndReturn(True)
        self.mox.StubOutWithMock(ip_lib.IpLinkCommand, 'delete')
        ip_lib.IPDevice('int-tunnel_bridge_mapping').link.delete()
        self.mox.StubOutClassWithMocks(ip_lib, 'IPWrapper')
        ip_lib.IPWrapper('sudo').add_veth(
            'int-tunnel_bridge_mapping',
            'phy-tunnel_bridge_mapping').AndReturn([self.inta, self.intb])

        self.mox.StubOutWithMock(ovs_lib, 'get_bridges')
        ovs_lib.get_bridges('sudo').AndReturn([self.INT_BRIDGE,
                                               self.TUN_BRIDGE,
                                               self.MAP_TUN_BRIDGE])

    def test_construct(self):
        """Agent construction makes exactly the calls recorded in setUp."""
        self.mox.ReplayAll()

        ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
                                          self.TUN_BRIDGE,
                                          '10.0.0.1', self.NET_MAPPING,
                                          'sudo', 2, ['gre'],
                                          self.VETH_MTU)
        self.mox.VerifyAll()

    def test_construct_vxlan(self):
        """With vxlan enabled the agent additionally checks OVS versions."""
        self.mox.StubOutWithMock(ovs_lib, 'get_installed_ovs_klm_version')
        ovs_lib.get_installed_ovs_klm_version().AndReturn("1.10")
        self.mox.StubOutWithMock(ovs_lib, 'get_installed_ovs_usr_version')
        ovs_lib.get_installed_ovs_usr_version('sudo').AndReturn("1.10")
        self.mox.ReplayAll()

        ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
                                          self.TUN_BRIDGE,
                                          '10.0.0.1', self.NET_MAPPING,
                                          'sudo', 2, ['vxlan'],
                                          self.VETH_MTU)
        self.mox.VerifyAll()

    def test_provision_local_vlan(self):
        """Provisioning a tunnel network installs flood and learn flows."""
        ofports = ','.join(TUN_OFPORTS[constants.TYPE_GRE].values())
        self.mock_tun_bridge.mod_flow(table=constants.FLOOD_TO_TUN,
                                      priority=1,
                                      dl_vlan=LV_ID,
                                      actions="strip_vlan,"
                                      "set_tunnel:%s,output:%s" %
                                      (LS_ID, ofports))
        self.mock_tun_bridge.add_flow(table=constants.TUN_TABLE['gre'],
                                      priority=1,
                                      tun_id=LS_ID,
                                      actions="mod_vlan_vid:%s,resubmit(,%s)" %
                                      (LV_ID, constants.LEARN_FROM_TUN))
        self.mox.ReplayAll()

        a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
                                              self.TUN_BRIDGE,
                                              '10.0.0.1', self.NET_MAPPING,
                                              'sudo', 2, ['gre'],
                                              self.VETH_MTU)
        a.available_local_vlans = set([LV_ID])
        a.tun_br_ofports = TUN_OFPORTS
        a.provision_local_vlan(NET_UUID, constants.TYPE_GRE, None, LS_ID)
        self.mox.VerifyAll()

    def test_provision_local_vlan_flat(self):
        """Flat networks get strip_vlan / mod_vlan_vid translation flows."""
        action_string = 'strip_vlan,normal'
        self.mock_map_tun_bridge.add_flow(
            priority=4, in_port=self.MAP_TUN_OFPORT,
            dl_vlan=LV_ID, actions=action_string)

        action_string = 'mod_vlan_vid:%s,normal' % LV_ID
        self.mock_int_bridge.add_flow(priority=3, in_port=self.INT_OFPORT,
                                      dl_vlan=65535, actions=action_string)
        self.mox.ReplayAll()

        a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
                                              self.TUN_BRIDGE,
                                              '10.0.0.1', self.NET_MAPPING,
                                              'sudo', 2, ['gre'],
                                              self.VETH_MTU)
        a.available_local_vlans = set([LV_ID])
        a.phys_brs['net1'] = self.mock_map_tun_bridge
        a.phys_ofports['net1'] = self.MAP_TUN_OFPORT
        a.int_ofports['net1'] = self.INT_OFPORT
        a.provision_local_vlan(NET_UUID, constants.TYPE_FLAT, 'net1', LS_ID)
        self.mox.VerifyAll()

    def test_provision_local_vlan_flat_fail(self):
        """Provisioning an unmapped flat network installs no flows."""
        self.mox.ReplayAll()

        a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
                                              self.TUN_BRIDGE,
                                              '10.0.0.1', self.NET_MAPPING,
                                              'sudo', 2, ['gre'],
                                              self.VETH_MTU)
        a.provision_local_vlan(NET_UUID, constants.TYPE_FLAT, 'net2', LS_ID)
        self.mox.VerifyAll()

    def test_provision_local_vlan_vlan(self):
        """VLAN networks translate between local and provider vlan ids."""
        action_string = 'mod_vlan_vid:%s,normal' % LS_ID
        self.mock_map_tun_bridge.add_flow(
            priority=4, in_port=self.MAP_TUN_OFPORT,
            dl_vlan=LV_ID, actions=action_string)

        action_string = 'mod_vlan_vid:%s,normal' % LS_ID
        self.mock_int_bridge.add_flow(priority=3, in_port=self.INT_OFPORT,
                                      dl_vlan=LV_ID, actions=action_string)
        self.mox.ReplayAll()

        a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
                                              self.TUN_BRIDGE,
                                              '10.0.0.1', self.NET_MAPPING,
                                              'sudo', 2, ['gre'],
                                              self.VETH_MTU)
        a.available_local_vlans = set([LV_ID])
        a.phys_brs['net1'] = self.mock_map_tun_bridge
        a.phys_ofports['net1'] = self.MAP_TUN_OFPORT
        a.int_ofports['net1'] = self.INT_OFPORT
        a.provision_local_vlan(NET_UUID, constants.TYPE_VLAN, 'net1', LS_ID)
        self.mox.VerifyAll()

    def test_provision_local_vlan_vlan_fail(self):
        """Provisioning an unmapped vlan network installs no flows."""
        self.mox.ReplayAll()

        a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
                                              self.TUN_BRIDGE,
                                              '10.0.0.1', self.NET_MAPPING,
                                              'sudo', 2, ['gre'],
                                              self.VETH_MTU)
        a.provision_local_vlan(NET_UUID, constants.TYPE_VLAN, 'net2', LS_ID)
        self.mox.VerifyAll()

    def test_reclaim_local_vlan(self):
        """Reclaiming a tunnel vlan deletes its flows and frees the id."""
        self.mock_tun_bridge.delete_flows(
            table=constants.TUN_TABLE['gre'], tun_id=LS_ID)
        self.mock_tun_bridge.delete_flows(dl_vlan=LVM.vlan)
        self.mox.ReplayAll()

        a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
                                              self.TUN_BRIDGE,
                                              '10.0.0.1', self.NET_MAPPING,
                                              'sudo', 2, ['gre'],
                                              self.VETH_MTU)
        a.available_local_vlans = set()
        a.local_vlan_map[NET_UUID] = LVM
        a.reclaim_local_vlan(NET_UUID)
        self.assertTrue(LVM.vlan in a.available_local_vlans)
        self.mox.VerifyAll()

    def test_reclaim_local_vlan_flat(self):
        """Reclaiming a flat-network vlan deletes its translation flows."""
        self.mock_map_tun_bridge.delete_flows(
            in_port=self.MAP_TUN_OFPORT, dl_vlan=LVM_FLAT.vlan)
        self.mock_int_bridge.delete_flows(
            dl_vlan=65535, in_port=self.INT_OFPORT)
        self.mox.ReplayAll()

        a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
                                              self.TUN_BRIDGE,
                                              '10.0.0.1', self.NET_MAPPING,
                                              'sudo', 2, ['gre'],
                                              self.VETH_MTU)
        a.phys_brs['net1'] = self.mock_map_tun_bridge
        a.phys_ofports['net1'] = self.MAP_TUN_OFPORT
        a.int_ofports['net1'] = self.INT_OFPORT

        a.available_local_vlans = set()
        a.local_vlan_map[NET_UUID] = LVM_FLAT
        a.reclaim_local_vlan(NET_UUID)
        self.assertTrue(LVM_FLAT.vlan in a.available_local_vlans)
        self.mox.VerifyAll()

    def test_reclaim_local_vlan_vlan(self):
        """Reclaiming a vlan-network vlan deletes its translation flows."""
        self.mock_map_tun_bridge.delete_flows(
            in_port=self.MAP_TUN_OFPORT, dl_vlan=LVM_VLAN.vlan)
        self.mock_int_bridge.delete_flows(
            dl_vlan=LV_ID, in_port=self.INT_OFPORT)
        self.mox.ReplayAll()

        a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
                                              self.TUN_BRIDGE,
                                              '10.0.0.1', self.NET_MAPPING,
                                              'sudo', 2, ['gre'],
                                              self.VETH_MTU)
        a.phys_brs['net1'] = self.mock_map_tun_bridge
        a.phys_ofports['net1'] = self.MAP_TUN_OFPORT
        a.int_ofports['net1'] = self.INT_OFPORT

        a.available_local_vlans = set()
        a.local_vlan_map[NET_UUID] = LVM_VLAN
        a.reclaim_local_vlan(NET_UUID)
        self.assertTrue(LVM_VLAN.vlan in a.available_local_vlans)
        self.mox.VerifyAll()

    def test_port_bound(self):
        """Binding a port tags it with the local vlan and clears old flows."""
        self.mock_int_bridge.set_db_attribute('Port', VIF_PORT.port_name,
                                              'tag', str(LVM.vlan))
        self.mock_int_bridge.delete_flows(in_port=VIF_PORT.ofport)

        self.mox.ReplayAll()
        a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
                                              self.TUN_BRIDGE,
                                              '10.0.0.1', self.NET_MAPPING,
                                              'sudo', 2, ['gre'],
                                              self.VETH_MTU)
        a.local_vlan_map[NET_UUID] = LVM
        a.port_bound(VIF_PORT, NET_UUID, 'gre', None, LS_ID)
        self.mox.VerifyAll()

    def test_port_unbound(self):
        """Unbinding the last port of a network reclaims its local vlan."""
        self.mox.StubOutWithMock(
            ovs_neutron_agent.OVSNeutronAgent, 'reclaim_local_vlan')
        ovs_neutron_agent.OVSNeutronAgent.reclaim_local_vlan(NET_UUID)
        self.mox.ReplayAll()

        a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
                                              self.TUN_BRIDGE,
                                              '10.0.0.1', self.NET_MAPPING,
                                              'sudo', 2, ['gre'],
                                              self.VETH_MTU)
        a.local_vlan_map[NET_UUID] = LVM
        a.port_unbound(VIF_ID, NET_UUID)
        self.mox.VerifyAll()

    def test_port_dead(self):
        """A dead port gets the dead vlan tag and a drop flow."""
        self.mock_int_bridge.set_db_attribute(
            'Port', VIF_PORT.port_name, 'tag', ovs_neutron_agent.DEAD_VLAN_TAG)

        self.mock_int_bridge.add_flow(priority=2, in_port=VIF_PORT.ofport,
                                      actions='drop')
        self.mox.ReplayAll()

        a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
                                              self.TUN_BRIDGE,
                                              '10.0.0.1', self.NET_MAPPING,
                                              'sudo', 2, ['gre'],
                                              self.VETH_MTU)
        a.available_local_vlans = set([LV_ID])
        a.local_vlan_map[NET_UUID] = LVM
        a.port_dead(VIF_PORT)
        self.mox.VerifyAll()

    def test_tunnel_update(self):
        """A tunnel_update RPC for a remote peer adds a tunnel port."""
        self.mock_tun_bridge.add_tunnel_port('gre-1', '10.0.10.1', '10.0.0.1',
                                             'gre', 4789)
        self.mox.ReplayAll()

        a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
                                              self.TUN_BRIDGE,
                                              '10.0.0.1', self.NET_MAPPING,
                                              'sudo', 2, ['gre'],
                                              self.VETH_MTU)
        a.tunnel_update(
            mox.MockAnything, tunnel_id='1', tunnel_ip='10.0.10.1',
            tunnel_type=constants.TYPE_GRE)
        self.mox.VerifyAll()

    def test_tunnel_update_self(self):
        """A tunnel_update naming the agent's own IP is ignored."""
        self.mox.ReplayAll()

        a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
                                              self.TUN_BRIDGE,
                                              '10.0.0.1', self.NET_MAPPING,
                                              'sudo', 2, ['gre'],
                                              self.VETH_MTU)
        a.tunnel_update(
            mox.MockAnything, tunnel_id='1', tunnel_ip='10.0.0.1')
        self.mox.VerifyAll()

    def test_daemon_loop(self):
        """Two iterations of the polling loop process the expected ports."""
        reply2 = {'current': set(['tap0']),
                  'added': set([]),
                  'removed': set([])}

        reply3 = {'current': set(['tap2']),
                  'added': set([]),
                  'removed': set([])}

        # The second iteration is aborted by raising from the logger so the
        # otherwise-infinite loop terminates; VerifyAll catches mismatches.
        self.mox.StubOutWithMock(log.ContextAdapter, 'exception')
        log.ContextAdapter.exception(
            _("Error in agent event loop")).AndRaise(
                Exception('Fake exception to get out of the loop'))

        self.mox.StubOutWithMock(
            ovs_neutron_agent.OVSNeutronAgent, 'update_ports')
        ovs_neutron_agent.OVSNeutronAgent.update_ports(set()).AndReturn(reply2)
        ovs_neutron_agent.OVSNeutronAgent.update_ports(
            set(['tap0'])).AndReturn(reply3)

        self.mox.StubOutWithMock(
            ovs_neutron_agent.OVSNeutronAgent, 'process_network_ports')
        ovs_neutron_agent.OVSNeutronAgent.process_network_ports(
            {'current': set(['tap0']),
             'removed': set([]),
             'added': set([])}).AndReturn(False)
        ovs_neutron_agent.OVSNeutronAgent.process_network_ports(
            {'current': set(['tap0']),
             'removed': set([]),
             'added': set([])}).AndRaise(
                 Exception('Fake exception to get out of the loop'))

        self.mox.ReplayAll()

        q_agent = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
                                                    self.TUN_BRIDGE,
                                                    '10.0.0.1',
                                                    self.NET_MAPPING,
                                                    'sudo', 2, ['gre'],
                                                    self.VETH_MTU)

        # Hack to test loop
        # We start method and expect it will raise after 2nd loop
        # If something goes wrong, mox.VerifyAll() will catch it
        try:
            q_agent.daemon_loop()
        except Exception:
            pass

        self.mox.VerifyAll()
class TunnelTestWithMTU(TunnelTest):
    """Re-runs all TunnelTest cases with a veth MTU configured."""

    def setUp(self):
        # Extra set_mtu expectations are recorded after the base sequence.
        super(TunnelTestWithMTU, self).setUp()
        self.VETH_MTU = 1500
        self.inta.link.set_mtu(self.VETH_MTU)
        self.intb.link.set_mtu(self.VETH_MTU)
| |
import requests
import urllib3
import logging
import pprint
from enum import Enum
from datetime import datetime
from abc import ABCMeta
from .utils import (
underscore_to_camelcase,
camelcase_to_underscore,
dic_to_json,
response_to_dic
)
from .exception import (
raise_from_response,
AuthenticationException
)
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
class Resource(metaclass=ABCMeta):
    """
    Abstract resource class.
    Works as a base class for all other resources, keeping the generic and re-usable functionality.
    Provides to the classes that inherit it with a connection pool (:any:`urllib3.connectionpool.HTTPSConnectionPool`)
    and methods to make all requests to the anilist api through it.
    All resources **must** be singletons.
    The only request this class doesn't handle are the authentication ones, managed by :any:`AuthenticationProvider`
    """

    _URL = 'https://anilist.co'
    """*Constant.* Base url that is used for all requests. Each resource **must** define it own endpoint based on this
    url."""

    _ENDPOINT = None
    """Default endpoint for each resource implementation."""

    def __init__(self):
        super().__init__()
        # One HTTPS connection pool per resource instance, keyed to the api host.
        self._pool = urllib3.PoolManager().connection_from_url(Resource._URL)

    @property
    def _headers(self):
        """
        Generates the default headers with the according credentials.
        Example::
            {
                "Authorization": "BEARER youraccestokenhere",
                "Content-Type": "application/json"
            }
        :raise AuthenticationException: if there is no current authentication (see :any:`AuthenticationProvider`).
        :return: (:obj:`dict`) Default headers for common requests.
        """
        auth = AuthenticationProvider.currentAuth()
        return {
            'Authorization': '%s %s' % (auth.tokenType, auth.accessToken),
            'Content-Type': 'application/json'}

    def update(self, entity):
        """
        Persists *entity*'s pending changes with a PUT to this resource's default endpoint.
        :param entity: (:any:`Entity`) Entity whose :any:`updateData` will be sent.
        :return: (:obj:`dict`) Response.
        """
        return self.put(data=entity.updateData)

    def request(self, method, endpoint=None, data=None, headers=None):
        """
        Makes a *method* request to *endpoint* with *data* and *headers*.
        :param method: (:obj:`str`) String for the http method: GET, POST, PUT DELETE. Other methods are not supported.
        :param endpoint: (:obj:`str`, optional) String for the endpoint where the request aims. Remember, all endpoints
            refers to :any:`_URL`.
            If none, request will aim to :any:`_ENDPOINT`.
            Example: `'/api/user/demo/animelist/'`
        :param data: (:obj:`dict`, optional) Parameters to be included in the request.
            If none, no parameters will be sent.
        :param headers: (:obj:`dict`, optional) Headers to be included in the request.
            If none, default parameters will be used (see :any:`_headers`).
        :raise: See :any:`raise_from_response`
        :return: (:obj:`dict`) Response.
        """
        headers = headers or self._headers
        endpoint = endpoint or self._ENDPOINT
        data = dic_to_json(data)
        # Lazy %-style logging args: the message is only interpolated when
        # DEBUG is actually enabled, instead of on every request.
        logger.debug('Resource request: %s %s', method, endpoint)
        logger.debug('Resource request body: %s', data)
        logger.debug('Resource request headers: %s', headers)
        response = self._pool.request(
            method,
            endpoint,
            body=data,
            headers=headers)
        raise_from_response(response)
        response = response_to_dic(response)
        logger.debug('Resource response: \n%s', pprint.pformat(response))
        return response

    def get(self, endpoint=None, data=None, headers=None):
        """
        *Helper.* Calls :any:`request` with `method='GET'`
        :param endpoint: See :any:`request`.
        :param data: See :any:`request`.
        :param headers: See :any:`request`.
        :return: (:obj:`dict`) Response.
        """
        return self.request('GET', endpoint=endpoint, data=data, headers=headers)

    def post(self, endpoint=None, data=None, headers=None):
        """
        *Helper.* Calls :any:`request` with `method='POST'`
        :param endpoint: See :any:`request`.
        :param data: See :any:`request`.
        :param headers: See :any:`request`.
        :return: (:obj:`dict`) Response.
        """
        return self.request('POST', endpoint=endpoint, data=data, headers=headers)

    def put(self, endpoint=None, data=None, headers=None):
        """
        *Helper.* Calls :any:`request` with `method='PUT'`
        :param endpoint: See :any:`request`.
        :param data: See :any:`request`.
        :param headers: See :any:`request`.
        :return: (:obj:`dict`) Response.
        """
        return self.request('PUT', endpoint=endpoint, data=data, headers=headers)

    def delete(self, endpoint=None, data=None, headers=None):
        """
        *Helper.* Calls :any:`request` with `method='DELETE'`
        :param endpoint: See :any:`request`.
        :param data: See :any:`request`.
        :param headers: See :any:`request`.
        :return: (:obj:`dict`) Response.
        """
        return self.request('DELETE', endpoint=endpoint, data=data, headers=headers)
class Entity(metaclass=ABCMeta):
    """Abstract base class for all classes that are mapped from/to an anilist response."""

    __composite__ = {}
    """Define how different implementations of this class compose each other. See :any:`fromResponse`"""

    # Resource instance used by `save`; subclasses are expected to set this
    # (it is None here) -- TODO confirm against implementations.
    _resource = None

    def __init__(self, **kwargs):
        # TODO: see if i can remove keyword args
        """
        All sub classes **must** override this method. Here is where the json response from the api, converted to a dict
        is mapped to the private attributes of each implementation.
        Implementation example::
            def __init__(self, **kwargs):
                super().__init__(**kwargs)
                self._id = kwargs.get('id')
                self._displayName = kwargs.get('displayName')
        :param kwargs: dict with values from the json entity to be mapped.
        """
        super().__init__()
        # Pending changes, keyed by json field name. See `updateData`.
        self._updateData = {}

    @classmethod
    def fromResponse(cls, response):
        """
        Class method that creates an instance of the implementation class, based on a json :obj:`requests.Response` or
        a :obj:`dict`.
        The 'magic' here resides in :any:`__composite__` attribute. :any:`__composite__` is a :obj:`dict` that allow an
        implementation class to define: each time you find, lets say, 'user' in the json response, take its value pass
        it as a parameter of the `fromResponse` method of User class. For this particular example, in the class that
        uses User, you **must** define::
            __composite__ = {'user': User}
        :param response: Base data to create the instance
        :return: An instance of the implementation class, composed and populated with the response data.
        """
        if isinstance(response, requests.Response):
            response = response.json()
        dic = {}
        for k in response:
            # Composite keys are recursively built into their own Entity;
            # plain keys are passed through (renamed to camelCase).
            if k in cls.__composite__:
                dic[underscore_to_camelcase(k)] = cls.__composite__[k].fromResponse(response.get(k))
            else:
                dic[underscore_to_camelcase(k)] = response.get(k)
        return cls(**dic)

    @property
    def updateData(self):
        """
        Each time an updatable attribute is updated, implementation class **must** set the corresponding entry in the
        :any:`_updateData` dict. This operation should always be made in the setters.
        When the update of a entity is made, the data for the request is obtained through this method so, any change
        that isn't in this dict will be ignored.
        For now, keys must be the keys of the json request. This doesn't support composite updates cause they are not
        needed yet.
        :return: :obj:`dict` with the new values of the updatable fields.
        """
        return self._updateData

    @property
    def save(self):
        # NOTE(review): a read-only property that performs a PUT side effect
        # (and returns None) is surprising; accessing `entity.save` triggers
        # the request. Also relies on a subclass having set `_resource`.
        # Confirm intent before converting this into a regular method.
        self._resource.put(data=self.updateData)
class Updatable(object):
    """
    Decorator for entity setters: after running the wrapped setter, it records
    the assigned value in the owning entity's ``updateData`` dict (keyed by the
    snake_case form of the setter name) so it can be sent in an update request.
    """

    _logger = logging.getLogger(__name__ + '.Updatable')

    def __init__(self, setter):
        # Keep the wrapped setter and derive the json key from its name.
        self._setter = setter
        self._key = camelcase_to_underscore(self._setter.__name__)
        self._logger.debug('Updatable field created: ' + self._setter.__name__)

    def __call__(self, instance, value):
        self._setter(instance, value)
        # Enums are recorded by their underlying value; everything else as-is.
        recorded = value.value if isinstance(value, Enum) else value
        instance.updateData[self._key] = recorded
        self._logger.debug('Update data changed: ' + str(instance.updateData))
class GrantType(Enum):
    """
    Enum for Authentication grant type. The member values are the literal
    `grant_type` strings sent to the anilist token endpoint.
    Possible values:
        - authorizationCode
        - authorizationPin
        - clientCredentials
        - refreshToken
    """
    authorizationCode = 'authorization_code'
    authorizationPin = 'authorization_pin'
    clientCredentials = 'client_credentials'
    refreshToken = 'refresh_token'
class AuthenticationProvider(object):
"""
*Singleton*. Builder for the Authentication class. Works like a :any:`Resource` but with many specific behavior.
"""
_URL = 'https://anilist.co'
_ENDPOINT = '/api/auth/access_token'
_pool = urllib3.PoolManager().connection_from_url(_URL)
_instance = None
clientId = None
clientSecret = None
redirectUri = None
_logger = logging.getLogger(__name__ + '.AuthenticationProvider')
def __new__(cls, grantType):
if cls._instance is None:
cls._instance = object.__new__(cls)
return cls._instance
def __init__(self, grantType):
super().__init__()
self._grantType = grantType
self._currentAuth = None
if grantType is GrantType.authorizationCode:
self.authenticate = self._codeRequest
elif grantType is GrantType.authorizationPin:
self.authenticate = self._pinRequest
elif grantType is GrantType.clientCredentials:
self.authenticate = self._clientCredentialsRequest
elif grantType is GrantType.refreshToken:
self.authenticate = self._refreshRequest
else:
raise ValueError('Invalid grant type.')
@classmethod
def config(cls, clientId, clientSecret, redirectUri):
"""
Sets all configuration params needed for this class to work. All this values are from Anilist web page.
See `how to create a client <https://anilist-api.readthedocs.io/en/latest/introduction.html#creating-a-client>`_
:param clientId: :obj:`str` Anilist client id.
:param clientSecret: :obj:`str` Anilist client secret.
:param redirectUri: :obj:`str` Anilist redirect uri.
"""
# TODO: make redirectUri not mandatory.
cls.clientId = clientId
cls.clientSecret = clientSecret
cls.redirectUri = redirectUri
@classmethod
def currentAuth(cls):
"""
:return: Current :any:`Authentication` instance.
"""
if not cls._instance:
raise AuthenticationException('AuthenticationProvider is not instantiated.')
auth = cls._instance._currentAuth
if not auth:
raise AuthenticationException('Current authentication is None.')
return auth
@classmethod
def refresh(cls, refreshToken, clientId=None, clientSecret=None):
"""
Force authentication refresh.
:return: a refreshed :any:`Authentication`
"""
return cls._instance._refreshRequest(refreshToken, clientId, clientSecret)
def _authRequest(self, data):
self._logger.debug('Auth request method: ' + 'POST')
self._logger.debug('Auth request url: ' + self._ENDPOINT)
self._logger.debug('Auth request: \n' + pprint.pformat(data))
response = self._pool.request(
'POST',
self._ENDPOINT,
fields=data)
raise_from_response(response)
response = response_to_dic(response)
if data.get('refresh_token') is not None:
response['refresh_token'] = data.get('refresh_token')
auth = Authentication(**response)
self._currentAuth = auth
self._logger.debug('Auth response: \n' + pprint.pformat(response))
return auth
def _refreshRequest(self, refreshToken, clientId=None, clientSecret=None):
    """
    Exchange a refresh token for a fresh :any:`Authentication`.
    Falls back to the class-configured client id/secret when not given.
    """
    payload = {
        'grant_type': GrantType.refreshToken.value,
        'client_id': clientId or self.clientId,
        'client_secret': clientSecret or self.clientSecret,
        'refresh_token': refreshToken}
    return self._authRequest(payload)
def _codeRequest(self, code, clientId=None, clientSecret=None, redirectUri=None):
    """
    Exchange an authorization code for an :any:`Authentication`.
    Falls back to the class-configured client id/secret/redirect uri when
    not given.
    """
    payload = {
        'grant_type': GrantType.authorizationCode.value,
        'client_id': clientId or self.clientId,
        'client_secret': clientSecret or self.clientSecret,
        'redirect_uri': redirectUri or self.redirectUri,
        'code': code}
    return self._authRequest(payload)
def _pinRequest(self, pin, clientId=None, clientSecret=None, redirectUri=None):
    """
    Exchange an authorization pin for an :any:`Authentication`.
    The pin is sent in the ``code`` field, as the endpoint expects.
    """
    payload = {
        'grant_type': GrantType.authorizationPin.value,
        'client_id': clientId or self.clientId,
        'client_secret': clientSecret or self.clientSecret,
        'redirect_uri': redirectUri or self.redirectUri,
        'code': pin}
    return self._authRequest(payload)
def _clientCredentialsRequest(self, clientId=None, clientSecret=None):
    """
    Request an app-only :any:`Authentication` via the client credentials
    grant (no user context, no redirect uri).
    """
    payload = {
        'grant_type': GrantType.clientCredentials.value,
        'client_id': clientId or self.clientId,
        'client_secret': clientSecret or self.clientSecret}
    return self._authRequest(payload)
class Authentication(object):
    """
    Represents a Anilist authentication (token endpoint) response.

    Instances are normally obtained through the ``from*`` factory
    classmethods, which delegate to :any:`AuthenticationProvider`.
    """
    def __init__(self, **kwargs):
        """
        :param kwargs: raw token-endpoint response fields: ``access_token``,
            ``token_type``, ``expires_in``, ``refresh_token`` and ``expires``
            (a POSIX timestamp, converted to :class:`datetime` when present).
        """
        super().__init__()
        self._accessToken = kwargs.get('access_token')
        self._tokenType = kwargs.get('token_type')
        self._expiresIn = kwargs.get('expires_in')
        self._refreshToken = kwargs.get('refresh_token')
        exp = kwargs.get('expires')
        self._expires = exp if exp is None else datetime.fromtimestamp(exp)
    @classmethod
    def fromCode(cls, code, clientId=None, clientSecret=None, redirectUri=None):
        """
        Generates a :any:`Authentication` instance from a authentication code.
        :param code: :obj:`str` the authentication code
        :param clientId: *Optional.*
        :param clientSecret: *Optional.*
        :param redirectUri: *Optional.*
        :return: :any:`Authentication`
        """
        return AuthenticationProvider(
            GrantType.authorizationCode).authenticate(code, clientId, clientSecret, redirectUri)
    @classmethod
    def fromPin(cls, pin, clientId=None, clientSecret=None, redirectUri=None):
        """
        Generates a :any:`Authentication` instance from a authentication pin.
        :param pin: :obj:`str` the authentication pin
        :param clientId: *Optional.*
        :param clientSecret: *Optional.*
        :param redirectUri: *Optional.*
        :return: :any:`Authentication`
        """
        # Bug fix: redirectUri was previously accepted but never forwarded,
        # silently ignoring a per-call redirect uri (compare fromCode).
        return AuthenticationProvider(
            GrantType.authorizationPin).authenticate(pin, clientId, clientSecret, redirectUri)
    @classmethod
    def fromCredentials(cls, clientId=None, clientSecret=None):
        """
        Generates a :any:`Authentication` instance based on the client credentials (Read only public content and doesn't
        have refresh token).
        :return: :any:`Authentication`
        """
        return AuthenticationProvider(
            GrantType.clientCredentials).authenticate(clientId, clientSecret)
    @classmethod
    def fromRefreshToken(cls, refreshToken, clientId=None, clientSecret=None):
        """
        Generates a :any:`Authentication` instance from a refresh token.
        :param refreshToken: :obj:`str` the refresh token
        :return: :any:`Authentication`
        """
        return AuthenticationProvider(GrantType.refreshToken).authenticate(refreshToken)
    def refresh(self):
        """
        Force authentication refresh; the refreshed values are applied to
        this instance in place.
        """
        newAuth = AuthenticationProvider.refresh(self.refreshToken)
        self._accessToken = newAuth.accessToken
        self._tokenType = newAuth.tokenType
        self._expiresIn = newAuth.expiresIn
        self._expires = newAuth.expires
        # Bug fix: keep the (possibly rotated) refresh token so later
        # refreshes don't reuse a stale one.
        self._refreshToken = newAuth.refreshToken
    def __repr__(self):
        return '<%s \'%s\' %s>' % (
            self.__class__.__name__,
            self.accessToken,
            'Expired' if self.isExpired else 'Not expired')
    @property
    def isExpired(self):
        """
        :return: True if the authentication is expired, False otherwise.
        """
        # NOTE(review): assumes the token response always carried 'expires';
        # a None self._expires would make this comparison raise -- confirm.
        return self.expires < datetime.now()
    @property
    def accessToken(self):
        """
        If the authentication has expired, performs a refresh before
        returning the access token.
        :return: A valid access token.
        """
        if self.isExpired:
            self.refresh()
        return self._accessToken
    @property
    def tokenType(self):
        # e.g. 'bearer' -> 'Bearer'.
        return self._tokenType.capitalize()
    @property
    def expires(self):
        # datetime at which the access token expires (None when unknown).
        return self._expires
    @property
    def expiresIn(self):
        # Token lifetime in seconds, as reported by the server.
        return self._expiresIn
    @property
    def refreshToken(self):
        return self._refreshToken
| |
"""
Test functions for multivariate normal distributions.
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_array_almost_equal, assert_equal,
assert_raises, run_module_suite, TestCase)
from test_continuous_basic import check_distribution_rvs
import numpy
import numpy as np
import scipy.linalg
from scipy.stats._multivariate import _PSD, _lnB
from scipy.stats import multivariate_normal
from scipy.stats import matrix_normal
from scipy.stats import dirichlet, beta
from scipy.stats import wishart, invwishart, chi2, invgamma
from scipy.stats import norm
from scipy.integrate import romb
from common_tests import check_random_state_property
class TestMultivariateNormal(TestCase):
    """Tests for scipy.stats.multivariate_normal and the _PSD helper."""
    def test_input_shape(self):
        # Mismatched mean/covariance/quantile dimensions must be rejected.
        mu = np.arange(3)
        cov = np.identity(2)
        assert_raises(ValueError, multivariate_normal.pdf, (0, 1), mu, cov)
        assert_raises(ValueError, multivariate_normal.pdf, (0, 1, 2), mu, cov)
    def test_scalar_values(self):
        np.random.seed(1234)
        # When evaluated on scalar data, the pdf should return a scalar
        x, mean, cov = 1.5, 1.7, 2.5
        pdf = multivariate_normal.pdf(x, mean, cov)
        assert_equal(pdf.ndim, 0)
        # When evaluated on a single vector, the pdf should return a scalar
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.abs(np.random.randn(5))  # Diagonal values for cov. matrix
        pdf = multivariate_normal.pdf(x, mean, cov)
        assert_equal(pdf.ndim, 0)
    def test_logpdf(self):
        # Check that the log of the pdf is in fact the logpdf
        np.random.seed(1234)
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.abs(np.random.randn(5))
        d1 = multivariate_normal.logpdf(x, mean, cov)
        d2 = multivariate_normal.pdf(x, mean, cov)
        assert_allclose(d1, np.log(d2))
    def test_rank(self):
        # Check that the rank is detected correctly.
        np.random.seed(1234)
        n = 4
        mean = np.random.randn(n)
        for expected_rank in range(1, n + 1):
            s = np.random.randn(n, expected_rank)
            cov = np.dot(s, s.T)
            distn = multivariate_normal(mean, cov, allow_singular=True)
            assert_equal(distn.cov_info.rank, expected_rank)
    def test_degenerate_distributions(self):
        # Helper: a random orthonormal matrix from the SVD of a Gaussian draw.
        def _sample_orthonormal_matrix(n):
            M = np.random.randn(n, n)
            u, s, v = scipy.linalg.svd(M)
            return u
        for n in range(1, 5):
            x = np.random.randn(n)
            for k in range(1, n + 1):
                # Sample a small covariance matrix.
                s = np.random.randn(k, k)
                cov_kk = np.dot(s, s.T)
                # Embed the small covariance matrix into a larger low rank matrix.
                cov_nn = np.zeros((n, n))
                cov_nn[:k, :k] = cov_kk
                # Define a rotation of the larger low rank matrix.
                u = _sample_orthonormal_matrix(n)
                cov_rr = np.dot(u, np.dot(cov_nn, u.T))
                y = np.dot(u, x)
                # Check some identities.
                distn_kk = multivariate_normal(np.zeros(k), cov_kk,
                                               allow_singular=True)
                distn_nn = multivariate_normal(np.zeros(n), cov_nn,
                                               allow_singular=True)
                distn_rr = multivariate_normal(np.zeros(n), cov_rr,
                                               allow_singular=True)
                assert_equal(distn_kk.cov_info.rank, k)
                assert_equal(distn_nn.cov_info.rank, k)
                assert_equal(distn_rr.cov_info.rank, k)
                pdf_kk = distn_kk.pdf(x[:k])
                pdf_nn = distn_nn.pdf(x)
                pdf_rr = distn_rr.pdf(y)
                assert_allclose(pdf_kk, pdf_nn)
                assert_allclose(pdf_kk, pdf_rr)
                logpdf_kk = distn_kk.logpdf(x[:k])
                logpdf_nn = distn_nn.logpdf(x)
                logpdf_rr = distn_rr.logpdf(y)
                assert_allclose(logpdf_kk, logpdf_nn)
                assert_allclose(logpdf_kk, logpdf_rr)
    def test_large_pseudo_determinant(self):
        # Check that large pseudo-determinants are handled appropriately.
        # Construct a singular diagonal covariance matrix
        # whose pseudo determinant overflows double precision.
        large_total_log = 1000.0
        npos = 100
        nzero = 2
        large_entry = np.exp(large_total_log / npos)
        n = npos + nzero
        cov = np.zeros((n, n), dtype=float)
        np.fill_diagonal(cov, large_entry)
        cov[-nzero:, -nzero:] = 0
        # Check some determinants.
        assert_equal(scipy.linalg.det(cov), 0)
        assert_equal(scipy.linalg.det(cov[:npos, :npos]), np.inf)
        assert_allclose(np.linalg.slogdet(cov[:npos, :npos]),
                        (1, large_total_log))
        # Check the pseudo-determinant.
        psd = _PSD(cov)
        assert_allclose(psd.log_pdet, large_total_log)
    def test_broadcasting(self):
        np.random.seed(1234)
        n = 4
        # Construct a random covariance matrix.
        data = np.random.randn(n, n)
        cov = np.dot(data, data.T)
        mean = np.random.randn(n)
        # Construct an ndarray which can be interpreted as
        # a 2x3 array whose elements are random data vectors.
        X = np.random.randn(2, 3, n)
        # Check that multiple data points can be evaluated at once.
        for i in range(2):
            for j in range(3):
                actual = multivariate_normal.pdf(X[i, j], mean, cov)
                desired = multivariate_normal.pdf(X, mean, cov)[i, j]
                assert_allclose(actual, desired)
    def test_normal_1D(self):
        # The probability density function for a 1D normal variable should
        # agree with the standard normal distribution in scipy.stats.distributions
        x = np.linspace(0, 2, 10)
        mean, cov = 1.2, 0.9
        scale = cov**0.5
        d1 = norm.pdf(x, mean, scale)
        d2 = multivariate_normal.pdf(x, mean, cov)
        assert_allclose(d1, d2)
    def test_marginalization(self):
        # Integrating out one of the variables of a 2D Gaussian should
        # yield a 1D Gaussian
        mean = np.array([2.5, 3.5])
        cov = np.array([[.5, 0.2], [0.2, .6]])
        n = 2 ** 8 + 1  # Number of samples
        delta = 6 / (n - 1)  # Grid spacing
        v = np.linspace(0, 6, n)
        xv, yv = np.meshgrid(v, v)
        pos = np.empty((n, n, 2))
        pos[:, :, 0] = xv
        pos[:, :, 1] = yv
        pdf = multivariate_normal.pdf(pos, mean, cov)
        # Marginalize over x and y axis
        margin_x = romb(pdf, delta, axis=0)
        margin_y = romb(pdf, delta, axis=1)
        # Compare with standard normal distribution
        gauss_x = norm.pdf(v, loc=mean[0], scale=cov[0, 0] ** 0.5)
        gauss_y = norm.pdf(v, loc=mean[1], scale=cov[1, 1] ** 0.5)
        assert_allclose(margin_x, gauss_x, rtol=1e-2, atol=1e-2)
        assert_allclose(margin_y, gauss_y, rtol=1e-2, atol=1e-2)
    def test_frozen(self):
        # The frozen distribution should agree with the regular one
        np.random.seed(1234)
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.abs(np.random.randn(5))
        norm_frozen = multivariate_normal(mean, cov)
        assert_allclose(norm_frozen.pdf(x), multivariate_normal.pdf(x, mean, cov))
        assert_allclose(norm_frozen.logpdf(x),
                        multivariate_normal.logpdf(x, mean, cov))
    def test_pseudodet_pinv(self):
        # Make sure that pseudo-inverse and pseudo-det agree on cutoff
        # Assemble random covariance matrix with large and small eigenvalues
        np.random.seed(1234)
        n = 7
        x = np.random.randn(n, n)
        cov = np.dot(x, x.T)
        s, u = scipy.linalg.eigh(cov)
        s = 0.5 * np.ones(n)
        s[0] = 1.0
        s[-1] = 1e-7
        cov = np.dot(u, np.dot(np.diag(s), u.T))
        # Set cond so that the lowest eigenvalue is below the cutoff
        cond = 1e-5
        psd = _PSD(cov, cond=cond)
        psd_pinv = _PSD(psd.pinv, cond=cond)
        # Check that the log pseudo-determinant agrees with the sum
        # of the logs of all but the smallest eigenvalue
        assert_allclose(psd.log_pdet, np.sum(np.log(s[:-1])))
        # Check that the pseudo-determinant of the pseudo-inverse
        # agrees with 1 / pseudo-determinant
        assert_allclose(-psd.log_pdet, psd_pinv.log_pdet)
    def test_exception_nonsquare_cov(self):
        cov = [[1, 2, 3], [4, 5, 6]]
        assert_raises(ValueError, _PSD, cov)
    def test_exception_nonfinite_cov(self):
        cov_nan = [[1, 0], [0, np.nan]]
        assert_raises(ValueError, _PSD, cov_nan)
        cov_inf = [[1, 0], [0, np.inf]]
        assert_raises(ValueError, _PSD, cov_inf)
    def test_exception_non_psd_cov(self):
        cov = [[1, 0], [0, -1]]
        assert_raises(ValueError, _PSD, cov)
    def test_exception_singular_cov(self):
        # Without allow_singular, a rank-deficient covariance must raise.
        np.random.seed(1234)
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.ones((5, 5))
        e = np.linalg.LinAlgError
        assert_raises(e, multivariate_normal, mean, cov)
        assert_raises(e, multivariate_normal.pdf, x, mean, cov)
        assert_raises(e, multivariate_normal.logpdf, x, mean, cov)
    def test_R_values(self):
        # Compare the multivariate pdf with some values precomputed
        # in R version 3.0.1 (2013-05-16) on Mac OS X 10.6.
        # The values below were generated by the following R-script:
        # > library(mnormt)
        # > x <- seq(0, 2, length=5)
        # > y <- 3*x - 2
        # > z <- x + cos(y)
        # > mu <- c(1, 3, 2)
        # > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
        # > r_pdf <- dmnorm(cbind(x,y,z), mu, Sigma)
        r_pdf = np.array([0.0002214706, 0.0013819953, 0.0049138692,
                          0.0103803050, 0.0140250800])
        x = np.linspace(0, 2, 5)
        y = 3 * x - 2
        z = x + np.cos(y)
        r = np.array([x, y, z]).T
        mean = np.array([1, 3, 2], 'd')
        cov = np.array([[1, 2, 0], [2, 5, .5], [0, .5, 3]], 'd')
        pdf = multivariate_normal.pdf(r, mean, cov)
        assert_allclose(pdf, r_pdf, atol=1e-10)
    def test_multivariate_normal_rvs_zero_covariance(self):
        # A zero covariance collapses sampling onto the mean exactly.
        mean = np.zeros(2)
        covariance = np.zeros((2, 2))
        model = multivariate_normal(mean, covariance, allow_singular=True)
        sample = model.rvs()
        assert_equal(sample, [0, 0])
    def test_rvs_shape(self):
        # Check that rvs parses the mean and covariance correctly, and returns
        # an array of the right shape
        N = 300
        d = 4
        sample = multivariate_normal.rvs(mean=np.zeros(d), cov=1, size=N)
        assert_equal(sample.shape, (N, d))
        sample = multivariate_normal.rvs(mean=None,
                                         cov=np.array([[2, .1], [.1, 1]]),
                                         size=N)
        assert_equal(sample.shape, (N, 2))
        u = multivariate_normal(mean=0, cov=1)
        sample = u.rvs(N)
        assert_equal(sample.shape, (N, ))
    def test_large_sample(self):
        # Generate large sample and compare sample mean and sample covariance
        # with mean and covariance matrix.
        np.random.seed(2846)
        n = 3
        mean = np.random.randn(n)
        M = np.random.randn(n, n)
        cov = np.dot(M, M.T)
        size = 5000
        sample = multivariate_normal.rvs(mean, cov, size)
        assert_allclose(numpy.cov(sample.T), cov, rtol=1e-1)
        assert_allclose(sample.mean(0), mean, rtol=1e-1)
    def test_entropy(self):
        np.random.seed(2846)
        n = 3
        mean = np.random.randn(n)
        M = np.random.randn(n, n)
        cov = np.dot(M, M.T)
        rv = multivariate_normal(mean, cov)
        # Check that frozen distribution agrees with entropy function
        assert_almost_equal(rv.entropy(), multivariate_normal.entropy(mean, cov))
        # Compare entropy with manually computed expression involving
        # the sum of the logs of the eigenvalues of the covariance matrix
        eigs = np.linalg.eig(cov)[0]
        desired = 1 / 2 * (n * (np.log(2 * np.pi) + 1) + np.sum(np.log(eigs)))
        assert_almost_equal(desired, rv.entropy())
    def test_lnB(self):
        alpha = np.array([1, 1, 1])
        desired = .5  # e^lnB = 1/2 for [1, 1, 1]
        assert_almost_equal(np.exp(_lnB(alpha)), desired)
class TestMatrixNormal(TestCase):
    """Tests for scipy.stats.matrix_normal."""
    def test_bad_input(self):
        # Check that bad inputs raise errors
        num_rows = 4
        num_cols = 3
        M = 0.3 * np.ones((num_rows,num_cols))
        U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows))
        V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols))
        # Incorrect dimensions
        assert_raises(ValueError, matrix_normal, np.zeros((5,4,3)))
        assert_raises(ValueError, matrix_normal, M, np.zeros(10), V)
        assert_raises(ValueError, matrix_normal, M, U, np.zeros(10))
        assert_raises(ValueError, matrix_normal, M, U, U)
        assert_raises(ValueError, matrix_normal, M, V, V)
        assert_raises(ValueError, matrix_normal, M.T, U, V)
        # Singular covariance
        e = np.linalg.LinAlgError
        assert_raises(e, matrix_normal, M, U, np.ones((num_cols, num_cols)))
        assert_raises(e, matrix_normal, M, np.ones((num_rows, num_rows)), V)
    def test_default_inputs(self):
        # Check that default argument handling works
        num_rows = 4
        num_cols = 3
        M = 0.3 * np.ones((num_rows,num_cols))
        U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows))
        V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols))
        Z = np.zeros((num_rows, num_cols))
        Zr = np.zeros((num_rows, 1))
        Zc = np.zeros((1, num_cols))
        Ir = np.identity(num_rows)
        Ic = np.identity(num_cols)
        I1 = np.identity(1)
        assert_equal(matrix_normal.rvs(mean=M, rowcov=U, colcov=V).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal.rvs(mean=M).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal.rvs(rowcov=U).shape,
                     (num_rows, 1))
        assert_equal(matrix_normal.rvs(colcov=V).shape,
                     (1, num_cols))
        assert_equal(matrix_normal.rvs(mean=M, colcov=V).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal.rvs(mean=M, rowcov=U).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal.rvs(rowcov=U, colcov=V).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal(mean=M).rowcov, Ir)
        assert_equal(matrix_normal(mean=M).colcov, Ic)
        assert_equal(matrix_normal(rowcov=U).mean, Zr)
        assert_equal(matrix_normal(rowcov=U).colcov, I1)
        assert_equal(matrix_normal(colcov=V).mean, Zc)
        assert_equal(matrix_normal(colcov=V).rowcov, I1)
        assert_equal(matrix_normal(mean=M, rowcov=U).colcov, Ic)
        assert_equal(matrix_normal(mean=M, colcov=V).rowcov, Ir)
        assert_equal(matrix_normal(rowcov=U, colcov=V).mean, Z)
    def test_covariance_expansion(self):
        # Check that covariance can be specified with scalar or vector
        num_rows = 4
        num_cols = 3
        M = 0.3 * np.ones((num_rows,num_cols))
        Uv = 0.2*np.ones(num_rows)
        Us = 0.2
        Vv = 0.1*np.ones(num_cols)
        Vs = 0.1
        Ir = np.identity(num_rows)
        Ic = np.identity(num_cols)
        assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).rowcov,
                     0.2*Ir)
        assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).colcov,
                     0.1*Ic)
        assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).rowcov,
                     0.2*Ir)
        assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).colcov,
                     0.1*Ic)
    def test_frozen_matrix_normal(self):
        # Frozen and non-frozen calls must agree for rvs, pdf and logpdf
        # across a grid of shapes.
        for i in range(1,5):
            for j in range(1,5):
                M = 0.3 * np.ones((i,j))
                U = 0.5 * np.identity(i) + 0.5 * np.ones((i,i))
                V = 0.7 * np.identity(j) + 0.3 * np.ones((j,j))
                frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
                rvs1 = frozen.rvs(random_state=1234)
                rvs2 = matrix_normal.rvs(mean=M, rowcov=U, colcov=V,
                                         random_state=1234)
                assert_equal(rvs1, rvs2)
                X = frozen.rvs(random_state=1234)
                pdf1 = frozen.pdf(X)
                pdf2 = matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
                assert_equal(pdf1, pdf2)
                logpdf1 = frozen.logpdf(X)
                logpdf2 = matrix_normal.logpdf(X, mean=M, rowcov=U, colcov=V)
                assert_equal(logpdf1, logpdf2)
    def test_matches_multivariate(self):
        # Check that the pdfs match those obtained by vectorising and
        # treating as a multivariate normal.
        for i in range(1,5):
            for j in range(1,5):
                M = 0.3 * np.ones((i,j))
                U = 0.5 * np.identity(i) + 0.5 * np.ones((i,i))
                V = 0.7 * np.identity(j) + 0.3 * np.ones((j,j))
                frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
                X = frozen.rvs(random_state=1234)
                pdf1 = frozen.pdf(X)
                logpdf1 = frozen.logpdf(X)
                vecX = X.T.flatten()
                vecM = M.T.flatten()
                cov = np.kron(V,U)
                pdf2 = multivariate_normal.pdf(vecX, mean=vecM, cov=cov)
                logpdf2 = multivariate_normal.logpdf(vecX, mean=vecM, cov=cov)
                assert_allclose(pdf1, pdf2, rtol=1E-10)
                assert_allclose(logpdf1, logpdf2, rtol=1E-10)
    def test_array_input(self):
        # Check array of inputs has the same output as the separate entries.
        num_rows = 4
        num_cols = 3
        M = 0.3 * np.ones((num_rows,num_cols))
        U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows))
        V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols))
        N = 10
        frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
        X1 = frozen.rvs(size=N, random_state=1234)
        X2 = frozen.rvs(size=N, random_state=4321)
        X = np.concatenate((X1[np.newaxis,:,:,:],X2[np.newaxis,:,:,:]), axis=0)
        assert_equal(X.shape, (2, N, num_rows, num_cols))
        array_logpdf = frozen.logpdf(X)
        assert_equal(array_logpdf.shape, (2, N))
        for i in range(2):
            for j in range(N):
                separate_logpdf = matrix_normal.logpdf(X[i,j], mean=M,
                                                       rowcov=U, colcov=V)
                assert_allclose(separate_logpdf, array_logpdf[i,j], 1E-10)
    def test_moments(self):
        # Check that the sample moments match the parameters
        num_rows = 4
        num_cols = 3
        M = 0.3 * np.ones((num_rows,num_cols))
        U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows))
        V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols))
        N = 1000
        frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
        X = frozen.rvs(size=N, random_state=1234)
        sample_mean = np.mean(X,axis=0)
        assert_allclose(sample_mean, M, atol=0.1)
        sample_colcov = np.cov(X.reshape(N*num_rows,num_cols).T)
        assert_allclose(sample_colcov, V, atol=0.1)
        sample_rowcov = np.cov(np.swapaxes(X,1,2).reshape(
            N*num_cols,num_rows).T)
        assert_allclose(sample_rowcov, U, atol=0.1)
class TestDirichlet(TestCase):
    """Tests for scipy.stats.dirichlet."""
    def test_frozen_dirichlet(self):
        # Frozen and non-frozen calls must agree.
        np.random.seed(2846)
        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)
        d = dirichlet(alpha)
        assert_equal(d.var(), dirichlet.var(alpha))
        assert_equal(d.mean(), dirichlet.mean(alpha))
        assert_equal(d.entropy(), dirichlet.entropy(alpha))
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, n)
            x /= np.sum(x)
            assert_equal(d.pdf(x[:-1]), dirichlet.pdf(x[:-1], alpha))
            assert_equal(d.logpdf(x[:-1]), dirichlet.logpdf(x[:-1], alpha))
    def test_numpy_rvs_shape_compatibility(self):
        # numpy draws samples as rows; dirichlet.pdf expects components as
        # rows, so the transposed form is accepted and the raw form rejected.
        np.random.seed(2846)
        alpha = np.array([1.0, 2.0, 3.0])
        x = np.random.dirichlet(alpha, size=7)
        assert_equal(x.shape, (7, 3))
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
        dirichlet.pdf(x.T, alpha)
        dirichlet.pdf(x.T[:-1], alpha)
        dirichlet.logpdf(x.T, alpha)
        dirichlet.logpdf(x.T[:-1], alpha)
    def test_alpha_with_zeros(self):
        np.random.seed(2846)
        alpha = [1.0, 0.0, 3.0]
        x = np.random.dirichlet(alpha, size=7).T
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_alpha_with_negative_entries(self):
        np.random.seed(2846)
        alpha = [1.0, -2.0, 3.0]
        x = np.random.dirichlet(alpha, size=7).T
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_with_zeros(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.array([0.1, 0.0, 0.2, 0.7])
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_with_negative_entries(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.array([0.1, -0.1, 0.3, 0.7])
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_with_too_large_entries(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.array([0.1, 1.1, 0.3, 0.7])
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_too_deep_c(self):
        alpha = np.array([1.0, 2.0, 3.0])
        x = np.ones((2, 7, 7)) / 14
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_alpha_too_deep(self):
        alpha = np.array([[1.0, 2.0], [3.0, 4.0]])
        x = np.ones((2, 2, 7)) / 4
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_alpha_correct_depth(self):
        alpha = np.array([1.0, 2.0, 3.0])
        x = np.ones((3, 7)) / 3
        dirichlet.pdf(x, alpha)
        dirichlet.logpdf(x, alpha)
    def test_non_simplex_data(self):
        # Data columns that do not sum to one must be rejected.
        alpha = np.array([1.0, 2.0, 3.0])
        x = np.ones((3, 7)) / 2
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_vector_too_short(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.ones((2, 7)) / 2
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_vector_too_long(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.ones((5, 7)) / 5
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_simple_values(self):
        # A symmetric 2D Dirichlet with alpha = [1, 1] matches Beta(1, 1).
        alpha = np.array([1, 1])
        d = dirichlet(alpha)
        assert_almost_equal(d.mean(), 0.5)
        assert_almost_equal(d.var(), 1. / 12.)
        b = beta(1, 1)
        assert_almost_equal(d.mean(), b.mean())
        assert_almost_equal(d.var(), b.var())
    def test_K_and_K_minus_1_calls_equal(self):
        # Test that calls with K and K-1 entries yield the same results.
        np.random.seed(2846)
        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)
        d = dirichlet(alpha)
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, n)
            x /= np.sum(x)
            assert_almost_equal(d.pdf(x[:-1]), d.pdf(x))
    def test_multiple_entry_calls(self):
        # Test that calls with multiple x vectors as matrix work
        np.random.seed(2846)
        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)
        d = dirichlet(alpha)
        num_tests = 10
        num_multiple = 5
        xm = None
        for i in range(num_tests):
            for m in range(num_multiple):
                x = np.random.uniform(10e-10, 100, n)
                x /= np.sum(x)
                if xm is not None:
                    xm = np.vstack((xm, x))
                else:
                    xm = x
            rm = d.pdf(xm.T)
            rs = None
            for xs in xm:
                r = d.pdf(xs)
                if rs is not None:
                    rs = np.append(rs, r)
                else:
                    rs = r
            assert_array_almost_equal(rm, rs)
    def test_2D_dirichlet_is_beta(self):
        # A 2-component Dirichlet reduces to a Beta distribution.
        np.random.seed(2846)
        alpha = np.random.uniform(10e-10, 100, 2)
        d = dirichlet(alpha)
        b = beta(alpha[0], alpha[1])
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, 2)
            x /= np.sum(x)
            assert_almost_equal(b.pdf(x), d.pdf([x]))
        assert_almost_equal(b.mean(), d.mean()[0])
        assert_almost_equal(b.var(), d.var()[0])
def test_multivariate_normal_dimensions_mismatch():
    # Regression test for GH #3493. A mean of length M combined with an
    # (N, N) covariance where M != N must raise a ValueError whose message
    # begins with "Dimension mismatch".
    mean_2d = np.zeros(2)
    cov_1d = np.eye(1)
    assert_raises(ValueError, multivariate_normal, mean_2d, cov_1d)
    # Checking the entire message word for word would be fragile, so only
    # the leading part is compared.
    expected_prefix = "Dimension mismatch"
    try:
        multivariate_normal(mean_2d, cov_1d)
    except ValueError as err:
        assert_equal(str(err)[:len(expected_prefix)], expected_prefix)
class TestWishart(TestCase):
    """Tests for scipy.stats.wishart."""
    def test_scale_dimensions(self):
        # Test that we can call the Wishart with various scale dimensions
        # Test case: dim=1, scale=1
        true_scale = np.array(1, ndmin=2)
        scales = [
            1,                     # scalar
            [1],                   # iterable
            np.array(1),           # 0-dim
            np.r_[1],              # 1-dim
            np.array(1, ndmin=2)   # 2-dim
        ]
        for scale in scales:
            w = wishart(1, scale)
            assert_equal(w.scale, true_scale)
            assert_equal(w.scale.shape, true_scale.shape)
        # Test case: dim=2, scale=[[1,0]
        #                          [0,2]
        true_scale = np.array([[1,0],
                               [0,2]])
        scales = [
            [1,2],             # iterable
            np.r_[1,2],        # 1-dim
            np.array([[1,0],   # 2-dim
                      [0,2]])
        ]
        for scale in scales:
            w = wishart(2, scale)
            assert_equal(w.scale, true_scale)
            assert_equal(w.scale.shape, true_scale.shape)
        # We cannot call with a df < dim
        assert_raises(ValueError, wishart, 1, np.eye(2))
        # We cannot call with a 3-dimension array
        scale = np.array(1, ndmin=3)
        assert_raises(ValueError, wishart, 1, scale)
    def test_quantile_dimensions(self):
        # Test that we can call the Wishart rvs with various quantile dimensions
        # If dim == 1, consider x.shape = [1,1,1]
        X = [
            1,                       # scalar
            [1],                     # iterable
            np.array(1),             # 0-dim
            np.r_[1],                # 1-dim
            np.array(1, ndmin=2),    # 2-dim
            np.array([1], ndmin=3)   # 3-dim
        ]
        w = wishart(1,1)
        density = w.pdf(np.array(1, ndmin=3))
        for x in X:
            assert_equal(w.pdf(x), density)
        # If dim == 1, consider x.shape = [1,1,*]
        X = [
            [1,2,3],                     # iterable
            np.r_[1,2,3],                # 1-dim
            np.array([1,2,3], ndmin=3)   # 3-dim
        ]
        w = wishart(1,1)
        density = w.pdf(np.array([1,2,3], ndmin=3))
        for x in X:
            assert_equal(w.pdf(x), density)
        # If dim == 2, consider x.shape = [2,2,1]
        # where x[:,:,*] = np.eye(1)*2
        X = [
            2,                 # scalar
            [2,2],             # iterable
            np.array(2),       # 0-dim
            np.r_[2,2],        # 1-dim
            np.array([[2,0],
                      [0,2]]),   # 2-dim
            np.array([[2,0],
                      [0,2]])[:,:,np.newaxis]   # 3-dim
        ]
        w = wishart(2,np.eye(2))
        density = w.pdf(np.array([[2,0],
                                  [0,2]])[:,:,np.newaxis])
        for x in X:
            assert_equal(w.pdf(x), density)
    def test_frozen(self):
        # Test that the frozen and non-frozen Wishart gives the same answers
        # Construct an arbitrary positive definite scale matrix
        dim = 4
        scale = np.diag(np.arange(dim)+1)
        scale[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
        scale = np.dot(scale.T, scale)
        # Construct a collection of positive definite matrices to test the PDF
        X = []
        for i in range(5):
            x = np.diag(np.arange(dim)+(i+1)**2)
            x[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
            x = np.dot(x.T, x)
            X.append(x)
        X = np.array(X).T
        # Construct a 1D and 2D set of parameters
        parameters = [
            (10, 1, np.linspace(0.1, 10, 5)),  # 1D case
            (10, scale, X)
        ]
        for (df, scale, x) in parameters:
            w = wishart(df, scale)
            assert_equal(w.var(), wishart.var(df, scale))
            assert_equal(w.mean(), wishart.mean(df, scale))
            assert_equal(w.mode(), wishart.mode(df, scale))
            assert_equal(w.entropy(), wishart.entropy(df, scale))
            assert_equal(w.pdf(x), wishart.pdf(x, df, scale))
    def test_1D_is_chisquared(self):
        # The 1-dimensional Wishart with an identity scale matrix is just a
        # chi-squared distribution.
        # Test variance, mean, entropy, pdf
        # Kolgomorov-Smirnov test for rvs
        np.random.seed(482974)
        sn = 500
        dim = 1
        scale = np.eye(dim)
        df_range = np.arange(1, 10, 2, dtype=float)
        X = np.linspace(0.1,10,num=10)
        for df in df_range:
            w = wishart(df, scale)
            c = chi2(df)
            # Statistics
            assert_allclose(w.var(), c.var())
            assert_allclose(w.mean(), c.mean())
            assert_allclose(w.entropy(), c.entropy())
            # PDF
            assert_allclose(w.pdf(X), c.pdf(X))
            # rvs
            rvs = w.rvs(size=sn)
            args = (df,)
            alpha = 0.01
            check_distribution_rvs('chi2', args, alpha, rvs)
    def test_is_scaled_chisquared(self):
        # The 2-dimensional Wishart with an arbitrary scale matrix can be
        # transformed to a scaled chi-squared distribution.
        # For :math:`S \sim W_p(V,n)` and :math:`\lambda \in \mathbb{R}^p` we have
        # :math:`\lambda' S \lambda \sim \lambda' V \lambda \times \chi^2(n)`
        np.random.seed(482974)
        sn = 500
        df = 10
        dim = 4
        # Construct an arbitrary positive definite matrix
        scale = np.diag(np.arange(4)+1)
        scale[np.tril_indices(4, k=-1)] = np.arange(6)
        scale = np.dot(scale.T, scale)
        # Use :math:`\lambda = [1, \dots, 1]'`
        lamda = np.ones((dim,1))
        sigma_lamda = lamda.T.dot(scale).dot(lamda).squeeze()
        w = wishart(df, sigma_lamda)
        c = chi2(df, scale=sigma_lamda)
        # Statistics
        assert_allclose(w.var(), c.var())
        assert_allclose(w.mean(), c.mean())
        assert_allclose(w.entropy(), c.entropy())
        # PDF
        X = np.linspace(0.1,10,num=10)
        assert_allclose(w.pdf(X), c.pdf(X))
        # rvs
        rvs = w.rvs(size=sn)
        args = (df,0,sigma_lamda)
        alpha = 0.01
        check_distribution_rvs('chi2', args, alpha, rvs)
class TestInvwishart(TestCase):
def test_frozen(self):
# Test that the frozen and non-frozen inverse Wishart gives the same
# answers
# Construct an arbitrary positive definite scale matrix
dim = 4
scale = np.diag(np.arange(dim)+1)
scale[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
scale = np.dot(scale.T, scale)
# Construct a collection of positive definite matrices to test the PDF
X = []
for i in range(5):
x = np.diag(np.arange(dim)+(i+1)**2)
x[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
x = np.dot(x.T, x)
X.append(x)
X = np.array(X).T
# Construct a 1D and 2D set of parameters
parameters = [
(10, 1, np.linspace(0.1, 10, 5)), # 1D case
(10, scale, X)
]
for (df, scale, x) in parameters:
iw = invwishart(df, scale)
assert_equal(iw.var(), invwishart.var(df, scale))
assert_equal(iw.mean(), invwishart.mean(df, scale))
assert_equal(iw.mode(), invwishart.mode(df, scale))
assert_allclose(iw.pdf(x), invwishart.pdf(x, df, scale))
def test_1D_is_invgamma(self):
# The 1-dimensional inverse Wishart with an identity scale matrix is
# just an inverse gamma distribution.
# Test variance, mean, pdf
# Kolgomorov-Smirnov test for rvs
np.random.seed(482974)
sn = 500
dim = 1
scale = np.eye(dim)
df_range = np.arange(5, 20, 2, dtype=float)
X = np.linspace(0.1,10,num=10)
for df in df_range:
iw = invwishart(df, scale)
ig = invgamma(df/2, scale=1./2)
# Statistics
assert_allclose(iw.var(), ig.var())
assert_allclose(iw.mean(), ig.mean())
# PDF
assert_allclose(iw.pdf(X), ig.pdf(X))
# rvs
rvs = iw.rvs(size=sn)
args = (df/2, 0, 1./2)
alpha = 0.01
check_distribution_rvs('invgamma', args, alpha, rvs)
def test_wishart_invwishart_2D_rvs(self):
dim = 3
df = 10
# Construct a simple non-diagonal positive definite matrix
scale = np.eye(dim)
scale[0,1] = 0.5
scale[1,0] = 0.5
# Construct frozen Wishart and inverse Wishart random variables
w = wishart(df, scale)
iw = invwishart(df, scale)
# Get the generated random variables from a known seed
np.random.seed(248042)
w_rvs = wishart.rvs(df, scale)
np.random.seed(248042)
frozen_w_rvs = w.rvs()
np.random.seed(248042)
iw_rvs = invwishart.rvs(df, scale)
np.random.seed(248042)
frozen_iw_rvs = iw.rvs()
# Manually calculate what it should be, based on the Bartlett (1933)
# decomposition of a Wishart into D A A' D', where D is the Cholesky
# factorization of the scale matrix and A is the lower triangular matrix
# with the square root of chi^2 variates on the diagonal and N(0,1)
# variates in the lower triangle.
np.random.seed(248042)
covariances = np.random.normal(size=3)
variances = np.r_[
np.random.chisquare(df),
np.random.chisquare(df-1),
np.random.chisquare(df-2),
]**0.5
# Construct the lower-triangular A matrix
A = np.diag(variances)
A[np.tril_indices(dim, k=-1)] = covariances
# Wishart random variate
D = np.linalg.cholesky(scale)
DA = D.dot(A)
manual_w_rvs = np.dot(DA, DA.T)
# inverse Wishart random variate
# Supposing that the inverse wishart has scale matrix `scale`, then the
# random variate is the inverse of a random variate drawn from a Wishart
# distribution with scale matrix `inv_scale = np.linalg.inv(scale)`
iD = np.linalg.cholesky(np.linalg.inv(scale))
iDA = iD.dot(A)
manual_iw_rvs = np.linalg.inv(np.dot(iDA, iDA.T))
# Test for equality
assert_allclose(w_rvs, manual_w_rvs)
assert_allclose(frozen_w_rvs, manual_w_rvs)
assert_allclose(iw_rvs, manual_iw_rvs)
assert_allclose(frozen_iw_rvs, manual_iw_rvs)
def test_random_state_property():
    # Every multivariate distribution should honor the random_state
    # machinery; exercise the shared checker on each of them.
    cov = np.eye(3)
    cov[0, 1] = 0.5
    cov[1, 0] = 0.5
    cases = (
        (multivariate_normal, ()),
        (dirichlet, (np.array([1.]),)),
        (wishart, (10, cov)),
        (invwishart, (10, cov)),
    )
    for dist, dist_args in cases:
        check_random_state_property(dist, dist_args)
# Run this module's tests directly via numpy's test runner when invoked
# as a script (normally they are collected by the suite instead).
if __name__ == "__main__":
    run_module_suite()
| |
# api wrapper for three.js
# Transcrypt compiler directive: skip type-annotation processing for this
# module (it contains no annotations and is pure JS interop glue).
__pragma__ ('noanno')
def _ctor(obj):
    # Wrap a raw JS constructor so a plain Python call applies the JS
    # `new` operator (Transcrypt compiles __new__(f(...)) to `new f(...)`).
    def wrapper(*args):
        return __new__(obj(*args))
    return wrapper
# Bind the global THREE namespace via a raw-JS pragma; `api` is the
# untouched JS object exposed by the three.js script.
api = __pragma__ ('js',
'{}',
'THREE'
)
# --- Renderers and render targets ---
WebGLRenderTargetCube = _ctor(api.WebGLRenderTargetCube)
WebGLRenderTarget = _ctor(api.WebGLRenderTarget)
WebGLRenderer = _ctor(api.WebGLRenderer)
# --- Shader tables ---
ShaderLib = _ctor(api.ShaderLib)
UniformsLib = _ctor(api.UniformsLib)
UniformsUtils = _ctor(api.UniformsUtils)
ShaderChunk = _ctor(api.ShaderChunk)
# --- Fog and scene ---
FogExp2 = _ctor(api.FogExp2)
Fog = _ctor(api.Fog)
Scene = _ctor(api.Scene)
# --- Scene-graph objects ---
LensFlare = _ctor(api.LensFlare)
Sprite = _ctor(api.Sprite)
LOD = _ctor(api.LOD)
SkinnedMesh = _ctor(api.SkinnedMesh)
Skeleton = _ctor(api.Skeleton)
Bone = _ctor(api.Bone)
Mesh = _ctor(api.Mesh)
LineSegments = _ctor(api.LineSegments)
LineLoop = _ctor(api.LineLoop)
Line = _ctor(api.Line)
Points = _ctor(api.Points)
Group = _ctor(api.Group)
# --- Textures ---
VideoTexture = _ctor(api.VideoTexture)
DataTexture = _ctor(api.DataTexture)
CompressedTexture = _ctor(api.CompressedTexture)
CubeTexture = _ctor(api.CubeTexture)
CanvasTexture = _ctor(api.CanvasTexture)
DepthTexture = _ctor(api.DepthTexture)
Texture = _ctor(api.Texture)
# --- Loaders ---
CompressedTextureLoader = _ctor(api.CompressedTextureLoader)
DataTextureLoader = _ctor(api.DataTextureLoader)
CubeTextureLoader = _ctor(api.CubeTextureLoader)
TextureLoader = _ctor(api.TextureLoader)
ObjectLoader = _ctor(api.ObjectLoader)
MaterialLoader = _ctor(api.MaterialLoader)
BufferGeometryLoader = _ctor(api.BufferGeometryLoader)
DefaultLoadingManager = _ctor(api.DefaultLoadingManager)
LoadingManager = _ctor(api.LoadingManager)
JSONLoader = _ctor(api.JSONLoader)
ImageLoader = _ctor(api.ImageLoader)
FontLoader = _ctor(api.FontLoader)
FileLoader = _ctor(api.FileLoader)
Loader = _ctor(api.Loader)
Cache = _ctor(api.Cache)
AudioLoader = _ctor(api.AudioLoader)
# --- Lights ---
SpotLightShadow = _ctor(api.SpotLightShadow)
SpotLight = _ctor(api.SpotLight)
PointLight = _ctor(api.PointLight)
RectAreaLight = _ctor(api.RectAreaLight)
HemisphereLight = _ctor(api.HemisphereLight)
DirectionalLightShadow = _ctor(api.DirectionalLightShadow)
DirectionalLight = _ctor(api.DirectionalLight)
AmbientLight = _ctor(api.AmbientLight)
LightShadow = _ctor(api.LightShadow)
Light = _ctor(api.Light)
# --- Cameras ---
StereoCamera = _ctor(api.StereoCamera)
PerspectiveCamera = _ctor(api.PerspectiveCamera)
OrthographicCamera = _ctor(api.OrthographicCamera)
CubeCamera = _ctor(api.CubeCamera)
ArrayCamera = _ctor(api.ArrayCamera)
Camera = _ctor(api.Camera)
# --- Audio ---
AudioListener = _ctor(api.AudioListener)
PositionalAudio = _ctor(api.PositionalAudio)
AudioContext = _ctor(api.AudioContext)
AudioAnalyser = _ctor(api.AudioAnalyser)
Audio = _ctor(api.Audio)
# --- Animation and keyframe tracks ---
VectorKeyframeTrack = _ctor(api.VectorKeyframeTrack)
StringKeyframeTrack = _ctor(api.StringKeyframeTrack)
QuaternionKeyframeTrack = _ctor(api.QuaternionKeyframeTrack)
NumberKeyframeTrack = _ctor(api.NumberKeyframeTrack)
ColorKeyframeTrack = _ctor(api.ColorKeyframeTrack)
BooleanKeyframeTrack = _ctor(api.BooleanKeyframeTrack)
PropertyMixer = _ctor(api.PropertyMixer)
PropertyBinding = _ctor(api.PropertyBinding)
KeyframeTrack = _ctor(api.KeyframeTrack)
AnimationUtils = _ctor(api.AnimationUtils)
AnimationObjectGroup = _ctor(api.AnimationObjectGroup)
AnimationMixer = _ctor(api.AnimationMixer)
AnimationClip = _ctor(api.AnimationClip)
# --- Geometry core and interleaved buffers ---
Uniform = _ctor(api.Uniform)
InstancedBufferGeometry = _ctor(api.InstancedBufferGeometry)
BufferGeometry = _ctor(api.BufferGeometry)
GeometryIdCount = _ctor(api.GeometryIdCount)
Geometry = _ctor(api.Geometry)
InterleavedBufferAttribute = _ctor(api.InterleavedBufferAttribute)
InstancedInterleavedBuffer = _ctor(api.InstancedInterleavedBuffer)
InterleavedBuffer = _ctor(api.InterleavedBuffer)
InstancedBufferAttribute = _ctor(api.InstancedBufferAttribute)
# --- Core objects ---
Face3 = _ctor(api.Face3)
Object3D = _ctor(api.Object3D)
Raycaster = _ctor(api.Raycaster)
Layers = _ctor(api.Layers)
EventDispatcher = _ctor(api.EventDispatcher)
Clock = _ctor(api.Clock)
# --- Interpolants ---
QuaternionLinearInterpolant = _ctor(api.QuaternionLinearInterpolant)
LinearInterpolant = _ctor(api.LinearInterpolant)
DiscreteInterpolant = _ctor(api.DiscreteInterpolant)
CubicInterpolant = _ctor(api.CubicInterpolant)
Interpolant = _ctor(api.Interpolant)
# --- Math primitives ---
Triangle = _ctor(api.Triangle)
Math = _ctor(api.Math)
Spherical = _ctor(api.Spherical)
Cylindrical = _ctor(api.Cylindrical)
Plane = _ctor(api.Plane)
Frustum = _ctor(api.Frustum)
Sphere = _ctor(api.Sphere)
Ray = _ctor(api.Ray)
Matrix4 = _ctor(api.Matrix4)
Matrix3 = _ctor(api.Matrix3)
Box3 = _ctor(api.Box3)
Box2 = _ctor(api.Box2)
Line3 = _ctor(api.Line3)
Euler = _ctor(api.Euler)
Vector3 = _ctor(api.Vector3)
Quaternion = _ctor(api.Quaternion)
Color = _ctor(api.Color)
# --- Extras and visual helpers ---
MorphBlendMesh = _ctor(api.MorphBlendMesh)
ImmediateRenderObject = _ctor(api.ImmediateRenderObject)
VertexNormalsHelper = _ctor(api.VertexNormalsHelper)
SpotLightHelper = _ctor(api.SpotLightHelper)
SkeletonHelper = _ctor(api.SkeletonHelper)
PointLightHelper = _ctor(api.PointLightHelper)
RectAreaLightHelper = _ctor(api.RectAreaLightHelper)
HemisphereLightHelper = _ctor(api.HemisphereLightHelper)
GridHelper = _ctor(api.GridHelper)
PolarGridHelper = _ctor(api.PolarGridHelper)
FaceNormalsHelper = _ctor(api.FaceNormalsHelper)
DirectionalLightHelper = _ctor(api.DirectionalLightHelper)
CameraHelper = _ctor(api.CameraHelper)
BoxHelper = _ctor(api.BoxHelper)
ArrowHelper = _ctor(api.ArrowHelper)
AxisHelper = _ctor(api.AxisHelper)
# --- Curves, paths and fonts ---
CatmullRomCurve3 = _ctor(api.CatmullRomCurve3)
CubicBezierCurve3 = _ctor(api.CubicBezierCurve3)
QuadraticBezierCurve3 = _ctor(api.QuadraticBezierCurve3)
LineCurve3 = _ctor(api.LineCurve3)
ArcCurve = _ctor(api.ArcCurve)
EllipseCurve = _ctor(api.EllipseCurve)
SplineCurve = _ctor(api.SplineCurve)
CubicBezierCurve = _ctor(api.CubicBezierCurve)
QuadraticBezierCurve = _ctor(api.QuadraticBezierCurve)
LineCurve = _ctor(api.LineCurve)
Shape = _ctor(api.Shape)
Path = _ctor(api.Path)
ShapePath = _ctor(api.ShapePath)
Font = _ctor(api.Font)
CurvePath = _ctor(api.CurvePath)
Curve = _ctor(api.Curve)
# --- Utility namespaces ---
ShapeUtils = _ctor(api.ShapeUtils)
SceneUtils = _ctor(api.SceneUtils)
# --- Geometries (classic and buffer variants) ---
WireframeGeometry = _ctor(api.WireframeGeometry)
ParametricGeometry = _ctor(api.ParametricGeometry)
ParametricBufferGeometry = _ctor(api.ParametricBufferGeometry)
TetrahedronGeometry = _ctor(api.TetrahedronGeometry)
TetrahedronBufferGeometry = _ctor(api.TetrahedronBufferGeometry)
OctahedronGeometry = _ctor(api.OctahedronGeometry)
OctahedronBufferGeometry = _ctor(api.OctahedronBufferGeometry)
IcosahedronGeometry = _ctor(api.IcosahedronGeometry)
IcosahedronBufferGeometry = _ctor(api.IcosahedronBufferGeometry)
DodecahedronGeometry = _ctor(api.DodecahedronGeometry)
DodecahedronBufferGeometry = _ctor(api.DodecahedronBufferGeometry)
PolyhedronGeometry = _ctor(api.PolyhedronGeometry)
PolyhedronBufferGeometry = _ctor(api.PolyhedronBufferGeometry)
TubeGeometry = _ctor(api.TubeGeometry)
TubeBufferGeometry = _ctor(api.TubeBufferGeometry)
TorusKnotGeometry = _ctor(api.TorusKnotGeometry)
TorusKnotBufferGeometry = _ctor(api.TorusKnotBufferGeometry)
TorusGeometry = _ctor(api.TorusGeometry)
TorusBufferGeometry = _ctor(api.TorusBufferGeometry)
TextGeometry = _ctor(api.TextGeometry)
TextBufferGeometry = _ctor(api.TextBufferGeometry)
SphereGeometry = _ctor(api.SphereGeometry)
SphereBufferGeometry = _ctor(api.SphereBufferGeometry)
RingGeometry = _ctor(api.RingGeometry)
RingBufferGeometry = _ctor(api.RingBufferGeometry)
PlaneGeometry = _ctor(api.PlaneGeometry)
PlaneBufferGeometry = _ctor(api.PlaneBufferGeometry)
LatheGeometry = _ctor(api.LatheGeometry)
LatheBufferGeometry = _ctor(api.LatheBufferGeometry)
ShapeGeometry = _ctor(api.ShapeGeometry)
ShapeBufferGeometry = _ctor(api.ShapeBufferGeometry)
ExtrudeGeometry = _ctor(api.ExtrudeGeometry)
ExtrudeBufferGeometry = _ctor(api.ExtrudeBufferGeometry)
EdgesGeometry = _ctor(api.EdgesGeometry)
ConeGeometry = _ctor(api.ConeGeometry)
ConeBufferGeometry = _ctor(api.ConeBufferGeometry)
CylinderGeometry = _ctor(api.CylinderGeometry)
CylinderBufferGeometry = _ctor(api.CylinderBufferGeometry)
CircleGeometry = _ctor(api.CircleGeometry)
CircleBufferGeometry = _ctor(api.CircleBufferGeometry)
BoxGeometry = _ctor(api.BoxGeometry)
BoxBufferGeometry = _ctor(api.BoxBufferGeometry)
# --- Materials ---
ShadowMaterial = _ctor(api.ShadowMaterial)
SpriteMaterial = _ctor(api.SpriteMaterial)
RawShaderMaterial = _ctor(api.RawShaderMaterial)
ShaderMaterial = _ctor(api.ShaderMaterial)
PointsMaterial = _ctor(api.PointsMaterial)
MeshPhysicalMaterial = _ctor(api.MeshPhysicalMaterial)
MeshStandardMaterial = _ctor(api.MeshStandardMaterial)
MeshPhongMaterial = _ctor(api.MeshPhongMaterial)
MeshToonMaterial = _ctor(api.MeshToonMaterial)
MeshNormalMaterial = _ctor(api.MeshNormalMaterial)
MeshLambertMaterial = _ctor(api.MeshLambertMaterial)
MeshDepthMaterial = _ctor(api.MeshDepthMaterial)
MeshBasicMaterial = _ctor(api.MeshBasicMaterial)
LineDashedMaterial = _ctor(api.LineDashedMaterial)
LineBasicMaterial = _ctor(api.LineBasicMaterial)
Material = _ctor(api.Material)
# --- Buffer attributes ---
Float64BufferAttribute = _ctor(api.Float64BufferAttribute)
Float32BufferAttribute = _ctor(api.Float32BufferAttribute)
Uint32BufferAttribute = _ctor(api.Uint32BufferAttribute)
Int32BufferAttribute = _ctor(api.Int32BufferAttribute)
Uint16BufferAttribute = _ctor(api.Uint16BufferAttribute)
Int16BufferAttribute = _ctor(api.Int16BufferAttribute)
Uint8ClampedBufferAttribute = _ctor(api.Uint8ClampedBufferAttribute)
Uint8BufferAttribute = _ctor(api.Uint8BufferAttribute)
Int8BufferAttribute = _ctor(api.Int8BufferAttribute)
BufferAttribute = _ctor(api.BufferAttribute)
# --- Constants ---
# NOTE(review): the names below are plain THREE constants, yet they are
# wrapped in _ctor like the classes above, so each becomes a callable
# rather than a value; presumably generated mechanically and the wrappers
# are never invoked — confirm before relying on any of these bindings.
REVISION = _ctor(api.REVISION)
MOUSE = _ctor(api.MOUSE)
CullFaceNone = _ctor(api.CullFaceNone)
CullFaceBack = _ctor(api.CullFaceBack)
CullFaceFront = _ctor(api.CullFaceFront)
CullFaceFrontBack = _ctor(api.CullFaceFrontBack)
FrontFaceDirectionCW = _ctor(api.FrontFaceDirectionCW)
FrontFaceDirectionCCW = _ctor(api.FrontFaceDirectionCCW)
BasicShadowMap = _ctor(api.BasicShadowMap)
PCFShadowMap = _ctor(api.PCFShadowMap)
PCFSoftShadowMap = _ctor(api.PCFSoftShadowMap)
FrontSide = _ctor(api.FrontSide)
BackSide = _ctor(api.BackSide)
DoubleSide = _ctor(api.DoubleSide)
FlatShading = _ctor(api.FlatShading)
SmoothShading = _ctor(api.SmoothShading)
NoColors = _ctor(api.NoColors)
FaceColors = _ctor(api.FaceColors)
VertexColors = _ctor(api.VertexColors)
NoBlending = _ctor(api.NoBlending)
NormalBlending = _ctor(api.NormalBlending)
AdditiveBlending = _ctor(api.AdditiveBlending)
SubtractiveBlending = _ctor(api.SubtractiveBlending)
MultiplyBlending = _ctor(api.MultiplyBlending)
CustomBlending = _ctor(api.CustomBlending)
AddEquation = _ctor(api.AddEquation)
SubtractEquation = _ctor(api.SubtractEquation)
ReverseSubtractEquation = _ctor(api.ReverseSubtractEquation)
MinEquation = _ctor(api.MinEquation)
MaxEquation = _ctor(api.MaxEquation)
ZeroFactor = _ctor(api.ZeroFactor)
OneFactor = _ctor(api.OneFactor)
SrcColorFactor = _ctor(api.SrcColorFactor)
OneMinusSrcColorFactor = _ctor(api.OneMinusSrcColorFactor)
SrcAlphaFactor = _ctor(api.SrcAlphaFactor)
OneMinusSrcAlphaFactor = _ctor(api.OneMinusSrcAlphaFactor)
DstAlphaFactor = _ctor(api.DstAlphaFactor)
OneMinusDstAlphaFactor = _ctor(api.OneMinusDstAlphaFactor)
DstColorFactor = _ctor(api.DstColorFactor)
OneMinusDstColorFactor = _ctor(api.OneMinusDstColorFactor)
SrcAlphaSaturateFactor = _ctor(api.SrcAlphaSaturateFactor)
NeverDepth = _ctor(api.NeverDepth)
AlwaysDepth = _ctor(api.AlwaysDepth)
LessDepth = _ctor(api.LessDepth)
LessEqualDepth = _ctor(api.LessEqualDepth)
EqualDepth = _ctor(api.EqualDepth)
GreaterEqualDepth = _ctor(api.GreaterEqualDepth)
GreaterDepth = _ctor(api.GreaterDepth)
NotEqualDepth = _ctor(api.NotEqualDepth)
MultiplyOperation = _ctor(api.MultiplyOperation)
MixOperation = _ctor(api.MixOperation)
AddOperation = _ctor(api.AddOperation)
NoToneMapping = _ctor(api.NoToneMapping)
LinearToneMapping = _ctor(api.LinearToneMapping)
ReinhardToneMapping = _ctor(api.ReinhardToneMapping)
Uncharted2ToneMapping = _ctor(api.Uncharted2ToneMapping)
CineonToneMapping = _ctor(api.CineonToneMapping)
UVMapping = _ctor(api.UVMapping)
CubeReflectionMapping = _ctor(api.CubeReflectionMapping)
CubeRefractionMapping = _ctor(api.CubeRefractionMapping)
EquirectangularReflectionMapping = _ctor(api.EquirectangularReflectionMapping)
EquirectangularRefractionMapping = _ctor(api.EquirectangularRefractionMapping)
SphericalReflectionMapping = _ctor(api.SphericalReflectionMapping)
CubeUVReflectionMapping = _ctor(api.CubeUVReflectionMapping)
CubeUVRefractionMapping = _ctor(api.CubeUVRefractionMapping)
RepeatWrapping = _ctor(api.RepeatWrapping)
ClampToEdgeWrapping = _ctor(api.ClampToEdgeWrapping)
MirroredRepeatWrapping = _ctor(api.MirroredRepeatWrapping)
NearestFilter = _ctor(api.NearestFilter)
NearestMipMapNearestFilter = _ctor(api.NearestMipMapNearestFilter)
NearestMipMapLinearFilter = _ctor(api.NearestMipMapLinearFilter)
LinearFilter = _ctor(api.LinearFilter)
LinearMipMapNearestFilter = _ctor(api.LinearMipMapNearestFilter)
LinearMipMapLinearFilter = _ctor(api.LinearMipMapLinearFilter)
UnsignedByteType = _ctor(api.UnsignedByteType)
ByteType = _ctor(api.ByteType)
ShortType = _ctor(api.ShortType)
UnsignedShortType = _ctor(api.UnsignedShortType)
IntType = _ctor(api.IntType)
UnsignedIntType = _ctor(api.UnsignedIntType)
FloatType = _ctor(api.FloatType)
HalfFloatType = _ctor(api.HalfFloatType)
UnsignedShort4444Type = _ctor(api.UnsignedShort4444Type)
UnsignedShort5551Type = _ctor(api.UnsignedShort5551Type)
UnsignedShort565Type = _ctor(api.UnsignedShort565Type)
UnsignedInt248Type = _ctor(api.UnsignedInt248Type)
AlphaFormat = _ctor(api.AlphaFormat)
RGBFormat = _ctor(api.RGBFormat)
RGBAFormat = _ctor(api.RGBAFormat)
LuminanceFormat = _ctor(api.LuminanceFormat)
LuminanceAlphaFormat = _ctor(api.LuminanceAlphaFormat)
RGBEFormat = _ctor(api.RGBEFormat)
DepthFormat = _ctor(api.DepthFormat)
DepthStencilFormat = _ctor(api.DepthStencilFormat)
RGB_S3TC_DXT1_Format = _ctor(api.RGB_S3TC_DXT1_Format)
RGBA_S3TC_DXT1_Format = _ctor(api.RGBA_S3TC_DXT1_Format)
RGBA_S3TC_DXT3_Format = _ctor(api.RGBA_S3TC_DXT3_Format)
RGBA_S3TC_DXT5_Format = _ctor(api.RGBA_S3TC_DXT5_Format)
RGB_PVRTC_4BPPV1_Format = _ctor(api.RGB_PVRTC_4BPPV1_Format)
RGB_PVRTC_2BPPV1_Format = _ctor(api.RGB_PVRTC_2BPPV1_Format)
RGBA_PVRTC_4BPPV1_Format = _ctor(api.RGBA_PVRTC_4BPPV1_Format)
RGBA_PVRTC_2BPPV1_Format = _ctor(api.RGBA_PVRTC_2BPPV1_Format)
RGB_ETC1_Format = _ctor(api.RGB_ETC1_Format)
LoopOnce = _ctor(api.LoopOnce)
LoopRepeat = _ctor(api.LoopRepeat)
LoopPingPong = _ctor(api.LoopPingPong)
InterpolateDiscrete = _ctor(api.InterpolateDiscrete)
InterpolateLinear = _ctor(api.InterpolateLinear)
InterpolateSmooth = _ctor(api.InterpolateSmooth)
ZeroCurvatureEnding = _ctor(api.ZeroCurvatureEnding)
ZeroSlopeEnding = _ctor(api.ZeroSlopeEnding)
WrapAroundEnding = _ctor(api.WrapAroundEnding)
TrianglesDrawMode = _ctor(api.TrianglesDrawMode)
TriangleStripDrawMode = _ctor(api.TriangleStripDrawMode)
TriangleFanDrawMode = _ctor(api.TriangleFanDrawMode)
LinearEncoding = _ctor(api.LinearEncoding)
sRGBEncoding = _ctor(api.sRGBEncoding)
GammaEncoding = _ctor(api.GammaEncoding)
RGBEEncoding = _ctor(api.RGBEEncoding)
LogLuvEncoding = _ctor(api.LogLuvEncoding)
RGBM7Encoding = _ctor(api.RGBM7Encoding)
RGBM16Encoding = _ctor(api.RGBM16Encoding)
RGBDEncoding = _ctor(api.RGBDEncoding)
BasicDepthPacking = _ctor(api.BasicDepthPacking)
RGBADepthPacking = _ctor(api.RGBADepthPacking)
# --- Legacy / deprecated aliases kept by three.js for compatibility ---
CubeGeometry = _ctor(api.CubeGeometry)
Face4 = _ctor(api.Face4)
LineStrip = _ctor(api.LineStrip)
LinePieces = _ctor(api.LinePieces)
MeshFaceMaterial = _ctor(api.MeshFaceMaterial)
MultiMaterial = _ctor(api.MultiMaterial)
PointCloud = _ctor(api.PointCloud)
Particle = _ctor(api.Particle)
ParticleSystem = _ctor(api.ParticleSystem)
PointCloudMaterial = _ctor(api.PointCloudMaterial)
ParticleBasicMaterial = _ctor(api.ParticleBasicMaterial)
ParticleSystemMaterial = _ctor(api.ParticleSystemMaterial)
Vertex = _ctor(api.Vertex)
DynamicBufferAttribute = _ctor(api.DynamicBufferAttribute)
Int8Attribute = _ctor(api.Int8Attribute)
Uint8Attribute = _ctor(api.Uint8Attribute)
Uint8ClampedAttribute = _ctor(api.Uint8ClampedAttribute)
Int16Attribute = _ctor(api.Int16Attribute)
Uint16Attribute = _ctor(api.Uint16Attribute)
Int32Attribute = _ctor(api.Int32Attribute)
Uint32Attribute = _ctor(api.Uint32Attribute)
Float32Attribute = _ctor(api.Float32Attribute)
Float64Attribute = _ctor(api.Float64Attribute)
ClosedSplineCurve3 = _ctor(api.ClosedSplineCurve3)
SplineCurve3 = _ctor(api.SplineCurve3)
Spline = _ctor(api.Spline)
BoundingBoxHelper = _ctor(api.BoundingBoxHelper)
EdgesHelper = _ctor(api.EdgesHelper)
WireframeHelper = _ctor(api.WireframeHelper)
XHRLoader = _ctor(api.XHRLoader)
BinaryTextureLoader = _ctor(api.BinaryTextureLoader)
GeometryUtils = _ctor(api.GeometryUtils)
ImageUtils = _ctor(api.ImageUtils)
Projector = _ctor(api.Projector)
CanvasRenderer = _ctor(api.CanvasRenderer)
| |
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler weights.
"""
from oslo.serialization import jsonutils
from nova import context
from nova import exception
from nova.openstack.common.fixture import mockpatch
from nova.scheduler import weights
from nova import test
from nova.tests.unit import matchers
from nova.tests.unit.scheduler import fakes
class TestWeighedHost(test.NoDBTestCase):
    """Unit tests for the WeighedHost wrapper and weigher discovery."""

    def test_dict_conversion(self):
        # to_dict() should expose exactly the host name and raw weight.
        state = fakes.FakeHostState('somehost', None, {})
        weighed = weights.WeighedHost(state, 'someweight')
        self.assertThat(weighed.to_dict(),
                        matchers.DictMatches({'weight': 'someweight',
                                              'host': 'somehost'}))

    def test_all_weighers(self):
        # Every standard weigher must be discoverable by all_weighers().
        names = [cls.__name__ for cls in weights.all_weighers()]
        for expected in ('RAMWeigher', 'MetricsWeigher', 'IoOpsWeigher'):
            self.assertIn(expected, names)
class RamWeigherTestCase(test.NoDBTestCase):
    """Tests for the RAM weigher (free-memory based host weighting)."""

    def setUp(self):
        super(RamWeigherTestCase, self).setUp()
        # Serve the canned compute-node list instead of hitting the DB.
        self.useFixture(mockpatch.Patch(
            'nova.db.compute_node_get_all',
            return_value=fakes.COMPUTE_NODES))
        self.host_manager = fakes.FakeHostManager()
        self.weight_handler = weights.HostWeightHandler()
        self.weight_classes = self.weight_handler.get_matching_classes(
            ['nova.scheduler.weights.ram.RAMWeigher'])

    def _get_all_hosts(self):
        # All host states known to the fake host manager.
        return self.host_manager.get_all_host_states(
            context.get_admin_context())

    def _get_weighed_host(self, hosts, weight_properties=None):
        # Weigh `hosts` and return the top-weighted one.
        props = {} if weight_properties is None else weight_properties
        return self.weight_handler.get_weighed_objects(
            self.weight_classes, hosts, props)[0]

    def test_default_of_spreading_first(self):
        # free_ram_mb: host1=512, host2=1024, host3=3072, host4=8192.
        # Default behavior spreads, so host4 (most free RAM) wins with
        # normalized weight 1.0.
        top = self._get_weighed_host(self._get_all_hosts())
        self.assertEqual(1.0, top.weight)
        self.assertEqual('host4', top.obj.host)

    def test_ram_filter_multiplier1(self):
        self.flags(ram_weight_multiplier=0.0)
        # Zero multiplier: every host ends up with the same weight, so the
        # winner is indeterminate but its weight must be 0.0.
        top = self._get_weighed_host(self._get_all_hosts())
        self.assertEqual(0.0, top.weight)

    def test_ram_filter_multiplier2(self):
        self.flags(ram_weight_multiplier=2.0)
        # The multiplier scales the normalized weight; host4 still wins.
        top = self._get_weighed_host(self._get_all_hosts())
        self.assertEqual(1.0 * 2, top.weight)
        self.assertEqual('host4', top.obj.host)

    def test_ram_filter_negative(self):
        self.flags(ram_weight_multiplier=1.0)
        hosts = list(self._get_all_hosts())
        # Append a host reporting negative free RAM.
        hosts.append(fakes.FakeHostState(
            'negative', 'negative',
            {'id': 100, 'memory_mb': 8192, 'free_ram_mb': -512}))
        weighed = self.weight_handler.get_weighed_objects(
            self.weight_classes, hosts, {})
        # host4 (most free RAM) must come out on top...
        self.assertEqual(1, weighed[0].weight)
        self.assertEqual('host4', weighed[0].obj.host)
        # ...and the negative-RAM host must be normalized to the bottom.
        self.assertEqual(0, weighed[-1].weight)
        self.assertEqual('negative', weighed[-1].obj.host)
class MetricsWeigherTestCase(test.NoDBTestCase):
    """Tests for the metrics weigher, driven by CONF.metrics.weight_setting.

    The weigher ranks hosts by a weighted sum of the per-host metric
    values ("foo", "bar", "zot") carried by fakes.COMPUTE_NODES_METRICS.
    """

    def setUp(self):
        super(MetricsWeigherTestCase, self).setUp()
        # Serve the canned metric-bearing compute nodes instead of the DB.
        self.useFixture(mockpatch.Patch(
            'nova.db.compute_node_get_all',
            return_value=fakes.COMPUTE_NODES_METRICS))
        self.host_manager = fakes.FakeHostManager()
        self.weight_handler = weights.HostWeightHandler()
        self.weight_classes = self.weight_handler.get_matching_classes(
            ['nova.scheduler.weights.metrics.MetricsWeigher'])

    def _get_weighed_host(self, hosts, setting, weight_properties=None):
        """Apply ``setting`` to CONF.metrics and return the top host.

        Consistency fix: compare ``weight_properties`` against None
        explicitly (as RamWeigherTestCase._get_weighed_host does) rather
        than by truthiness, so an explicitly passed empty dict is used
        as-is.  Behavior is unchanged for all existing callers.
        """
        if weight_properties is None:
            weight_properties = {}
        self.flags(weight_setting=setting, group='metrics')
        return self.weight_handler.get_weighed_objects(self.weight_classes,
                hosts, weight_properties)[0]

    def _get_all_hosts(self):
        # All host states known to the fake host manager.
        ctxt = context.get_admin_context()
        return self.host_manager.get_all_host_states(ctxt)

    def _do_test(self, settings, expected_weight, expected_host):
        # Weigh all fake hosts under `settings` and check the winner.
        hostinfo_list = self._get_all_hosts()
        weighed_host = self._get_weighed_host(hostinfo_list, settings)
        self.assertEqual(expected_weight, weighed_host.weight)
        self.assertEqual(expected_host, weighed_host.obj.host)

    def test_single_resource(self):
        # host1: foo=512
        # host2: foo=1024
        # host3: foo=3072
        # host4: foo=8192
        # so, host4 should win:
        setting = ['foo=1']
        self._do_test(setting, 1.0, 'host4')

    def test_multiple_resource(self):
        # host1: foo=512, bar=1
        # host2: foo=1024, bar=2
        # host3: foo=3072, bar=1
        # host4: foo=8192, bar=0
        # so, host2 should win:
        setting = ['foo=0.0001', 'bar=1']
        self._do_test(setting, 1.0, 'host2')

    def test_single_resourcenegtive_ratio(self):
        # NOTE: method name carries a historical typo ("negtive"); kept so
        # test discovery/history stays stable.
        # host1: foo=512
        # host2: foo=1024
        # host3: foo=3072
        # host4: foo=8192
        # A negative ratio inverts the ordering, so host1 should win:
        setting = ['foo=-1']
        self._do_test(setting, 1.0, 'host1')

    def test_multiple_resource_missing_ratio(self):
        # host1: foo=512, bar=1
        # host2: foo=1024, bar=2
        # host3: foo=3072, bar=1
        # host4: foo=8192, bar=0
        # 'bar' has no ratio and is ignored, so host4 should win:
        setting = ['foo=0.0001', 'bar']
        self._do_test(setting, 1.0, 'host4')

    def test_multiple_resource_wrong_ratio(self):
        # host1: foo=512, bar=1
        # host2: foo=1024, bar=2
        # host3: foo=3072, bar=1
        # host4: foo=8192, bar=0
        # 'bar' has an unparseable ratio and is ignored, so host4 wins:
        setting = ['foo=0.0001', 'bar = 2.0t']
        self._do_test(setting, 1.0, 'host4')

    def _check_parsing_result(self, weigher, setting, results):
        # Parse `setting` through the weigher and verify the (name, ratio)
        # pairs it retained.
        self.flags(weight_setting=setting, group='metrics')
        weigher._parse_setting()
        self.assertEqual(len(weigher.setting), len(results))
        for item in results:
            self.assertIn(item, weigher.setting)

    def test_parse_setting(self):
        weigher = self.weight_classes[0]()
        # Well-formed single entry.
        self._check_parsing_result(weigher,
                                   ['foo=1'],
                                   [('foo', 1.0)])
        # Multiple well-formed entries, negative ratio allowed.
        self._check_parsing_result(weigher,
                                   ['foo=1', 'bar=-2.1'],
                                   [('foo', 1.0), ('bar', -2.1)])
        # Non-numeric ratio is dropped.
        self._check_parsing_result(weigher,
                                   ['foo=a1', 'bar=-2.1'],
                                   [('bar', -2.1)])
        # Missing '=' is dropped.
        self._check_parsing_result(weigher,
                                   ['foo', 'bar=-2.1'],
                                   [('bar', -2.1)])
        # Empty metric name is dropped.
        self._check_parsing_result(weigher,
                                   ['=5', 'bar=-2.1'],
                                   [('bar', -2.1)])

    def test_metric_not_found_required(self):
        # 'zot' is missing from hosts 1-4 and metrics are required by
        # default, so weighing must raise (the expected_weight/host args
        # are never reached).
        setting = ['foo=1', 'zot=2']
        self.assertRaises(exception.ComputeHostMetricNotFound,
                          self._do_test,
                          setting,
                          8192,
                          'host4')

    def test_metric_not_found_non_required(self):
        # host1: foo=512, bar=1
        # host2: foo=1024, bar=2
        # host3: foo=3072, bar=1
        # host4: foo=8192, bar=0
        # host5: foo=768, bar=0, zot=1
        # host6: foo=2048, bar=0, zot=2
        # With required=False, hosts lacking 'zot' are tolerated and
        # host5 should win:
        self.flags(required=False, group='metrics')
        setting = ['foo=0.0001', 'zot=-1']
        self._do_test(setting, 1.0, 'host5')
# Fake compute-node records for the I/O-ops weigher tests below.  Each
# entry's 'io_workload' stat is what the weigher reads; the tests depend
# on these exact values, so treat this fixture as read-only.
COMPUTE_NODES_IO_OPS = [
    # host1: num_io_ops=1
    dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
         disk_available_least=None, free_ram_mb=512, vcpus_used=1,
         free_disk_gb=512, local_gb_used=0, updated_at=None,
         service=dict(host='host1', disabled=False),
         hypervisor_hostname='node1', host_ip='127.0.0.1',
         hypervisor_version=0, numa_topology=None,
         stats=jsonutils.dumps({'io_workload': '1'})),
    # host2: num_io_ops=2
    dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
         disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
         free_disk_gb=1024, local_gb_used=0, updated_at=None,
         service=dict(host='host2', disabled=True),
         hypervisor_hostname='node2', host_ip='127.0.0.1',
         hypervisor_version=0, numa_topology=None,
         stats=jsonutils.dumps({'io_workload': '2'})),
    # host3: num_io_ops=0, so host3 should win in the case of default
    # io_ops_weight_multiplier configure.
    dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
         disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
         free_disk_gb=3072, local_gb_used=0, updated_at=None,
         service=dict(host='host3', disabled=False),
         hypervisor_hostname='node3', host_ip='127.0.0.1',
         hypervisor_version=0, numa_topology=None,
         stats=jsonutils.dumps({'io_workload': '0'})),
    # host4: num_io_ops=4, so host4 should win in the case of positive
    # io_ops_weight_multiplier configure.
    dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
         disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
         free_disk_gb=8888, local_gb_used=0, updated_at=None,
         service=dict(host='host4', disabled=False),
         hypervisor_hostname='node4', host_ip='127.0.0.1',
         hypervisor_version=0, numa_topology=None,
         stats=jsonutils.dumps({'io_workload': '4'})),
    # Broken entry (no service record) — exercises tolerant host loading.
    dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
    ]
class IoOpsWeigherTestCase(test.NoDBTestCase):
    """Tests for the I/O-ops weigher, using COMPUTE_NODES_IO_OPS above."""

    def setUp(self):
        super(IoOpsWeigherTestCase, self).setUp()
        # Serve the io-ops fixture instead of hitting the DB.
        self.useFixture(mockpatch.Patch(
            'nova.db.compute_node_get_all',
            return_value=COMPUTE_NODES_IO_OPS))
        self.host_manager = fakes.FakeHostManager()
        self.weight_handler = weights.HostWeightHandler()
        self.weight_classes = self.weight_handler.get_matching_classes(
            ['nova.scheduler.weights.io_ops.IoOpsWeigher'])

    def _get_weighed_host(self, hosts, io_ops_weight_multiplier):
        # None means "leave the configured default multiplier in place".
        if io_ops_weight_multiplier is not None:
            self.flags(io_ops_weight_multiplier=io_ops_weight_multiplier)
        return self.weight_handler.get_weighed_objects(
            self.weight_classes, hosts, {})[0]

    def _get_all_hosts(self):
        return self.host_manager.get_all_host_states(
            context.get_admin_context())

    def _do_test(self, io_ops_weight_multiplier, expected_weight,
                 expected_host):
        top = self._get_weighed_host(self._get_all_hosts(),
                                     io_ops_weight_multiplier)
        self.assertEqual(top.weight, expected_weight)
        if expected_host:
            self.assertEqual(top.obj.host, expected_host)

    def test_io_ops_weight_multiplier_by_default(self):
        # With the default configuration host3 (num_io_ops=0) wins.
        self._do_test(io_ops_weight_multiplier=None,
                      expected_weight=0.0,
                      expected_host='host3')

    def test_io_ops_weight_multiplier_zero_value(self):
        # Zero multiplier: all hosts weigh the same, no particular winner.
        self._do_test(io_ops_weight_multiplier=0.0,
                      expected_weight=0.0,
                      expected_host=None)

    def test_io_ops_weight_multiplier_positive_value(self):
        # A positive multiplier favors the busiest host (host4).
        self._do_test(io_ops_weight_multiplier=2.0,
                      expected_weight=2.0,
                      expected_host='host4')
| |
#!/usr/bin/python2
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# IMPORTANT NOTE: If you make local mods to this file, you must run:
# % pnacl/build.sh driver
# in order for them to take effect in the scons build. This command
# updates the copy in the toolchain/ tree.
# Global environment and expression parsing for the PNaCl driver
# This dictionary initializes a shell-like environment.
# Shell escaping and ${} substitution are provided.
# See "class Environment" defined later for the implementation.
from driver_log import Log, DriverExit
from shelltools import shell
import types
# NOTE: values are lazily evaluated shell-like expressions. ${VAR},
# %VAR% name substitution, ${COND ? A : B} ternaries and ${@function}
# calls are expanded by Environment.eval (defined below).
INITIAL_ENV = {
  # Set by DriverMain
  'DRIVER_PATH' : '', # Absolute path to this driver invocation
  'DRIVER_BIN' : '', # PNaCl driver bin/ directory
  'BASE_NACL' : '${@FindBaseNaCl}', # Absolute path of native_client/
  'BASE_TOOLCHAIN' : '${@FindBaseToolchain}', # Absolute path to toolchain/
  'BASE' : '${@FindBasePNaCl}', # Absolute path to PNaCl
  'BUILD_OS' : '${@GetBuildOS}', # "linux", "darwin" or "windows"
  'BUILD_ARCH' : '${@GetBuildArch}', # "x86_64" or "i686" or "i386"
  # Directories
  'BASE_HOST' : '${BASE}/host_${HOST_ARCH}',
  'BASE_LLVM' : '${BASE_HOST}',
  'BASE_BINUTILS' : '${BASE_HOST}',
  'BASE_LIB_NATIVE' : '${BASE}/lib-',
  'BASE_NEWLIB' : '${BASE}/newlib',
  'BASE_GLIBC' : '${BASE}/glibc',
  'BASE_LIBMODE' : '${BASE}/${LIBMODE}',
  'BASE_USR' : '${BASE_LIBMODE}/usr',
  'BASE_SDK' : '${BASE_LIBMODE}/sdk',
  'BASE_LIB' : '${BASE_LIBMODE}/lib',
  'BASE_USR_ARCH' : '${BASE_USR_%BCLIB_ARCH%}',
  'BASE_USR_X8664' : '${BASE_LIBMODE}/usr-bc-x86-64',
  'BASE_LIB_ARCH' : '${BASE_LIB_%BCLIB_ARCH%}',
  'BASE_LIB_X8664' : '${BASE_LIBMODE}/lib-bc-x86-64',
  'BASE_LLVM_BIN' : '${BASE_LLVM}/bin',
  'TRANSLATOR_BIN' :
    '${BASE_TOOLCHAIN}/pnacl_translator/${STANDARD_ARCH}/bin',
  # TODO(pdox): Unify this with ARCH.
  'STANDARD_ARCH' : '${STANDARD_ARCH_%ARCH%}',
  'STANDARD_ARCH_X8632' : 'i686',
  'STANDARD_ARCH_X8664' : 'x86_64',
  'STANDARD_ARCH_ARM' : 'armv7',
  'STANDARD_ARCH_MIPS32': 'mips32',
  'SCONS_OUT' : '${BASE_NACL}/scons-out',
  # Driver settings
  'ARCH' : '', # Target architecture
  'BIAS' : 'NONE', # This can be 'NONE', 'ARM', 'MIPS32', 'X8632' or
                   # 'X8664'.
                   # When not set to none, this causes the front-end to
                   # act like a target-specific compiler. This bias is
                   # currently needed while compiling newlib,
                   # and some scons tests.
  'DRY_RUN' : '0',
  'DEBUG' : '0', # Print out internal actions
  'RECURSE' : '0', # In a recursive driver call
  'SAVE_TEMPS' : '0', # Do not clean up temporary files
  'SANDBOXED' : '0', # Use sandboxed toolchain for this arch. (main switch)
  'HAS_FRONTEND': '', # Set by ReadConfig(). '1' if the driver install
                      # has support for front-end bitcode tools, or '0'
                      # if it only has the backend translator.
  'FORCE_INTERMEDIATE_S': '0',
  'USE_EMULATOR' : '0',
  'USE_BOOTSTRAP' : '${BUILD_OS==linux ? 1 : 0}',
  # Args passed from one driver invocation to another
  'INHERITED_DRIVER_ARGS' : '',
  'LIBMODE' : '', # glibc or newlib (set by ReadConfig)
  'LIBMODE_GLIBC' : '${LIBMODE==glibc ? 1 : 0}',
  'LIBMODE_NEWLIB' : '${LIBMODE==newlib ? 1 : 0}',
  'BCLIB_ARCH' : '',
  # Logging settings
  'LOGGING' : '0', # True if logging is enabled.
  'LOG_VERBOSE' : '0', # Log to stdout (--pnacl-driver-verbose)
  'LOG_TO_FILE' : '0', # Log to file (--pnacl-driver-log-to-file)
  'LOG_FILENAME' : '${BASE}/driver.log',
  'LOG_FILE_SIZE_LIMIT': str(20 * 1024 * 1024),
  # Conventions
  'SO_EXT' : '${SO_EXT_%BUILD_OS%}',
  'SO_EXT_darwin' : '.dylib',
  'SO_EXT_linux' : '.so',
  'SO_EXT_windows' : '.dll',
  'SO_DIR' : '${SO_DIR_%BUILD_OS%}',
  'SO_DIR_darwin' : 'lib',
  'SO_DIR_linux' : 'lib',
  'SO_DIR_windows' : 'bin', # On Windows, DLLs are placed in bin/
                            # because the dynamic loader searches %PATH%
  'SO_PREFIX' : '${SO_PREFIX_%BUILD_OS%}',
  'SO_PREFIX_darwin' : 'lib',
  'SO_PREFIX_linux' : 'lib',
  'SO_PREFIX_windows': 'cyg',
  'EXEC_EXT' : '${EXEC_EXT_%BUILD_OS%}',
  'EXEC_EXT_darwin' : '',
  'EXEC_EXT_linux' : '',
  'EXEC_EXT_windows': '.exe',
  'SCONS_OS' : '${SCONS_OS_%BUILD_OS%}',
  'SCONS_OS_linux' : 'linux',
  'SCONS_OS_darwin' : 'mac',
  'SCONS_OS_windows' : 'win',
  # llvm goldplugin
  'GOLD_PLUGIN_SO' : '${BASE_LLVM}/${SO_DIR}/${SO_PREFIX}LLVMgold${SO_EXT}',
  'SCONS_STAGING' : '${SCONS_STAGING_%ARCH%}',
  'SCONS_STAGING_X8632' : '${SCONS_OUT}/opt-${SCONS_OS}-x86-32/staging',
  'SCONS_STAGING_X8664' : '${SCONS_OUT}/opt-${SCONS_OS}-x86-64/staging',
  'SCONS_STAGING_ARM' : '${SCONS_OUT}/opt-${SCONS_OS}-arm/staging',
  'SCONS_STAGING_MIPS32': '${SCONS_OUT}/opt-${SCONS_OS}-mips32/staging',
  'SEL_UNIVERSAL_PREFIX': '${USE_EMULATOR ? ${EMULATOR}}',
  'SEL_UNIVERSAL' : '${SCONS_STAGING}/sel_universal${EXEC_EXT}',
  # NOTE: -Q skips sel_ldr qualification tests, -c -c skips validation
  # NOTE: We are not using -B to load the IRT, since the translators do not
  # use the IRT.
  'SEL_UNIVERSAL_FLAGS' : '--abort_on_error ' +
                          '--uses_reverse_service ' +
                          '${USE_EMULATOR ? -Q -c -c --command_prefix ${EMULATOR}}',
  'EMULATOR' : '${EMULATOR_%ARCH%}',
  'EMULATOR_X8632' : '',
  'EMULATOR_X8664' : '',
  # NOTE: this is currently the only dependency on the arm trusted TC
  'EMULATOR_ARM' :
      '${BASE_NACL}/toolchain/linux_arm-trusted/run_under_qemu_arm',
  'EMULATOR_MIPS32' :
      '${BASE_NACL}/toolchain/linux_mips-trusted/run_under_qemu_mips32',
  'SEL_LDR' : '${SCONS_STAGING}/sel_ldr${EXEC_EXT}',
  'BOOTSTRAP_LDR' : '${SCONS_STAGING}/nacl_helper_bootstrap${EXEC_EXT}',
  # sandboxed llvm backend
  'LLC_SB' : '${TRANSLATOR_BIN}/pnacl-llc.nexe',
  # sandboxed linker (gold based)
  'LD_SB' : '${TRANSLATOR_BIN}/ld.nexe',
  # Bitcode LLVM tools
  'CLANG' : '${BASE_LLVM_BIN}/clang${EXEC_EXT}',
  # 'clang++' doesn't work on Windows (outside of Cygwin),
  # because it is a symlink. '-ccc-cxx' enables C++ mode.
  'CLANGXX' : '${BASE_LLVM_BIN}/clang${EXEC_EXT} -ccc-cxx',
  'LLVM_OPT' : '${BASE_LLVM_BIN}/opt${EXEC_EXT}',
  'LLVM_DIS' : '${BASE_LLVM_BIN}/llvm-dis${EXEC_EXT}',
  'LLVM_NM' : '${BASE_LLVM_BIN}/llvm-nm${EXEC_EXT}',
  # llvm-as compiles llvm assembly (.ll) to bitcode (.bc/.po)
  'LLVM_AS' : '${BASE_LLVM_BIN}/llvm-as${EXEC_EXT}',
  'LLVM_BCWRAP' : '${BASE_LLVM_BIN}/bc-wrap${EXEC_EXT}',
  'PNACL_ABICHECK': '${BASE_LLVM_BIN}/pnacl-abicheck${EXEC_EXT}',
  # Native LLVM tools
  'LLVM_PNACL_LLC': '${BASE_LLVM_BIN}/pnacl-llc${EXEC_EXT}',
  # llvm-mc is llvm's native assembler
  'LLVM_MC' : '${BASE_LLVM_BIN}/llvm-mc${EXEC_EXT}',
  # Binutils
  'BINUTILS_BASE' : '${BASE_BINUTILS}/bin/le32-nacl-',
  'OBJDUMP' : '${BINUTILS_BASE}objdump${EXEC_EXT}',
  'NM' : '${BINUTILS_BASE}nm${EXEC_EXT}',
  'AR' : '${BINUTILS_BASE}ar${EXEC_EXT}',
  'RANLIB' : '${BINUTILS_BASE}ranlib${EXEC_EXT}',
  'READELF' : '${BINUTILS_BASE}readelf${EXEC_EXT}',
  'STRIP' : '${BINUTILS_BASE}strip${EXEC_EXT}',
  # linker (used for both bitcode and ELF linking)
  'LD' : '${BINUTILS_BASE}ld.gold${EXEC_EXT}',
  # Use the default command line arguments to the sandboxed translator.
  'USE_DEFAULT_CMD_LINE': '1',
}
######################################################################
#
# Environment
#
######################################################################
def ParseError(s, leftpos, rightpos, msg):
  """Report a syntax error in expression |s| and exit the driver.

  The offending region [leftpos, rightpos] (inclusive) is marked with a
  caret underline printed below the expression.
  """
  underline = (' '*leftpos) + ('^'*(rightpos - leftpos + 1))
  Log.Error("Parse Error: %s", msg)
  Log.Error(' ' + s)
  Log.Error(' ' + underline)
  DriverExit(1)
def FindFirst(s, pos, strset):
  """Find the leftmost position in |s| (starting at |pos|) where any
  string in |strset| begins.

  Returns (matched_string, index); if nothing matches, (None, len(s)).
  For example:
    FindFirst('hello world', 0, ['h','o']) = ('h', 0)
    FindFirst('hello world', 1, ['h','o']) = ('o', 4)
    FindFirst('hello world', 0, ['x'])     = (None, 11)
  """
  # Map each candidate's first occurrence to the candidate itself; a
  # later candidate at the same index overwrites an earlier one, which
  # preserves the original tie-breaking behavior.
  found = {s.find(sub, pos): sub for sub in strset}
  found.pop(-1, None)  # drop candidates that never occur
  if not found:
    return (None, len(s))
  first = min(found)
  return (found[first], first)
class Environment(object):
  """Shell-like variable store with lazy ${...} expression evaluation.

  Values may reference other variables (${VAR}), use %VAR% name
  substitution, ternaries (${COND ? A : B}) and registered @function
  calls; see the grammar in the comment block below.
  """
  # Registry of @functions callable from expressions (name -> callable).
  functions = {}

  @classmethod
  def register(cls, func):
    """ Register a function for use in the evaluator """
    cls.functions[func.__name__] = func
    return func

  def __init__(self):
    # |stack| holds saved copies of |data| for push()/pop() scoping.
    self.stack = []
    self.reset()

  def reset(self):
    # Restore the pristine initial environment (copied, not aliased).
    self.data = dict(INITIAL_ENV)

  def update(self, extra):
    self.data.update(extra)

  def dump(self):
    # Debugging aid: print every raw (unevaluated) binding.
    for (k,v) in self.data.iteritems():
      print '%s == %s' % (k,v)

  def push(self):
    # Save the current bindings; subsequent set() calls are undone by pop().
    self.stack.append(self.data)
    self.data = dict(self.data) # Make a copy

  def pop(self):
    self.data = self.stack.pop()

  def has(self, varname):
    return varname in self.data

  def getraw(self, varname):
    # Evaluate the variable's expression but skip shell splitting.
    return self.eval(self.data[varname])

  # Evaluate a variable from the environment.
  # Returns a list of terms.
  def get(self, varname):
    return shell.split(self.getraw(varname))

  # Retrieve a variable from the environment which
  # is a single term. Returns a string.
  def getone(self, varname):
    return shell.unescape(self.getraw(varname))

  def getbool(self, varname):
    # Booleans are stored as the strings '0'/'1'.
    return bool(int(self.getone(varname)))

  def setbool(self, varname, val):
    if val:
      self.set(varname, '1')
    else:
      self.set(varname, '0')

  # Set a variable in the environment without shell-escape
  def setraw(self, varname, val):
    self.data[varname] = val

  # Set one or more variables using named arguments
  def setmany(self, **kwargs):
    for k,v in kwargs.iteritems():
      if isinstance(v, str):
        self.set(k, v)
      else:
        self.set(k, *v)

  def clear(self, varname):
    self.data[varname] = ''

  # Set a variable to one or more terms, applying shell-escape.
  def set(self, varname, *vals):
    self.clear(varname)
    self.append(varname, *vals)

  # Append one or more terms to a variable in the
  # environment, applying shell-escape.
  def append(self, varname, *vals):
    escaped = [ shell.escape(v) for v in vals ]
    if len(self.data[varname]) > 0:
      self.data[varname] += ' '
    self.data[varname] += ' '.join(escaped)

  # Evaluate an expression s
  def eval(self, s):
    (result, i) = self.eval_expr(s, 0, [])
    assert(i == len(s))
    return result

  ######################################################################
  # EXPRESSION EVALUATION CODE
  # Context Free Grammar:
  #
  # str = empty | string literal
  # expr = str | expr '$' '{' bracket_expr '}' expr
  # bracket_expr = varname | boolexpr ? expr | boolexpr ? expr : expr | @call
  # boolexpr = boolval | boolval '&&' boolexpr | boolval '||' boolexpr
  # boolval = varname | !varname | #varname | !#varname | varname '==' str
  # varname = str | varname '%' bracket_expr '%' varname
  # call = func | func ':' arglist
  # func = str
  # arglist = empty | arg ':' arglist
  #
  # Do not call these functions outside of this class.
  # The env.eval method is the external interface to the evaluator.
  ######################################################################

  # Evaluate a string literal
  def eval_str(self, s, pos, terminators):
    (_,i) = FindFirst(s, pos, terminators)
    return (s[pos:i], i)

  # Evaluate %var% substitutions inside a variable name.
  # Returns (the_actual_variable_name, endpos)
  # Terminated by } character
  def eval_varname(self, s, pos, terminators):
    (_,i) = FindFirst(s, pos, ['%'] + terminators)
    leftpart = s[pos:i].strip(' ')
    if i == len(s) or s[i] in terminators:
      return (leftpart, i)
    (middlepart, j) = self.eval_bracket_expr(s, i+1, ['%'])
    if j == len(s) or s[j] != '%':
      ParseError(s, i, j, "Unterminated %")
    (rightpart, k) = self.eval_varname(s, j+1, terminators)
    fullname = leftpart + middlepart + rightpart
    fullname = fullname.strip()
    return (fullname, k)

  # Absorb whitespace
  def eval_whitespace(self, s, pos):
    i = pos
    while i < len(s) and s[i] == ' ':
      i += 1
    return (None, i)

  def eval_bool_val(self, s, pos, terminators):
    # Parse one boolean atom: [!][#]VARNAME or VARNAME == literal.
    # '!' negates; '#' tests non-emptiness instead of truth value.
    (_,i) = self.eval_whitespace(s, pos)
    if s[i] == '!':
      negated = True
      i += 1
    else:
      negated = False
    (_,i) = self.eval_whitespace(s, i)
    if s[i] == '#':
      uselen = True
      i += 1
    else:
      uselen = False
    (varname, j) = self.eval_varname(s, i, ['=']+terminators)
    if j == len(s):
      # This is an error condition one level up. Don't evaluate anything.
      return (False, j)
    if varname not in self.data:
      ParseError(s, i, j, "Undefined variable '%s'" % varname)
    vardata = self.data[varname]
    contents = self.eval(vardata)
    if s[j] == '=':
      # String equality test
      if j+1 == len(s) or s[j+1] != '=':
        ParseError(s, j, j, "Unexpected token")
      if uselen:
        ParseError(s, j, j, "Cannot combine == and #")
      (_,j) = self.eval_whitespace(s, j+2)
      (literal_str,j) = self.eval_str(s, j, [' ']+terminators)
      (_,j) = self.eval_whitespace(s, j)
      if j == len(s):
        return (False, j) # Error one level up
    else:
      literal_str = None
    if uselen:
      val = (len(contents) != 0)
    elif literal_str is not None:
      val = (contents == literal_str)
    else:
      if contents not in ('0','1'):
        ParseError(s, j, j,
          "%s evaluated to %s, which is not a boolean!" % (varname, contents))
      val = bool(int(contents))
    return (negated ^ val, j)

  # Evaluate a boolexpr
  def eval_bool_expr(self, s, pos, terminators):
    # NOTE: '&&' / '||' are parsed right-associatively and both operands
    # are always evaluated (no short-circuiting).
    (boolval1, i) = self.eval_bool_val(s, pos, ['&','|']+terminators)
    if i == len(s):
      # This is an error condition one level up. Don't evaluate anything.
      return (False, i)
    if s[i] in ('&','|'):
      # and/or expression
      if i+1 == len(s) or s[i+1] != s[i]:
        ParseError(s, i, i, "Unexpected token")
      is_and = (s[i] == '&')
      (boolval2, j) = self.eval_bool_expr(s, i+2, terminators)
      if j == len(s):
        # This is an error condition one level up.
        return (False, j)
      if is_and:
        return (boolval1 and boolval2, j)
      else:
        return (boolval1 or boolval2, j)
    return (boolval1, i)

  # Evaluate the inside of a ${} or %%.
  # Returns the (the_evaluated_string, endpos)
  def eval_bracket_expr(self, s, pos, terminators):
    (_,pos) = self.eval_whitespace(s, pos)
    if s[pos] == '@':
      # Function call: ${@func}
      # or possibly : ${@func:arg1:arg2...}
      (_,i) = FindFirst(s, pos, [':']+terminators)
      if i == len(s):
        return ('', i) # Error one level up
      funcname = s[pos+1:i]
      if s[i] != ':':
        j = i
        args = []
      else:
        (_,j) = FindFirst(s, i+1, terminators)
        if j == len(s):
          return ('', j) # Error one level up
        args = s[i+1:j].split(':')
      val = self.functions[funcname](*args)
      contents = self.eval(val)
      return (contents, j)
    (m,_) = FindFirst(s, pos, ['?']+terminators)
    if m != '?':
      # Regular variable substitution
      (varname,i) = self.eval_varname(s, pos, terminators)
      if len(s) == i:
        return ('', i) # Error one level up
      if varname not in self.data:
        ParseError(s, pos, i, "Undefined variable '%s'" % varname)
      vardata = self.data[varname]
      contents = self.eval(vardata)
      return (contents, i)
    else:
      # Ternary Mode
      (is_cond_true,i) = self.eval_bool_expr(s, pos, ['?']+terminators)
      assert(i < len(s) and s[i] == '?')
      (if_true_expr, j) = self.eval_expr(s, i+1, [' : ']+terminators)
      if j == len(s):
        return ('', j) # Error one level up
      if s[j:j+3] == ' : ':
        (if_false_expr,j) = self.eval_expr(s, j+3, terminators)
        if j == len(s):
          # This is an error condition one level up.
          return ('', j)
      else:
        if_false_expr = ''
      if is_cond_true:
        contents = if_true_expr.strip()
      else:
        contents = if_false_expr.strip()
      return (contents, j)

  # Evaluate an expression with ${} in string s, starting at pos.
  # Returns (the_evaluated_expression, endpos)
  def eval_expr(self, s, pos, terminators):
    (m,i) = FindFirst(s, pos, ['${'] + terminators)
    leftpart = s[pos:i]
    if i == len(s) or m in terminators:
      return (leftpart, i)
    (middlepart, j) = self.eval_bracket_expr(s, i+2, ['}'])
    if j == len(s) or s[j] != '}':
      ParseError(s, i, j, 'Unterminated ${')
    (rightpart, k) = self.eval_expr(s, j+1, terminators)
    return (leftpart + middlepart + rightpart, k)
# The single global environment instance used throughout the driver.
env = Environment()

def override_env(meth_name, func):
  """Override a method in the global |env|, given the method name
  and the new function.
  """
  global env
  # Python 2 three-argument MethodType: bind |func| as a bound method of
  # the existing |env| instance.
  setattr(env, meth_name, types.MethodType(func, env, Environment))
| |
# -*- coding: utf-8 -*-
import copy
import functools
import httplib as http
import json
import logging
import os
from flask import request, make_response
import lxml.html
from mako.lookup import TemplateLookup
from mako.template import Template
import markupsafe
from werkzeug.exceptions import NotFound
import werkzeug.wrappers
from framework import sentry
from framework.exceptions import HTTPError
from framework.flask import app, redirect
from framework.sessions import session
from website import settings
# Module-level logger for routing/rendering diagnostics.
logger = logging.getLogger(__name__)

TEMPLATE_DIR = settings.TEMPLATES_PATH

# Template lookup used for "trusted" templates: output is NOT
# markup-safe escaped.
_TPL_LOOKUP = TemplateLookup(
    default_filters=[
        'unicode',  # default filter; must set explicitly when overriding
    ],
    directories=[
        TEMPLATE_DIR,
        settings.ADDON_PATH,
    ],
    module_directory='/tmp/mako_modules'
)

# Escaping lookup used when trust=False: adds the 'h' (HTML-escape)
# filter on top of the defaults.
_TPL_LOOKUP_SAFE = TemplateLookup(
    default_filters=[
        'unicode',  # default filter; must set explicitly when overriding
        'temp_ampersand_fixer',  # FIXME: Temporary workaround for data stored in wrong format in DB. Unescape it before it gets re-escaped by Markupsafe. See [#OSF-4432]
        'h',
    ],
    imports=[
        'from website.util.sanitize import temp_ampersand_fixer',  # FIXME: Temporary workaround for data stored in wrong format in DB. Unescape it before it gets re-escaped by Markupsafe. See [#OSF-4432]
    ],
    directories=[
        TEMPLATE_DIR,
        settings.ADDON_PATH,
    ],
    module_directory='/tmp/mako_modules',
)

# HTTP status codes treated as redirects by call_url().
REDIRECT_CODES = [
    http.MOVED_PERMANENTLY,
    http.FOUND,
]
class Rule(object):
    """Container for routing and rendering rules.

    A Rule bundles one or more URL routes with the HTTP methods they
    accept, the view (function or constant data dictionary) that serves
    them, and the renderer that turns the view's result into a response.
    """

    @staticmethod
    def _ensure_list(value):
        # Normalize a scalar argument into a single-element list.
        return value if isinstance(value, list) else [value]

    def __init__(self, routes, methods, view_func_or_data, renderer,
                 view_kwargs=None, endpoint_suffix=''):
        """Rule constructor.

        :param routes: Route or list of routes
        :param methods: HTTP method or list of methods
        :param view_func_or_data: View function or data; pass data
            if view returns a constant data dictionary
        :param renderer: Renderer object or function
        :param view_kwargs: Optional kwargs to pass to view function
        :param endpoint_suffix: Optional suffix to append to endpoint name;
            useful for disambiguating routes by HTTP verb
        :raises ValueError: if ``renderer`` is not callable
        """
        if not callable(renderer):
            raise ValueError('Argument renderer must be callable.')
        self.routes = self._ensure_list(routes)
        self.methods = self._ensure_list(methods)
        self.view_func_or_data = view_func_or_data
        self.renderer = renderer
        self.view_kwargs = view_kwargs or {}
        self.endpoint_suffix = endpoint_suffix
def wrap_with_renderer(fn, renderer, renderer_kwargs=None, debug_mode=True):
    """
    :param fn: View function; must return a dictionary or a tuple containing
        (up to) a dictionary, status code, headers, and redirect URL
    :param renderer: Renderer object or function
    :param renderer_kwargs: Optional kwargs to pass to renderer
    :param debug_mode: If True, re-raise unexpected exceptions instead of
        converting them to HTTP 500 responses
    :return: Wrapped view function
    """
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        # If the session recorded an auth error, short-circuit the view
        # entirely and render that error instead.
        if session:
            session_error_code = session.data.get('auth_error_code')
        else:
            session_error_code = None
        if session_error_code:
            return renderer(
                HTTPError(session_error_code),
                **renderer_kwargs or {}
            )
        try:
            if renderer_kwargs:
                kwargs.update(renderer_kwargs)
            data = fn(*args, **kwargs)
        except HTTPError as error:
            # Expected application errors are rendered like normal data.
            data = error
        except Exception as error:
            # Unexpected errors: log (and report to Sentry outside debug),
            # then either re-raise (debug) or degrade to a 500 response.
            logger.exception(error)
            if settings.SENTRY_DSN and not app.debug:
                sentry.log_exception()
            if debug_mode:
                raise
            data = HTTPError(
                http.INTERNAL_SERVER_ERROR,
                message=repr(error),
            )
        return renderer(data, **renderer_kwargs or {})
    return wrapped
def data_to_lambda(data):
    """Wrap constant ``data`` in a function of arbitrary arguments.

    Every call returns a *deep copy*, so callers that mutate the result
    cannot corrupt the canned data shared by subsequent calls.
    """
    # Kept as a lambda so the wrapper's __name__ ('<lambda>') matches the
    # original behavior relied on by endpoint error messages.
    producer = lambda *args, **kwargs: copy.deepcopy(data)
    return producer
# Registry of view functions keyed by endpoint name; populated by
# process_rules and consumed by call_url.
view_functions = {}

def process_rules(app, rules, prefix=''):
    """Add URL routes to Flask / Werkzeug lookup table.

    :param app: Flask / Werkzeug app
    :param rules: List of Rule objects
    :param prefix: Optional prefix for rule URLs
    """
    for rule in rules:
        if not callable(rule.view_func_or_data):
            # Constant data: wrap it in a lambda and derive the endpoint
            # name from the route paths themselves.
            view_func = data_to_lambda(rule.view_func_or_data)
            endpoint = '__'.join(
                route.replace('/', '') for route in rule.routes
            )
        else:
            # Real view function: endpoint is "<renderer>__<view name>"
            # and the view is registered for later lookup by call_url.
            view_func = rule.view_func_or_data
            renderer_name = getattr(
                rule.renderer,
                '__name__',
                rule.renderer.__class__.__name__
            )
            endpoint = '{}__{}'.format(
                renderer_name,
                rule.view_func_or_data.__name__
            )
            view_functions[endpoint] = rule.view_func_or_data

        # Wrap view function with renderer
        wrapped_view_func = wrap_with_renderer(
            view_func,
            rule.renderer,
            rule.view_kwargs,
            debug_mode=app.debug
        )

        # Register every route for this rule.
        for url in rule.routes:
            try:
                app.add_url_rule(
                    prefix + url,
                    endpoint=endpoint + rule.endpoint_suffix,
                    view_func=wrapped_view_func,
                    methods=rule.methods,
                )
            except AssertionError:
                raise AssertionError('URLRule({}, {})\'s view function name is overwriting an existing endpoint'.format(prefix + url, view_func.__name__ + rule.endpoint_suffix))
### Renderer helpers ###
def render_mustache_string(tpl_string, data):
    """Render a mustache template string with ``data`` as its context."""
    import pystache  # imported lazily so pystache is only required when used
    return pystache.render(tpl_string, context=data)
def render_jinja_string(tpl, data):
    # Stub: Jinja rendering is not implemented and returns None.
    # NOTE(review): any '.jinja' template routed through
    # renderer_extension_map will silently render as None -- confirm this
    # is intentional before relying on it.
    pass
# Compiled-template cache keyed by (tpldir, tplname, escape-mode); see
# render_mako_string below.
mako_cache = {}

def render_mako_string(tpldir, tplname, data, trust=True):
    """Render a mako template to a string.

    :param tpldir: Directory containing the template file
    :param tplname: Template file name, relative to ``tpldir``
    :param data: Context dictionary passed to the template
    :param trust: Optional. If ``False``, markup-safe escaping will be enabled
    """
    show_errors = settings.DEBUG_MODE  # thanks to abought
    # TODO: The "trust" flag is expected to be temporary, and should be removed
    # once all templates manually set it to False.
    lookup_obj = _TPL_LOOKUP_SAFE if trust is False else _TPL_LOOKUP
    # BUGFIX: the cache used to be keyed on tplname alone, which (a) made
    # identically-named templates in different directories collide and
    # (b) ignored the trust flag on cache hits -- a template compiled with
    # the trusting (non-escaping) filter set would be reused for a
    # trust=False render, silently bypassing markup-safe escaping.
    cache_key = (tpldir, tplname, trust is False)
    tpl = mako_cache.get(cache_key)
    if tpl is None:
        with open(os.path.join(tpldir, tplname)) as f:
            tpl_text = f.read()
        tpl = Template(
            tpl_text,
            format_exceptions=show_errors,
            lookup=lookup_obj,
            input_encoding='utf-8',
            output_encoding='utf-8',
            default_filters=lookup_obj.template_args['default_filters'],
            imports=lookup_obj.template_args['imports']  # FIXME: Temporary workaround for data stored in wrong format in DB. Unescape it before it gets re-escaped by Markupsafe. See [#OSF-4432]
        )
        # Don't cache in debug mode
        if not app.debug:
            mako_cache[cache_key] = tpl
    return tpl.render(**data)
# Map template file extensions to renderer functions; used by
# WebRenderer.detect_renderer to infer a renderer from a file name.
renderer_extension_map = {
    '.stache': render_mustache_string,
    '.jinja': render_jinja_string,
    '.mako': render_mako_string,
}
def unpack(data, n=4):
    """Pad ``data`` out to a tuple of length ``n``.

    A non-tuple value is treated as a 1-tuple. Missing trailing positions
    are filled with ``None``; a tuple already at least ``n`` long is
    returned unchanged.
    """
    items = data if isinstance(data, tuple) else (data,)
    padding = (None,) * (n - len(items))
    return items + padding
def proxy_url(url):
    """Call Flask view function for a given URL.

    :param url: URL to follow
    :return: Return value of view function, wrapped in Werkzeug Response
    """
    # Get URL map, passing current request method; else method defaults to GET
    match = app.url_map.bind('').match(url, method=request.method)
    # match is (endpoint, view_kwargs); dispatch straight to the view.
    response = app.view_functions[match[0]](**match[1])
    return make_response(response)
def call_url(url, view_kwargs=None):
    """Look up and call view function by URL.

    :param url: URL
    :param view_kwargs: Optional kwargs to pass to view function
    :return: Data from view function
    """
    # Parse view function and args
    func_name, func_data = app.url_map.bind('').match(url)
    if view_kwargs is not None:
        func_data.update(view_kwargs)
    view_function = view_functions[func_name]
    # Call view function
    rv = view_function(**func_data)
    # Extract data from return value (drop status/headers/redirect slots)
    rv, _, _, _ = unpack(rv)
    # Follow redirects recursively until a non-redirect result is reached
    if isinstance(rv, werkzeug.wrappers.BaseResponse) \
            and rv.status_code in REDIRECT_CODES:
        redirect_url = rv.headers['Location']
        return call_url(redirect_url)
    return rv
### Renderers ###
class Renderer(object):
    """Abstract base for response renderers.

    Subclasses implement ``render`` (turn view data into a body) and
    ``handle_error`` (turn an HTTPError into a response); calling the
    instance dispatches a view result to the appropriate one.
    """

    CONTENT_TYPE = 'text/html'

    def render(self, data, redirect_url, *args, **kwargs):
        raise NotImplementedError

    def handle_error(self, error):
        raise NotImplementedError

    def __call__(self, data, *args, **kwargs):
        """Render data returned by a view function.

        :param data: Dictionary or tuple of (up to) dictionary,
            status code, headers, and redirect URL
        :return: Flask / Werkzeug response object
        """
        # Errors take their own rendering path.
        if isinstance(data, HTTPError):
            return self.handle_error(data)
        # An already-built response passes straight through.
        if isinstance(data, werkzeug.wrappers.BaseResponse):
            return data
        # Pad the view result out to (data, status, headers, redirect).
        data, status_code, headers, redirect_url = unpack(data)
        rendered = self.render(data, redirect_url, *args, **kwargs)
        # The subclass may itself have produced a full response.
        if isinstance(rendered, werkzeug.wrappers.BaseResponse):
            return rendered
        # Attach content type (with charset) and package as a response.
        headers = headers or {}
        charset = kwargs.get('charset', 'utf-8')
        headers['Content-Type'] = self.CONTENT_TYPE + '; charset=' + charset
        return make_response(rendered, status_code, headers)
class JSONRenderer(Renderer):
    """Renderer for API views. Generates JSON; ignores
    redirects from views and exceptions.
    """
    CONTENT_TYPE = 'application/json'

    class Encoder(json.JSONEncoder):
        # Extends the default encoder to serialize any object exposing a
        # ``to_json`` hook.
        def default(self, obj):
            if hasattr(obj, 'to_json'):
                try:
                    return obj.to_json()
                except TypeError:  # BS4 objects have to_json that isn't callable
                    return unicode(obj)
            return json.JSONEncoder.default(self, obj)

    def handle_error(self, error):
        # Serialize the HTTPError payload; APIs never follow redirects.
        headers = {'Content-Type': self.CONTENT_TYPE}
        return self.render(error.to_data(), None), error.code, headers

    def render(self, data, redirect_url, *args, **kwargs):
        # ``redirect_url`` is accepted for interface parity but ignored.
        return json.dumps(data, cls=self.Encoder)

# Create a single JSONRenderer instance to avoid repeated construction
json_renderer = JSONRenderer()
class XMLRenderer(Renderer):
    """Renderer for API views. Generates XML; ignores
    redirects from views and exceptions.
    """
    CONTENT_TYPE = 'application/xml'

    def handle_error(self, error):
        # Plain-text body built from the long error message, plus code.
        message = error.to_data()['message_long']
        return str(message), error.code

    def render(self, data, redirect_url, *args, **kwargs):
        # Data is assumed to already be serialized XML; pass it through
        # unchanged (``redirect_url`` is ignored).
        return data

# Shared module-level instance; avoids repeated construction.
xml_renderer = XMLRenderer()
class WebRenderer(Renderer):
    """Renderer for web views. Generates HTML; follows redirects
    from views and exceptions.
    """
    CONTENT_TYPE = 'text/html'
    error_template = 'error.mako'

    # TODO: Should be a function, not a method
    def detect_renderer(self, renderer, filename):
        """Return ``renderer`` if given, else infer one from ``filename``'s
        extension via ``renderer_extension_map``.

        :raises KeyError: if the extension has no registered renderer
        """
        if renderer:
            return renderer
        try:
            _, extension = os.path.splitext(filename)
            return renderer_extension_map[extension]
        except KeyError:
            raise KeyError(
                'Could not infer renderer from file name: {}'.format(
                    filename
                )
            )

    def __init__(self, template_name,
                 renderer=None, error_renderer=None,
                 data=None, detect_render_nested=True,
                 trust=True, template_dir=TEMPLATE_DIR):
        """Construct WebRenderer.

        :param template_name: Name of template file
        :param renderer: Renderer callable; attempt to auto-detect if None
        :param error_renderer: Renderer for error views; attempt to
            auto-detect if None
        :param data: Optional dictionary or dictionary-generating function
            to add to data from view function
        :param detect_render_nested: Auto-detect renderers for nested
            templates?
        :param trust: Boolean: If true, turn off markup-safe escaping
        :param template_dir: Path to template directory
        """
        self.template_name = template_name
        self.data = data or {}
        self.detect_render_nested = detect_render_nested
        self.trust = trust
        self.template_dir = template_dir
        self.renderer = self.detect_renderer(renderer, template_name)
        self.error_renderer = self.detect_renderer(
            error_renderer,
            self.error_template
        )

    def handle_error(self, error):
        """Handle an HTTPError.

        :param error: HTTPError object
        :return: HTML error page
        """
        # Follow redirects
        if error.redirect_url is not None:
            return redirect(error.redirect_url)
        # Render error page
        # todo: use message / data from exception in error page
        error_data = error.to_data()
        return self.render(
            error_data,
            None,
            template_name=self.error_template
        ), error.code

    def render_element(self, element, data):
        """Render an embedded template.

        :param element: The template embed (HtmlElement).
            Ex: <div mod-meta='{"tpl": "name.html", "replace": true}'></div>
        :param data: Dictionary to be passed to the template as context
        :return: 2-tuple: (<result>, <flag: replace div>)
        """
        attributes_string = element.get('mod-meta')
        # Return debug <div> if JSON cannot be parsed
        try:
            element_meta = json.loads(attributes_string)
        except ValueError:
            return '<div>No JSON object could be decoded: {}</div>'.format(
                markupsafe.escape(attributes_string)
            ), True
        uri = element_meta.get('uri')
        is_replace = element_meta.get('replace', False)
        kwargs = element_meta.get('kwargs', {})
        view_kwargs = element_meta.get('view_kwargs', {})
        error_msg = element_meta.get('error', None)
        # TODO: Is copy enough? Discuss.
        render_data = copy.copy(data)
        render_data.update(kwargs)
        if uri:
            # Catch errors and return appropriate debug divs
            # todo: add debug parameter
            try:
                uri_data = call_url(uri, view_kwargs=view_kwargs)
                render_data.update(uri_data)
            except NotFound:
                return '<div>URI {} not found</div>'.format(markupsafe.escape(uri)), is_replace
            except Exception as error:
                logger.exception(error)
                if error_msg:
                    return '<div>{}</div>'.format(markupsafe.escape(unicode(error_msg))), is_replace
                return '<div>Error retrieving URI {}: {}</div>'.format(
                    uri,
                    repr(error)
                ), is_replace
        try:
            template_rendered = self._render(
                render_data,
                element_meta['tpl'],
            )
        except Exception as error:
            logger.exception(error)
            return '<div>Error rendering template {}: {}'.format(
                element_meta['tpl'],
                repr(error)
            ), is_replace
        return template_rendered, is_replace

    def _render(self, data, template_name=None):
        """Render output of view function to HTML.

        :param data: Data dictionary from view function
        :param template_name: Name of template file
        :return: Rendered HTML
        """
        nested = template_name is None
        template_name = template_name or self.template_name
        # For nested embeds, try to infer the renderer from the file name;
        # fall back to this renderer's default on unknown extensions.
        if nested and self.detect_render_nested:
            try:
                renderer = self.detect_renderer(None, template_name)
            except KeyError:
                renderer = self.renderer
        else:
            renderer = self.renderer
        # Catch errors and return appropriate debug divs
        # todo: add debug parameter
        try:
            # TODO: Seems like Jinja2 and handlebars renderers would not work with this call sig
            rendered = renderer(self.template_dir, template_name, data, trust=self.trust)
        except IOError:
            return '<div>Template {} not found.</div>'.format(template_name)
        # Recursively render any mod-meta embeds found in the output.
        html = lxml.html.fragment_fromstring(rendered, create_parent='remove')
        for element in html.findall('.//*[@mod-meta]'):
            # Render nested template
            template_rendered, is_replace = self.render_element(element, data)
            original = lxml.html.tostring(element)
            if is_replace:
                replacement = template_rendered
            else:
                # Keep the original element, splicing the rendered child
                # markup between its opening and closing tags.
                replacement = original
                replacement = replacement.replace('><', '>' + template_rendered + '<')
            rendered = rendered.replace(original, replacement)
        ## Parse HTML using html5lib; lxml is too strict and e.g. throws
        ## errors if missing parent container; htmlparser mangles whitespace
        ## and breaks replacement
        #parsed = BeautifulSoup(rendered, 'html5lib')
        #subtemplates = parsed.find_all(
        #    lambda tag: tag.has_attr('mod-meta')
        #)
        #
        #for element in subtemplates:
        #
        #    # Extract HTML of original element
        #    element_html = str(element)
        #
        #    # Render nested template
        #    template_rendered, is_replace = self.render_element(element, data)
        #
        #    # Build replacement
        #    if is_replace:
        #        replacement = template_rendered
        #    else:
        #        element.string = template_rendered
        #        replacement = str(element)
        #
        #    # Replace
        #    rendered = rendered.replace(element_html, replacement)
        return rendered

    def render(self, data, redirect_url, *args, **kwargs):
        """Render output of view function to HTML, following redirects
        and adding optional auxiliary data to view function response

        :param data: Data dictionary from view function
        :param redirect_url: Redirect URL; follow if not None
        :return: Rendered HTML
        """
        # Follow redirects
        if redirect_url is not None:
            return redirect(redirect_url)
        template_name = kwargs.get('template_name')
        # Load extra data; only fill keys the view did not already provide.
        extra_data = self.data if isinstance(self.data, dict) else self.data()
        data.update({key: val for key, val in extra_data.iteritems() if key not in data})
        return self._render(data, template_name)
| |
#########################################
#----------------PyPath-----------------#
#Simple Path Tracer Programmed in Python#
#----------By: Julian Villella----------#
#------Start Date: August 17, 2011------#
#########################################
#Modules
from math import sqrt, cos, sin
from random import random, gauss
import array #for writing .ppm image file
from winsound import Beep #for beep sound when complete
from tkinter import * #for GUI
#========================================#
#==============CHANGE THESE==============#
#Must be a string like the default below
#and have .ppm extension as shown below
FILENAME = 'PyPath_Output.ppm'
#Must be a string like the default below
DIRECTORY = 'C:\\Users\\Julian\\Desktop\\'
#==============CHANGE THESE==============#
#========================================#
#Constants
EPSILON = 0.0001  # small tolerance for floating-point comparisons
HUGEVALUE = 1000000.0 #1 million
MAXDEPTH = 4 #max ray bounces
PI = 3.1415926535897932384
TWO_PI = 6.2831853071795864769
INVERTED_PI = 0.3183098861837906912  # 1/pi
#-------------------------------------------------Vector3D class
class Vector3D:
    """A 3-component vector supporting +, -, scalar * and scalar /."""

    def __init__(self, x_element, y_element, z_element):
        self.x = x_element
        self.y = y_element
        self.z = z_element

    # -- component-wise arithmetic; each operator returns a new vector --
    def __sub__(self, other):
        return Vector3D(self.x - other.x, self.y - other.y, self.z - other.z)

    def __add__(self, other):
        return Vector3D(self.x + other.x, self.y + other.y, self.z + other.z)

    def __mul__(self, scalar):
        return Vector3D(self.x * scalar, self.y * scalar, self.z * scalar)

    def __truediv__(self, scalar):
        return Vector3D(self.x / scalar, self.y / scalar, self.z / scalar)
#Return dot product between two vectors
def Dot(a, b):
    """Return the dot (scalar) product of vectors a and b."""
    return sum(p * q for p, q in zip((a.x, a.y, a.z), (b.x, b.y, b.z)))
#Return perpendicular vector
def Cross(a, b):
    """Return the cross product a x b (perpendicular to both inputs)."""
    cx = a.y * b.z - a.z * b.y
    cy = a.z * b.x - a.x * b.z
    cz = a.x * b.y - a.y * b.x
    return Vector3D(cx, cy, cz)
#Return length of vector
def Length(v):
    """Return the Euclidean length (magnitude) of vector v."""
    squared = v.x * v.x + v.y * v.y + v.z * v.z
    return sqrt(squared)
#Return normalized vector (unit vector)
def Normalize(v):
    """Return a unit-length copy of v (v scaled by 1/|v|)."""
    inverse_length = 1.0 / Length(v)
    return v * inverse_length
#Return normal that is pointing on the side as the passed direction
def orient_normal(normal, direction):
    """Return normal flipped, if needed, to face the given direction's hemisphere."""
    facing = Dot(normal, direction) >= 0.0
    return normal if facing else normal * -1.0
#-------------------------------------------------Ray class
class Ray:
    """A ray with origin o and direction d."""
    #Initializer
    def __init__(self, origin=None, direction=None):
        #Avoid mutable default arguments: the original evaluated
        #Vector3D(0,0,0) once at def time and shared that single instance
        #across every Ray constructed with defaults.
        self.o = origin if origin is not None else Vector3D(0.0, 0.0, 0.0)
        self.d = direction if direction is not None else Vector3D(0.0, 0.0, 0.0)
    #Member Functions
    def get_hitpoint(self, t):
        """Return the point o + d * t along the ray at parameter t."""
        return self.o + self.d * t
#-------------------------------------------------RGBColour class
class RGBColour:
    """An RGB colour triple with arithmetic and in-place clamping."""

    def __init__(self, red, green, blue):
        self.r = red
        self.g = green
        self.b = blue

    #component-wise addition
    def __add__(self, other):
        return RGBColour(self.r + other.r, self.g + other.g, self.b + other.b)

    #component-wise subtraction
    def __sub__(self, other):
        return RGBColour(self.r - other.r, self.g - other.g, self.b - other.b)

    #scalar multiplication
    def __mul__(self, scalar):
        return RGBColour(self.r * scalar, self.g * scalar, self.b * scalar)

    #scalar division
    def __truediv__(self, scalar):
        return RGBColour(self.r / scalar, self.g / scalar, self.b / scalar)

    def multiply(self, other):
        """Component-wise (Hadamard) product with another RGBColour."""
        return RGBColour(self.r * other.r, self.g * other.g, self.b * other.b)

    def clamp(self, minimum, maximum):
        """Clamp each channel into [minimum, maximum], mutating in place."""
        self.r = min(max(self.r, minimum), maximum)
        self.g = min(max(self.g, minimum), maximum)
        self.b = min(max(self.b, minimum), maximum)
#Constants - shared colour singletons (treated as read-only)
BLACK = RGBColour(0.0, 0.0, 0.0)
WHITE = RGBColour(1.0, 1.0, 1.0)
RED = RGBColour(1.0, 0.0, 0.0) #for testing
#-------------------------------------------------BxDF classes
#Free Functions
#Pass two numbers range [0, 1] and exponent
#Pass two numbers range [0, 1] and exponent
def SampleHemisphere(u1, u2, exp):
    """Sample a point on the unit hemisphere around +z.

    u1, u2 are uniform random numbers in [0, 1]; exp shapes the
    cosine-power distribution (0 = cosine-weighted diffuse lobe).
    Returns a Vector3D on the hemisphere.
    """
    z = pow(1.0 - u1, 1.0 / (exp + 1.0))
    phi = TWO_PI * u2 #Azimuth
    theta = sqrt(max(0.0, 1.0 - z*z)) #Polar (sin of the polar angle)
    #Bug fix: the original did "p = Vector3D" (binding the class itself, not
    #an instance) and then assigned p.x/p.y/p.z, which mutated shared class
    #attributes on Vector3D for every call.  Construct a fresh instance.
    return Vector3D(theta * cos(phi), theta * sin(phi), z)
def OrientedHemiDir(u1, u2, normal, exp):
    """Map a hemisphere sample (u1, u2, exp) to a unit direction about normal."""
    sample = SampleHemisphere(u1, u2, exp) #random point on hemisphere
    #Build an orthonormal frame (u, v, w) with w along the normal; the up
    #vector is slightly jittered to avoid a degenerate cross product.
    w = normal
    v = Normalize(Cross(Vector3D(0.00319, 1.0, 0.0078), w))
    u = Cross(v, w)
    #project the local sample into world space and normalize
    world_dir = (u * sample.x) + (v * sample.y) + (w * sample.z)
    return Normalize(world_dir)
#Lambertian
class BxDF:
    """Base class for surface scattering models; tracks emitted radiance."""

    def __init__(self):
        #non-emissive by default; emitters call set_emission()
        self.ke = BLACK

    def set_emission(self, emission_colour):
        self.ke = emission_colour

    def get_emission(self):
        return self.ke
class Lambertian(BxDF):
    """Ideal diffuse (Lambertian) reflector."""

    def __init__(self, diffuse_colour):
        BxDF.__init__(self)
        self.kd = diffuse_colour #diffuse reflectance colour

    def f(self, incoming, outgoing, normal):
        #constant BRDF: albedo / pi, independent of directions
        return self.kd * INVERTED_PI

    def sample_f(self, normal, outgoing):
        """Sample a cosine-weighted direction; return (incoming direction, pdf)."""
        wi = OrientedHemiDir(random(), random(), normal, 0.0)
        pdf = Dot(normal, wi) * INVERTED_PI
        return (wi, pdf)
#SpecularReflection
class PerfectSpecular(BxDF):
    """Perfect mirror reflection."""

    def __init__(self, specular_colour):
        BxDF.__init__(self)
        self.ks = specular_colour #specular reflectance colour

    def f(self, incoming, outgoing, normal):
        return self.ks

    def sample_f(self, normal, outgoing):
        """Mirror the outgoing direction about the normal; return (wi, pdf)."""
        cos_out = Dot(normal, outgoing)
        wi = (outgoing * -1.0) + normal * 2.0 * cos_out
        return (wi, Dot(normal, wi))
#Glossy
class GlossySpecular(BxDF):
    """Glossy (Phong-lobe) specular reflection with exponent-controlled sharpness."""
    def __init__(self, specular_colour, specular_exponent):
        BxDF.__init__(self) #call base constructor
        self.ks = specular_colour #specular reflectance colour
        self.exp = specular_exponent #Phong exponent; higher = tighter highlight
    #Member Functions
    def f(self, incoming, outgoing, normal):
        """BRDF value: ks * (r . wo)^exp, or BLACK when outgoing is off the lobe."""
        ndotwi = Dot(normal, incoming) #normal and incoming light
        reflect_dir = (incoming * -1.0) + normal * 2.0 * ndotwi #mirror of incoming
        rdotwo = Dot(reflect_dir, outgoing)
        if rdotwo > 0.0:
            return self.ks * pow(rdotwo, self.exp)
        else:
            return BLACK
    #return tuple (incoming direction, pdf)
    def sample_f(self, normal, outgoing):
        """Sample an incoming direction from the Phong lobe around the mirror
        direction of outgoing; returns (incoming direction, pdf)."""
        #perfect mirror reflection of the outgoing direction
        ndotwo = Dot(normal, outgoing)
        reflect_dir = (outgoing * -1.0) + normal * 2.0 * ndotwo
        #orthonormal basis around the mirror direction (the jittered up
        #vector avoids a degenerate cross product)
        w = reflect_dir
        v = Cross(Vector3D(0.00419, 1.0, 0.0078), w)
        v = Normalize(v)
        u = Cross(v, w)
        #random samples
        u1 = random() #[0, 1]
        u2 = random() #[0, 1]
        p = SampleHemisphere(u1, u2, self.exp) #point in hemi
        wi = (u * p.x) + (v * p.y) + (w * p.z) #linear projection
        if Dot(normal, wi) < 0.0: #if reflected direction is below surface
            wi = (u * -p.x) + (v * -p.y) + (w * p.z) #reflect it
        #phong lobe weights the pdf toward the mirror direction
        phong_lobe = pow(Dot(reflect_dir, wi), self.exp)
        pdf = Dot(normal, wi) * phong_lobe
        return(wi, pdf)
#-------------------------------------------------Primitive classes
class Primitive:
    """Base class for renderable objects; holds the surface material (a BxDF)."""
    #Setters
    def set_BxDF(self, BxDF):
        #NOTE(review): the parameter name shadows the BxDF base class inside
        #this method; left unchanged since callers may pass it by keyword.
        self.BxDF = BxDF
    #Getters
    def get_BxDF(self):
        return self.BxDF
#Sphere
class Sphere(Primitive):
    """Sphere primitive defined by a centre point and a radius."""

    def __init__(self, sphere_origin, sphere_radius):
        self.origin = sphere_origin
        self.radius = sphere_radius
        #cache radius^2 so intersect() avoids recomputing it per ray
        self.radius_squared = sphere_radius * sphere_radius

    def intersect(self, ray):
        """Intersect a ray with this sphere.

        Returns a tuple (hit, distance, hit_point, normal); on a miss the
        distance is 0.0 and hit_point/normal are zero vectors.
        """
        direction = Normalize(ray.d)
        centre_to_origin = ray.o - self.origin
        #quadratic coefficients of |o + t*d - centre|^2 = r^2
        a = Dot(direction, direction)
        b = 2.0 * Dot(direction, centre_to_origin)
        c = Dot(centre_to_origin, centre_to_origin) - self.radius_squared
        discriminant = (b * b) - (4.0 * a * c)
        if discriminant >= 0.0:
            root = sqrt(discriminant)
            #try the nearer root first, then the farther one
            for t in ((-b - root) / (2.0 * a), (-b + root) / (2.0 * a)):
                if t >= EPSILON:
                    point = ray.get_hitpoint(t)
                    normal = Normalize((point - self.origin) / self.radius)
                    return (True, t, point, normal)
        #miss: no real roots, or both intersections are behind the ray
        return (False, 0.0, Vector3D(0.0, 0.0, 0.0), Vector3D(0.0, 0.0, 0.0))
#Plane
class Plane(Primitive):
    """Infinite plane primitive defined by a point on it and its normal."""

    def __init__(self, plane_origin, plane_normal):
        self.origin = plane_origin
        self.normal = Normalize(plane_normal)

    def intersect(self, ray):
        """Intersect a ray with this plane.

        Returns a tuple (hit, distance, hit_point, normal); on a miss the
        distance is 0.0 and hit_point is the zero vector.
        """
        direction = Normalize(ray.d)
        denominator = Dot(direction, self.normal)
        #a zero denominator means the ray runs parallel to the plane
        if denominator != 0.0:
            t = Dot(self.normal, (self.origin - ray.o)) / denominator
            if t >= EPSILON:
                return (True, t, ray.get_hitpoint(t), self.normal)
        return (False, 0.0, Vector3D(0.0, 0.0, 0.0), self.normal)
#-------------------------------------------------Integrator Classes
#the integrators also act like a scene class in that-
#it stores all the primitives that are to be ray traced.
class RayCastIntegrator:
    """Placeholder for a direct ray-casting integrator (not implemented)."""
    pass
class RayTraceIntegrator:
    """Placeholder for a Whitted-style ray-tracing integrator (not implemented)."""
    pass
class PathTraceIntegrator:
    """Monte Carlo path tracer.  Also acts as the scene: it owns the list of
    primitives that rays are traced against."""
    #Initializer - creates object list
    def __init__(self):
        self.primitives = [] #scene contents, filled via add_primitive()
    #trace light path
    def trace_ray(self, ray, depth):
        """Return the radiance (RGBColour) carried along ray.

        Recursive: follows one sampled bounce per hit until MAXDEPTH or
        Russian-roulette termination stops the path.
        """
        result = RGBColour(0.0, 0.0, 0.0) #black
        t = HUGEVALUE #distance of the closest hit found so far
        index = -1 #-1 means no hit
        if depth > MAXDEPTH:
            return result
        #find closest hit object, its distance, hit_point and normal
        #scan through primitives in scene, find closest
        for i in range(0, len(self.primitives)):
            #intersect returns tuple of (bool hit, distance, hit_point, normal)
            hit_data = self.primitives[i].intersect(ray)
            if hit_data[0] == True: #Hit
                if hit_data[1] < t: #Distance
                    t = hit_data[1]
                    hit_point = hit_data[2] #hit_point
                    normal = hit_data[3] #normal
                    index = i #closest primitive index number
        if index == -1: #No Hit
            return BLACK
        else: #Hit
            wo = ray.d * -1.0 #outgoing (towards camera)
            normal = orient_normal(normal, wo) #make normal point in correct direction
            #sample_f returns tuple (incoming direction, pdf)
            shading_data = self.primitives[index].get_BxDF().sample_f(normal, wo)
            wi = shading_data[0] #incoming direction
            pdf = shading_data[1] #pdf
            if pdf <= 0.0: #guard against division by zero / invalid samples
                pdf = 1.0
            f = self.primitives[index].get_BxDF().f(wi, wo, normal)
            incoming_ray = Ray(hit_point, wi) #make incoming to follow
            #Russian Roulette
            RR_prob = 0.66
            if depth > 2:
                if(random() < RR_prob): #2/3 chance we stop here
                    return result
            #rendering-equation estimate: BRDF * incoming radiance * cos / pdf
            result = result + f.multiply(self.trace_ray(incoming_ray, depth + 1)) * Dot(wi, normal) / pdf
            #Add emission
            result = result + self.primitives[index].get_BxDF().get_emission()
            #NOTE(review): this divides by the *termination* probability, at
            #every depth (even when no roulette was played), and scales the
            #emission term as well; standard Russian roulette would divide
            #only surviving paths by the survival probability (1 - RR_prob).
            #Confirm before changing - it alters overall image brightness.
            result = result / RR_prob
            return result #return final colour
    #add objects
    def add_primitive(self, primitive):
        """Append a primitive (Sphere, Plane, ...) to the scene."""
        self.primitives.append(primitive)
#-------------------------------------------------Camera Class
class Camera:
    """Pinhole camera.

    Builds an orthonormal viewing basis from the eye/focal points, shoots
    spp jittered rays per pixel through an integrator, and writes the
    result out as a binary PPM (P6) image.
    """
    #Initializer
    def __init__(self, eye_point, focal_point, view_distance, up_vector,
                 image_height, image_width, samples_per_pixel):
        self.eye = eye_point            #camera position
        self.focal = focal_point        #look-at point
        self.view_dist = view_distance  #distance from eye to the view plane
        self.up = up_vector
        self.height = image_height
        self.width = image_width
        self.spp = samples_per_pixel    #samples (rays) per pixel
        #setup orthonormal basis (u, v, w)
        self.compute_uvw()
        #create empty image array: one unsigned byte per RGB channel per pixel
        self.image_array = array.array('B', [0] * (image_width * image_height * 3))
    #Member Functions
    def compute_uvw(self):
        """Compute the (u, v, w) orthonormal basis for the view orientation."""
        #w points from the focal point back towards the eye
        self.w = self.eye - self.focal
        self.w = Normalize(self.w)
        #u is the camera's "right" direction
        self.u = Cross(self.up, self.w)
        self.u = Normalize(self.u)
        #v is the camera's "up" direction
        self.v = Cross(self.w, self.u)
        self.v = Normalize(self.v)
        #check for singularity: looking straight up/down makes the cross
        #products above degenerate, so hardcode the orientations instead
        #camera looking straight down
        if (self.eye.x == self.focal.x and
            self.eye.z == self.focal.z and
            self.focal.y < self.eye.y):
            self.u = Vector3D(0.0, 0.0, 1.0)
            self.v = Vector3D(1.0, 0.0, 0.0)
            self.w = Vector3D(0.0, 1.0, 0.0)
        #camera looking straight up
        if (self.eye.x == self.focal.x and
            self.eye.z == self.focal.z and
            self.focal.y > self.eye.y):
            self.u = Vector3D(1.0, 0.0, 0.0)
            self.v = Vector3D(0.0, 0.0, 1.0)
            self.w = Vector3D(0.0, -1.0, 0.0)
    def save_pixel(self, single_pixel, x, y):
        """Scale a [0,1] colour to [0,255], clamp it, and store it at (x, y)."""
        pixel = single_pixel * 255
        pixel.clamp(0.0, 255.0)
        #flip y so row 0 of the file is the top of the image
        i = ((self.height - y - 1) * self.width + x)
        self.image_array[i*3 + 0] = int(pixel.r)
        self.image_array[i*3 + 1] = int(pixel.g)
        self.image_array[i*3 + 2] = int(pixel.b)
    def save_image(self, filename):
        """Write the pixel buffer to DIRECTORY + filename as a P6 PPM file."""
        #create image file
        image = open(DIRECTORY + filename, 'wb')
        try:
            #write magic number, and filename as a comment
            image.write(("P6\n#" + filename).encode())
            #write image width, height and max colour-component value
            image.write(("\n" + str(self.width) + " " + str(self.height) + "\n255\n").encode())
            #write image_array to the .ppm file; array.tostring() was removed
            #in Python 3.9 - tobytes() is the same operation
            image.write(self.image_array.tobytes())
        finally:
            #close .ppm file even if a write fails
            image.close()
        print("Image Saved")
    def get_direction(self, x, y):
        """Return the unit world-space ray direction through view-plane point (x, y)."""
        direction = (self.u * x) + (self.v * y) - (self.w * self.view_dist)
        return(Normalize(direction))
    def render(self, integrator):
        """Trace the scene with the given integrator and save the image.

        Averages spp jittered samples per pixel, prints a rough progress
        percentage per column, then writes the file and beeps (Windows).
        """
        ray = Ray()
        ray.o = self.eye
        pixel = BLACK #create black pixel
        for x in range(0, self.width):
            for y in range(0, self.height):
                pixel = BLACK #start at black
                for s in range(0, self.spp):
                    #jitter the sample inside the pixel and centre the
                    #coordinates on the middle of the view plane
                    sp_x = (x + random()) - (self.width / 2.0)
                    sp_y = (y + random()) - (self.height / 2.0)
                    ray.d = self.get_direction(sp_x, sp_y)
                    pixel = pixel + integrator.trace_ray(ray, 1)
                pixel = pixel/self.spp #average the samples
                self.save_pixel(pixel, x, y) #save pixel to pixel array
            print((x / self.width) * 100, "%")
        #save image to file
        self.save_image(FILENAME) #FILENAME is defined at top of source file
        #Play sound to signal completion (For Windows)
        for i in range(1, 4):
            Beep(i * 500, 250)
#-------------------------------------------------Main
#Create Integrator
path_tracer = PathTraceIntegrator()
#Create Primitives w/ Materials
#materials
gold_diff = Lambertian(RGBColour(1.0, 0.8, 0.3))
ground_diff = Lambertian(RGBColour(0.15, 0.15, 0.15))
red_emitt = Lambertian(RGBColour(3.0, 0.0, 0.0))
red_emitt.set_emission(RGBColour(3.0, 0.0, 0.0))
blue_emitt = Lambertian(RGBColour(0.0, 0.0, 3.0))
blue_emitt.set_emission(RGBColour(0.0, 0.0, 3.0))
grey_emitt_plane = Lambertian(RGBColour(0.2, 0.2, 0.2))
grey_emitt_plane.set_emission(RGBColour(0.2, 0.2, 0.2))
mirror = PerfectSpecular(RGBColour(1.0, 1.0, 1.0)) #NOTE(review): defined but not applied to any primitive below
glossy = GlossySpecular(RGBColour(0.2, 1.0, 0.3), 35.0)
#sphere 1 - yellow main
sphere_1 = Sphere(Vector3D(0.0, 0.0, 0.0), 18.0)
sphere_1.set_BxDF(gold_diff)
path_tracer.add_primitive(sphere_1)
#sphere 2 - red sphere light
sphere_2 = Sphere(Vector3D(-20.0, 22.0, 10.0), 6.25)
sphere_2.set_BxDF(red_emitt)
path_tracer.add_primitive(sphere_2)
#sphere 3 - blue sphere light on ground
sphere_3 = Sphere(Vector3D(20.0, -13.0, 25.0), 4.0)
sphere_3.set_BxDF(blue_emitt)
path_tracer.add_primitive(sphere_3)
#sphere 4 - front sphere (comment said "mirror" but the glossy material is applied)
sphere_4 = Sphere(Vector3D(4.0, -8.0, 20.0), 8.0)
sphere_4.set_BxDF(glossy)
path_tracer.add_primitive(sphere_4)
#plane 1 - bottom ground
plane_1 = Plane(Vector3D(0.0, -16.0, 0.0), Vector3D(0.0, 1.0, 0.0))
plane_1.set_BxDF(ground_diff)
path_tracer.add_primitive(plane_1)
#plane 2 - top light
plane_2 = Plane(Vector3D(0.0, 45.0, 0.0), Vector3D(0.0, -1.0, 0.0))
plane_2.set_BxDF(grey_emitt_plane)
path_tracer.add_primitive(plane_2)
#Create Camera
eye = Vector3D(-3.0, 0.0, 190.0) #higher z = more narrow view
focal = Vector3D(0.0, 0.0, 0.0)
view_distance = 1000 #larger = more orthographic like
up = Vector3D(0.0, 1.0, 0.0)
height = 400
width = 400
spp = 128 #samples per pixel; more = less noise, longer render
cam = Camera(eye, focal, view_distance, up, height, width, spp)
cam.render(path_tracer) #trace scene and save image
#-------------------------------------------------Temporary GUI
#GUI using tkinter: display the image that was just rendered
#reusing the camera dimensions set above
root = Tk()
root.title("PyPath")
viewer = Canvas(root, width=width, height=height)
image_name = PhotoImage(file = DIRECTORY + FILENAME)
viewer.create_image(width/2.0, height/2.0, image = image_name)
viewer.grid(row=0, column=0)
root.mainloop()
| |
#!/usr/bin/env python
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
import os, logging as _logging
from lib.aws import requests
from scli import prompt
from scli import config_file
from scli import api_wrapper
from scli.constants import EbDefault, ParameterName, ParameterSource, \
EnvironmentHealth, EnvironmentStatus, ServiceDefault, SubCommandType, ValidationSeverity
from scli.exception import EBSCliException
from scli.operation.base import OperationBase, OperationResult
from scli.parameter import Parameter
from scli.resources import CreateEnvironmentOpMessage, DescribeEnvironmentOpMessage,\
GetEnvironmentEventsOpMessage,\
TerminateEnvironmentOpMessage, UpdateEnvironmentOptionSettingOpMessage,\
ValidationMessage, WaitForCreateEnvironmentFinishOpMessage, \
WaitForTerminateEnvironmentFinishOpMessage, WaitForUpdateEnvOptionSettingFinishOpMessage,\
EnvRetrieveInfoOpMessage
from scli.terminal.base import TerminalBase
from lib.elasticbeanstalk import eb_utils
from lib.elasticbeanstalk.exception import AlreadyExistException
from lib.aws.exception import InvalidParameterValueException
from lib.elasticbeanstalk.request import TemplateSpecification
from lib.rds import rds_utils
from lib.utility import misc, shell_utils
log = _logging.getLogger('cli.op')
class DescribeEnvironmentOperation(OperationBase):
    """Describe one Elastic Beanstalk environment.

    Prints CNAME/status/health plus environment details and queue/RDS
    resources; when unhealthy, also prints the most recent events.  The
    ``open`` subcommand additionally opens the environment URL in a browser.
    """
    # Parameters this operation reads from the parameter pool.
    _input_parameters = {
        ParameterName.AwsAccessKeyId,
        ParameterName.AwsSecretAccessKey,
        ParameterName.ServiceEndpoint,
        ParameterName.ApplicationName,
        ParameterName.EnvironmentName,
    }
    # Parameters this operation contributes back to the pool.
    _output_parameters = {
        ParameterName.EnvironmentName
    }
    def execute(self, parameter_pool):
        """Describe the configured environment and return an OperationResult."""
        eb_client = self._get_eb_client(parameter_pool)
        app_name = parameter_pool.get_value(ParameterName.ApplicationName, False)
        env_name = parameter_pool.get_value(ParameterName.EnvironmentName, False)
        prompt.action(DescribeEnvironmentOpMessage.Start.format(env_name))
        response = eb_client.describe_environments(app_name,
                                                   env_name,
                                                   include_deleted = 'false')
        log.info('Received response for DescribeEnvironemnts call.')
        self._log_api_result(self.__class__.__name__, 'DescribeEnvironments', response.result)
        # Also look up environment resources for future use; a resource
        # lookup failure for a missing environment is treated as "not present"
        resources = None
        try:
            resources = api_wrapper.retrieve_environment_resources(parameter_pool, env_name)
        except InvalidParameterValueException:
            pass
        env_present = (len(response.result) > 0) and bool(resources)
        if env_present: # If have result
            env_info = response.result[0]
            message = DescribeEnvironmentOpMessage.Result.format(env_info.cname,
                                                                 env_info.status,
                                                                 env_info.health)
            prompt.result(message)
            # Display sqs queue info before environment detail
            if resources.queues:
                for queue in resources.queues:
                    message = DescribeEnvironmentOpMessage.QueueInfo.format(queue.name, queue.url)
                    prompt.result(message)
            tier_serialized = env_info.tier.to_serialized_string() if env_info.tier else ''
            prompt.info(DescribeEnvironmentOpMessage.Detail.format(env_info.environment_name,
                                                                   env_info.environment_id,
                                                                   tier_serialized,
                                                                   env_info.solution_stack_name,
                                                                   env_info.version_label,
                                                                   env_info.date_created,
                                                                   env_info.date_updated,
                                                                   env_info.description if env_info.description else ''))
            # If not Green, pull the most recent warning and error events
            if env_info.health in [EnvironmentHealth.Red, EnvironmentHealth.Yellow] \
                or (env_info.status == EnvironmentStatus.Ready \
                    and env_info.health == EnvironmentHealth.Grey):
                events = eb_client.describe_events(app_name,
                                                   env_name,
                                                   max_records = ServiceDefault.STATUS_EVENT_MAX_NUM,
                                                   severity = ServiceDefault.STATUS_EVENT_LEVEL)
                if len(events.result) > 0:
                    # Having one error event
                    for event in events.result:
                        msg = '{0}\t{1}\t{2}'.format(event.event_date,
                                                     event.severity,
                                                     event.message)
                        log.info('Found last error event: {0}'.format(msg))
                        prompt.plain(msg)
            # Display RDS instance host info
            try:
                logical_id, rds_property = rds_utils.retrieve_rds_instance_property\
                    (parameter_pool, resources)
                if rds_property is not None:
                    prompt.result(DescribeEnvironmentOpMessage.RdsInfo.format\
                                  (logical_id,
                                   rds_property.endpoint.address,
                                   rds_property.endpoint.port))
                    prompt.info(DescribeEnvironmentOpMessage.RdsDetail.format\
                                (rds_property.engine + ' ' + rds_property.engine_version,
                                 rds_property.allocated_storage,
                                 rds_property.db_instance_class,
                                 rds_property.multi_az,
                                 rds_property.master_username,
                                 rds_property.instance_create_time,
                                 rds_property.db_instance_status))
            except BaseException as ex:
                log.error('Encountered error when retrieve environment resources: {0}.'.format(ex))
                raise
            # Subcommand: "open [path]" launches the environment URL in a browser
            _, subcommands = parameter_pool.command
            subcommand = subcommands[0].upper() if len(subcommands) > 0 else None
            if subcommand == SubCommandType.OPEN:
                urlpath = ''
                if len(subcommands) > 1:
                    urlpath = subcommands[1] if subcommands[1].startswith('/') else '/' + subcommands[1]
                shell_utils.open_url(env_info.cname + urlpath, False)
        else:
            # No result. Environment not exist.
            message = DescribeEnvironmentOpMessage.NoEnvironment.format(env_name)
            prompt.result(message)
        ret_result = OperationResult(self, response.request_id, message, response.result)
        return ret_result
class CreateEnvironmentOperation(OperationBase):
    """Create a new Elastic Beanstalk environment.

    Loads optional option settings from file, runs extension and option
    handlers, then calls CreateEnvironment; records the request ID so the
    wait operation can poll for completion.
    """
    # Parameters this operation reads from the parameter pool.
    _input_parameters = {
        ParameterName.AwsAccessKeyId,
        ParameterName.AwsSecretAccessKey,
        ParameterName.ServiceEndpoint,
        ParameterName.ApplicationName,
        ParameterName.ApplicationVersionName,
        ParameterName.EnvironmentName,
        ParameterName.SolutionStack,
        ParameterName.RdsEnabled,
    }
    # Parameters this operation contributes back to the pool.
    _output_parameters = {
        ParameterName.EnvironmentName,
        ParameterName.EnvironmentId,
        ParameterName.CreateEnvironmentRequestID,
    }
    def execute(self, parameter_pool):
        """Create the environment; an already-existing one is not an error."""
        eb_client = self._get_eb_client(parameter_pool)
        app_name = parameter_pool.get_value(ParameterName.ApplicationName, False)
        version_name = eb_utils.check_app_version(parameter_pool, eb_client)
        env_name = parameter_pool.get_value(ParameterName.EnvironmentName, False)
        stack_name = parameter_pool.get_value(ParameterName.SolutionStack, False)
        tier = parameter_pool.get_value(ParameterName.EnvironmentTier, False)
        spec = TemplateSpecification()
        # Try load option setting file if exist
        option_location = parameter_pool.get_value(ParameterName.OptionSettingFile, False)
        option_settings = config_file.load_env_option_setting_file(option_location,
                                                                   quiet = True)
        if option_settings is not None and len(option_settings) > 0:
            prompt.info(CreateEnvironmentOpMessage.UsingOptionSetting.format(option_location))
        else:
            option_settings = dict()
        option_remove = dict()
        # Process extensions first before we process options
        self._extension_handler(parameter_pool, spec, stack_name, option_settings, option_remove)
        # Process options
        self._option_setting_handler(parameter_pool, spec, stack_name, None, option_settings, option_remove)
        prompt.action(CreateEnvironmentOpMessage.Start.format(env_name))
        try:
            response = eb_client.create_environment(application = app_name,
                                                    environment = env_name,
                                                    solution_stack = stack_name,
                                                    version_label = version_name,
                                                    option_settings = option_settings,
                                                    option_remove = option_remove,
                                                    template_specification = spec,
                                                    tier = tier)
        except AlreadyExistException:
            # An existing environment is reported as a benign outcome.
            log.info('Environment "{0}" already exist.'.format(env_name))
            prompt.result(CreateEnvironmentOpMessage.AlreadyExist.format(env_name))
            ret_result = OperationResult(self,
                                         None,
                                         CreateEnvironmentOpMessage.AlreadyExist.format(env_name),
                                         None)
        else:
            log.info('Received response for CreateEnvironemnt call.')
            prompt.info(CreateEnvironmentOpMessage.Succeed)
            prompt.result(CreateEnvironmentOpMessage.WaitAfterLaunch.format(env_name))
            self._log_api_result(self.__class__.__name__, 'CreateEnvironment', response.result)
            # Publish the request ID so the wait operation can poll on it.
            parameter_pool.put(Parameter(ParameterName.CreateEnvironmentRequestID,
                                         response.request_id,
                                         ParameterSource.OperationOutput))
            ret_result = OperationResult(self,
                                         response.request_id,
                                         CreateEnvironmentOpMessage.Succeed,
                                         response.result)
        return ret_result
    def _rds_creation(self):
        # Placeholder; no RDS provisioning is performed here.
        pass
class WaitForCreateEnvironmentFinishOperation(OperationBase):
    """Poll until a newly launched environment leaves the Launching state,
    then report whether it came up Ready/Green or timed out."""
    # Parameters this operation reads from the parameter pool.
    _input_parameters = {
        ParameterName.AwsAccessKeyId,
        ParameterName.AwsSecretAccessKey,
        ParameterName.ServiceEndpoint,
        ParameterName.EnvironmentName,
        ParameterName.CreateEnvironmentRequestID,
        ParameterName.WaitForFinishTimeout,
        ParameterName.PollDelay,
    }
    _output_parameters = set()
    def execute(self, parameter_pool):
        """Block (polling) until launch finishes; return an OperationResult."""
        eb_client = self._get_eb_client(parameter_pool)
        env_name = parameter_pool.get_value(ParameterName.EnvironmentName, False)
        wait_timeout = parameter_pool.get_value(ParameterName.WaitForFinishTimeout, False)
        poll_delay = parameter_pool.get_value(ParameterName.PollDelay, False)
        create_request_id = parameter_pool.get_value(ParameterName.CreateEnvironmentRequestID)
        result = self._wait_for_env_operation_finish(
            eb_client = eb_client,
            env_name = env_name,
            original_request_id = create_request_id,
            pending_status = EnvironmentStatus.Launching,
            expected_health = None,
            operation_name = self.__class__.__name__,
            action_name = WaitForCreateEnvironmentFinishOpMessage.Action,
            wait_timeout = wait_timeout,
            poll_delay = poll_delay,
            include_deleted = 'false',
            initial_delay = 0)
        # After polling: report the environment's final state to the user.
        status = result[0].status
        health = result[0].health
        cname = result[0].cname
        log.info('Stopped polling. Environment "{0}" is now {1}, health is {2}.\nURL is "{3}".'.\
                 format(env_name, status, health, cname))
        if status.lower() == EnvironmentStatus.Ready.lower() \
            and health.lower() == EnvironmentHealth.Green.lower():
            prompt.info(WaitForCreateEnvironmentFinishOpMessage.Succeed.format(env_name))
            prompt.result(WaitForCreateEnvironmentFinishOpMessage.Result.format(cname))
        else:
            prompt.info(WaitForCreateEnvironmentFinishOpMessage.Timeout.format(env_name))
        ret_result = OperationResult(self,
                                     None,
                                     WaitForCreateEnvironmentFinishOpMessage.Result.\
                                     format(cname, status, health),
                                     result)
        return ret_result
class TerminateEnvironmentOperation(OperationBase):
    """Request termination of an Elastic Beanstalk environment and record
    the TerminateEnvironment request ID for subsequent polling."""
    # Parameters this operation reads from the parameter pool.
    _input_parameters = {
        ParameterName.AwsAccessKeyId,
        ParameterName.AwsSecretAccessKey,
        ParameterName.ServiceEndpoint,
        ParameterName.EnvironmentName,
    }
    # Parameters this operation contributes back to the pool.
    _output_parameters = {
        ParameterName.TerminateEnvironmentRequestID,
    }
    def execute(self, parameter_pool):
        """Terminate the configured environment; return an OperationResult.

        Service exceptions propagate to the caller unchanged.  (The original
        wrapped the call in ``try: ... except: raise``, which is a no-op and
        has been removed.)
        """
        eb_client = self._get_eb_client(parameter_pool)
        env_name = parameter_pool.get_value(ParameterName.EnvironmentName, False)
        prompt.action(TerminateEnvironmentOpMessage.Start.format(env_name))
        response = eb_client.terminate_environment(env_name)
        log.info('Received response for TerminateEnvironemnt call.')
        prompt.result(TerminateEnvironmentOpMessage.Succeed.format(env_name))
        self._log_api_result(self.__class__.__name__, 'TerminateEnvironment', response.result)
        # Publish the request ID so the wait operation can poll on it.
        parameter_pool.put(Parameter(ParameterName.TerminateEnvironmentRequestID,
                                     response.request_id,
                                     ParameterSource.OperationOutput))
        ret_result = OperationResult(self,
                                     response.request_id,
                                     TerminateEnvironmentOpMessage.Succeed,
                                     response.result)
        return ret_result
class WaitForTerminateEnvironmentFinishOperation(OperationBase):
    """Poll until a terminating environment leaves the Terminating state,
    then report whether it reached Terminated or timed out."""
    # Parameters this operation reads from the parameter pool.
    _input_parameters = {
        ParameterName.AwsAccessKeyId,
        ParameterName.AwsSecretAccessKey,
        ParameterName.ServiceEndpoint,
        ParameterName.EnvironmentName,
        ParameterName.TerminateEnvironmentRequestID,
        ParameterName.WaitForFinishTimeout,
        ParameterName.PollDelay,
    }
    _output_parameters = set()
    def execute(self, parameter_pool):
        """Block (polling) until termination finishes; return an OperationResult."""
        eb_client = self._get_eb_client(parameter_pool)
        env_name = parameter_pool.get_value(ParameterName.EnvironmentName, False)
        wait_timeout = parameter_pool.get_value(ParameterName.WaitForFinishTimeout, False)
        poll_delay = parameter_pool.get_value(ParameterName.PollDelay, False)
        terminate_request_id = parameter_pool.get_value(ParameterName.TerminateEnvironmentRequestID)
        result = self._wait_for_env_operation_finish(
            eb_client = eb_client,
            env_name = env_name,
            original_request_id = terminate_request_id,
            pending_status = EnvironmentStatus.Terminating,
            expected_health = None,
            operation_name = self.__class__.__name__,
            action_name = WaitForTerminateEnvironmentFinishOpMessage.Action,
            wait_timeout = wait_timeout,
            poll_delay = poll_delay,
            include_deleted = 'true',
            initial_delay = ServiceDefault.TERMINATE_ENV_POLL_DELAY)
        # After polling: report the environment's final state to the user.
        status = result[0].status
        health = result[0].health
        log.info('Stopped polling. Environment "{0}" is now {1}, health is {2}.'.format\
                 (env_name, status, health))
        if status.lower() == EnvironmentStatus.Terminated.lower():
            prompt.result(WaitForTerminateEnvironmentFinishOpMessage.Succeed.format(env_name))
        else:
            prompt.result(WaitForTerminateEnvironmentFinishOpMessage.Timeout.format(env_name))
            prompt.result(WaitForTerminateEnvironmentFinishOpMessage.Status.format(status, health))
        ret_result = OperationResult(self,
                                     None,
                                     WaitForTerminateEnvironmentFinishOpMessage.Result.format(status),
                                     result)
        return ret_result
class UpdateEnvOptionSettingOperation(OperationBase):
    """Update an environment's option settings.

    Loads optional option settings from file, runs extension and option
    handlers, validates the pending change (aborting on errors or user
    cancel), then calls UpdateEnvironment and records the request ID.
    """
    # Parameters this operation reads from the parameter pool.
    _input_parameters = {
        ParameterName.AwsAccessKeyId,
        ParameterName.AwsSecretAccessKey,
        ParameterName.ServiceEndpoint,
        ParameterName.EnvironmentName,
        ParameterName.OptionSettingFile,
        ParameterName.RdsEnabled,
    }
    # Fixed: execute() publishes the *update* request ID; the original
    # declared TerminateEnvironmentRequestID here (copy-paste error).
    _output_parameters = {
        ParameterName.UpdateEnvironmentRequestID,
    }
    def execute(self, parameter_pool):
        """Validate and apply the option-setting change; return an OperationResult.

        Raises EBSCliException when validation reports errors or the user
        declines to continue past warnings.  (The original wrapped the update
        call in ``try: ... except: raise``, a no-op that has been removed.)
        """
        eb_client = self._get_eb_client(parameter_pool)
        app_name = parameter_pool.get_value(ParameterName.ApplicationName, False)
        env_name = parameter_pool.get_value(ParameterName.EnvironmentName, False)
        stack_name = parameter_pool.get_value(ParameterName.SolutionStack, False)
        prompt.action(UpdateEnvironmentOptionSettingOpMessage.Start.format(env_name))
        tier = parameter_pool.get_value(ParameterName.EnvironmentTier, False)
        spec = TemplateSpecification()
        # Try load option setting file if exist
        option_location = parameter_pool.get_value(ParameterName.OptionSettingFile, False)
        option_settings = config_file.load_env_option_setting_file(option_location,
                                                                   quiet = True)
        if option_settings is not None and len(option_settings) > 0:
            prompt.info(UpdateEnvironmentOptionSettingOpMessage.UsingOptionSetting.format(option_location))
        else:
            option_settings = dict()
        option_remove = dict()
        # Process extensions first before we process options
        self._extension_handler(parameter_pool, spec, stack_name, option_settings, option_remove)
        # Process options
        self._option_setting_handler(parameter_pool, spec, stack_name, env_name, option_settings, option_remove)
        # Validate before applying; raises on errors or user cancellation.
        self._validate_change(parameter_pool, eb_client, app_name, env_name,
                              option_settings, option_remove, spec)
        response = eb_client.update_environment(env_name,
                                                option_settings = option_settings,
                                                option_remove = option_remove,
                                                template_specification = spec,
                                                tier = tier)
        log.info('Received response for UpdateEnvironemnt call.')
        prompt.result(UpdateEnvironmentOptionSettingOpMessage.Succeed.format(env_name))
        self._log_api_result(self.__class__.__name__, 'UpdateEnvironment', response.result)
        # Publish the request ID so the wait operation can poll on it.
        parameter_pool.put(Parameter(ParameterName.UpdateEnvironmentRequestID,
                                     response.request_id,
                                     ParameterSource.OperationOutput))
        ret_result = OperationResult(self,
                                     response.request_id,
                                     UpdateEnvironmentOptionSettingOpMessage.Succeed.format(env_name),
                                     response.result)
        return ret_result
    def _validate_change(self, parameter_pool, eb_client, app_name, env_name,
                         option_settings, option_remove, template_spec):
        """Validate pending settings; raise EBSCliException on error or user abort."""
        response = eb_client.validate_configuration_settings(app_name, option_settings,
                                                             environment_name = env_name,
                                                             option_remove = option_remove,
                                                             template_specification = template_spec)
        warning_count = 0
        error_count = 0
        for message in response.result:
            if misc.string_equal_ignore_case(message.severity, ValidationSeverity.SeverityError):
                error_count = error_count + 1
            else:
                warning_count = warning_count + 1
            # Every validation message (error or warning) is shown to the user.
            prompt.error(ValidationMessage.ValidateSettingError.format\
                         (message.severity, message.namespace, message.option_name, message.message))
        if error_count > 0:
            log.info('Validating configuration setting failed. Abort command.')
            raise EBSCliException()
        elif warning_count > 0:
            # --force suppresses the interactive confirmation on warnings.
            if parameter_pool.get_value(ParameterName.Force) == ServiceDefault.ENABLED:
                pass
            elif not TerminalBase.ask_confirmation(UpdateEnvironmentOptionSettingOpMessage.Continue):
                log.info('User cancelled command.')
                raise EBSCliException()
        else:
            log.info('Validating configuration setting passed.')
class WaitForUpdateEnvOptionSettingFinishOperation(OperationBase):
    """Poll until an updating environment leaves the Updating state, then
    report whether it returned to Ready/Green or timed out."""
    # Parameters this operation reads from the parameter pool.
    # NOTE(review): WaitForFinishTimeout is declared here but execute()
    # reads WaitForUpdateTimeout - confirm which parameter is intended.
    _input_parameters = {
        ParameterName.AwsAccessKeyId,
        ParameterName.AwsSecretAccessKey,
        ParameterName.ServiceEndpoint,
        ParameterName.EnvironmentName,
        ParameterName.WaitForFinishTimeout,
        ParameterName.PollDelay,
    }
    _output_parameters = set()
    def execute(self, parameter_pool):
        """Block (polling) until the update finishes; return an OperationResult."""
        eb_client = self._get_eb_client(parameter_pool)
        env_name = parameter_pool.get_value(ParameterName.EnvironmentName, False)
        wait_timeout = parameter_pool.get_value(ParameterName.WaitForUpdateTimeout, False)
        poll_delay = parameter_pool.get_value(ParameterName.PollDelay, False)
        # update_request_id = parameter_pool.get_value(ParameterName.UpdateEnvironmentRequestID)
        result = self._wait_for_env_operation_finish(
            eb_client = eb_client,
            env_name = env_name,
            original_request_id = None,
            pending_status = EnvironmentStatus.Updating,
            expected_health = EnvironmentHealth.Green,
            operation_name = self.__class__.__name__,
            action_name = WaitForUpdateEnvOptionSettingFinishOpMessage.Action,
            wait_timeout = wait_timeout,
            poll_delay = poll_delay,
            include_deleted = 'false',
            initial_delay = ServiceDefault.UPDATE_ENV_POLL_DELAY)
        # After polling: report the environment's final state to the user.
        status = result[0].status
        health = result[0].health
        cname = result[0].cname
        log.info('Stopped polling. Environment "{0}" is now {1}, health is {2}.\nURL is "{3}".'.\
                 format(env_name, status, health, cname))
        if status.lower() == EnvironmentStatus.Ready.lower() \
            and health.lower() == EnvironmentHealth.Green.lower():
            prompt.result(WaitForUpdateEnvOptionSettingFinishOpMessage.Succeed.format(env_name))
        else:
            prompt.result(WaitForUpdateEnvOptionSettingFinishOpMessage.Timeout.format(env_name))
        prompt.info(WaitForUpdateEnvOptionSettingFinishOpMessage.Result.\
                    format(cname, status, health))
        ret_result = OperationResult(self,
                                     None,
                                     WaitForUpdateEnvOptionSettingFinishOpMessage.Result.\
                                     format(cname, status, health),
                                     result)
        return ret_result
class GetEnvironmentEventsOperation(OperationBase):
    """Fetch recent events for an environment and print them."""
    _input_parameters = {
        ParameterName.AwsAccessKeyId,
        ParameterName.AwsSecretAccessKey,
        ParameterName.ServiceEndpoint,
        ParameterName.EnvironmentName,
    }
    _output_parameters = set()
    def execute(self, parameter_pool):
        """Call DescribeEvents and print one tab-separated line per event.

        The first subcommand argument, when present, is parsed as the
        maximum number of events to fetch; otherwise a service default is
        used. Raises EBSCliException when the argument is not an integer.
        """
        eb_client = self._get_eb_client(parameter_pool)
        app_name = parameter_pool.get_value(ParameterName.ApplicationName, False)
        env_name = parameter_pool.get_value(ParameterName.EnvironmentName, False)
        # SubCommand is a list of trailing CLI arguments.
        max_records = parameter_pool.get_value(ParameterName.SubCommand)
        try:
            max_records = int(max_records[0]) if len(max_records) > 0 else ServiceDefault.EVENT_DEFAULT_NUM
        except ValueError:
            raise EBSCliException(GetEnvironmentEventsOpMessage.NotValidNumber.format(max_records[0]))
        response = eb_client.describe_events(app_name, env_name, max_records=max_records)
        if len(response.result) > 0:
            # Output format: "<date>\t<severity>\t<message>".
            for event in response.result:
                msg = '{0}\t{1}\t{2}'.format(event.event_date,
                                             event.severity,
                                             event.message)
                prompt.plain(msg)
        ret_result = OperationResult(self, response.request_id, None, response.result)
        return ret_result
class EnvRequestLogOperation(OperationBase):
    """Ask Elastic Beanstalk to gather environment info (e.g. tailed logs).

    Stores the request ID of the RequestEnvironmentInfo call in the
    parameter pool so a subsequent retrieve operation can poll for its
    completion.
    """
    _input_parameters = {
        ParameterName.AwsAccessKeyId,
        ParameterName.AwsSecretAccessKey,
        ParameterName.ServiceEndpoint,
        ParameterName.EnvironmentName,
    }
    # Fixed: this operation outputs the RequestEnvironmentInfo request ID
    # (see the parameter_pool.put below), not a terminate-environment
    # request ID — the previous declaration was a copy/paste error.
    _output_parameters = {
        ParameterName.RequestEnvInfoRequestID,
    }
    def execute(self, parameter_pool):
        """Issue the request-environment-info call and record its request ID."""
        eb_client = self._get_eb_client(parameter_pool)
        env_name = parameter_pool.get_value(ParameterName.EnvironmentName, False)
        # The optional subcommand selects the info type; default is tailing logs.
        _, subcommands = parameter_pool.command
        info_type = subcommands[0].lower() if len(subcommands) > 0 else EbDefault.TailLog
        response = eb_client.request_environment_info(env_name, info_type=info_type)
        parameter_pool.put(Parameter(ParameterName.RequestEnvInfoRequestID,
                                     response.request_id,
                                     ParameterSource.OperationOutput))
        ret_result = OperationResult(self, response.request_id, None, None)
        return ret_result
class EnvRetrieveLogOperation(OperationBase):
    """Wait for a RequestEnvironmentInfo call to finish, then download and
    print the most recent environment info (e.g. tailed logs) per instance.
    """
    _input_parameters = {
        ParameterName.AwsAccessKeyId,
        ParameterName.AwsSecretAccessKey,
        ParameterName.ServiceEndpoint,
        ParameterName.EnvironmentName,
        ParameterName.WaitForFinishTimeout,
        ParameterName.PollDelay,
    }
    _output_parameters = set()
    def execute(self, parameter_pool):
        """Poll until the info request completes, then fetch and echo each
        instance's latest log from the URL returned by the service."""
        eb_client = self._get_eb_client(parameter_pool)
        env_name = parameter_pool.get_value(ParameterName.EnvironmentName, False)
        # NOTE(review): the declared input is WaitForFinishTimeout but the
        # value read here is WaitForUpdateTimeout — confirm which key the
        # parameter pool actually carries.
        wait_timeout = parameter_pool.get_value(ParameterName.WaitForUpdateTimeout, False)
        poll_delay = parameter_pool.get_value(ParameterName.PollDelay, False)
        info_request_id = parameter_pool.get_value(ParameterName.RequestEnvInfoRequestID)
        self._wait_for_env_operation_finish(
            eb_client = eb_client,
            env_name = env_name,
            original_request_id = info_request_id,
            pending_status = EnvironmentStatus.Updating,
            expected_health = None,
            operation_name = self.__class__.__name__,
            action_name = EnvRetrieveInfoOpMessage.Action,
            wait_timeout = wait_timeout,
            poll_delay = poll_delay,
            include_deleted = 'false',
            initial_delay = ServiceDefault.UPDATE_ENV_POLL_DELAY,
            quiet = False)
        # After polling
        _, subcommands = parameter_pool.command
        info_type = subcommands[0].lower() if len(subcommands) > 0 else EbDefault.TailLog
        response = eb_client.retrieve_environment_info(env_name, info_type=info_type)
        # Sort and find latest log for each instance: keep, per instance ID,
        # only the entry with the newest sample timestamp.
        instance_timestamps = dict()
        instance_logs = dict()
        for env_info in response.result:
            instance_id = env_info.ec2_instance_id
            timestamp = env_info.sample_timestamp
            url = env_info.message
            if instance_id not in instance_timestamps\
                or instance_timestamps[instance_id] < timestamp:
                instance_timestamps[instance_id] = timestamp
                instance_logs[instance_id] = url
        # env_info.message holds a download URL; fetch and print its content
        # converted to the terminal's codepage.
        for instance_id in sorted(instance_logs.keys()):
            content = misc.to_unicode(requests.get(instance_logs[instance_id]).content)
            prompt.result(os.linesep +
                          misc.to_terminal_codepage(EnvRetrieveInfoOpMessage.FileOuputPrefix.format(instance_id)))
            prompt.result(misc.to_terminal_codepage(content))
        ret_result = OperationResult(self,
                                     None,
                                     None,
                                     None)
        return ret_result
| |
from apps.projects.models import Project, ProjectPhases
from apps.sepa.sepa import SepaDocument, SepaAccount
from django.conf import settings
from django.utils import timezone
from django.db import models
from django.utils.translation import ugettext as _
from django_extensions.db.fields import ModificationDateTimeField, CreationDateTimeField
from djchoices import DjangoChoices, ChoiceItem
from django.db.models.signals import post_save
from django.dispatch import receiver
import csv
class Payout(models.Model):
    """
    A projects is payed after it's fully funded in the first batch (2x/month).
    Project payouts are checked manually. Selected projects can be exported to a SEPA file.
    """
    class PayoutLineStatuses(DjangoChoices):
        # Lifecycle of a payout line: new -> progress -> completed.
        new = ChoiceItem('new', label=_("New"))
        progress = ChoiceItem('progress', label=_("Progress"))
        completed = ChoiceItem('completed', label=_("Completed"))
    # Scheduling and bookkeeping.
    planned = models.DateField(_("Planned"), help_text=_("Date that this batch should be processed."))
    project = models.ForeignKey('projects.Project')
    status = models.CharField(_("status"), max_length=20, choices=PayoutLineStatuses.choices)
    created = CreationDateTimeField(_("Created"))
    updated = ModificationDateTimeField(_("Updated"))
    # Amount is an integer; local_amount divides by 100, so it is stored in cents.
    amount = models.PositiveIntegerField(_("Amount"))
    currency = models.CharField(_("Currency"), max_length=3)
    # Sender/receiver bank account details used for the SEPA export.
    sender_account_number = models.CharField(max_length=100)
    receiver_account_number = models.CharField(max_length=100, blank=True)
    receiver_account_iban = models.CharField(max_length=100, blank=True)
    receiver_account_bic = models.CharField(max_length=100, blank=True)
    receiver_account_name = models.CharField(max_length=100)
    receiver_account_city = models.CharField(max_length=100)
    receiver_account_country = models.CharField(max_length=100, null=True)
    invoice_reference = models.CharField(max_length=100)
    description_line1 = models.CharField(max_length=100, blank=True, default="")
    description_line2 = models.CharField(max_length=100, blank=True, default="")
    description_line3 = models.CharField(max_length=100, blank=True, default="")
    description_line4 = models.CharField(max_length=100, blank=True, default="")
    @property
    def local_amount(self):
        # Amount formatted in whole currency units (stored value is in cents).
        return '%.2f' % (float(self.amount) / 100)
    @property
    def local_amount_safe(self):
        # Payable share of the campaign's "safe" money, as a 2-decimal string.
        return '%.2f' % (self.project.projectcampaign.money_safe * settings.PROJECT_PAYOUT_RATE / 100)
    @property
    def is_valid(self):
        # TODO: Do a more advanced check. Maybe use IBAN check by a B. Q. Konrath?
        if self.receiver_account_iban and self.receiver_account_bic:
            return True
        return False
    @property
    def amount_safe(self):
        # Payable share of the campaign's "safe" money, rounded to an int
        # (presumably cents, matching `amount` — verify against callers).
        return int(round(self.project.projectcampaign.money_safe * settings.PROJECT_PAYOUT_RATE))
    def __unicode__(self):
        date = self.created.strftime('%d-%m-%Y')
        return self.invoice_reference + " : " + date + " : " + self.receiver_account_number + " : EUR " + str(self.local_amount)
class BankMutationLine(models.Model):
    """One row parsed from an uploaded bank-mutation (CSV) file.

    Debit lines (dc='D') are matched to Payout records by invoice
    reference in match_debit_mutations().
    """
    created = CreationDateTimeField(_("Created"))
    # The uploaded file this line was parsed from.
    bank_mutation = models.ForeignKey("payouts.BankMutation")
    issuer_account_number = models.CharField(max_length=100)
    currency = models.CharField(max_length=3)
    start_date = models.DateField(_("Date started"))
    # 'D' for debit, 'C' for credit.
    dc = models.CharField(_("Debet/Credit"), max_length=1)
    amount = models.DecimalField(decimal_places=2, max_digits=15)
    account_number = models.CharField(max_length=100)
    account_name = models.CharField(max_length=100)
    transaction_type = models.CharField(max_length=10)
    invoice_reference = models.CharField(max_length=100)
    description_line1 = models.CharField(max_length=100, blank=True, default="")
    description_line2 = models.CharField(max_length=100, blank=True, default="")
    description_line3 = models.CharField(max_length=100, blank=True, default="")
    description_line4 = models.CharField(max_length=100, blank=True, default="")
    # Set when the line has been matched to a payout; null means unmatched.
    payout = models.ForeignKey("payouts.Payout", null=True)
    def __unicode__(self):
        return str(self.start_date) + " " + self.dc + " : " + self.account_name + " [" + self.account_number + "] " + \
            " EUR " + str(self.amount)
class BankMutation(models.Model):
    """An uploaded bank-mutation CSV file plus its raw contents.

    Saving the model stores the raw file text in ``mutations``, then parses
    it into BankMutationLine rows and matches debit lines to payouts.
    """
    created = CreationDateTimeField(_("Created"))
    mut_file = models.FileField(_("Uploaded mutation file"), upload_to="bank_mutations", null=True)
    # Raw text of the uploaded file, kept for auditing and re-parsing.
    mutations = models.TextField(blank=True)

    def save(self, force_insert=False, force_update=False, using=None):
        """Persist the model, storing and parsing the uploaded file.

        Fixes over the previous version:
        - ``mutations`` is filled *before* calling ``super().save()`` so the
          file contents are actually written to the database (previously the
          assignment happened after the row was saved and was silently lost).
        - the upload is rewound before reading, and ``parse_file`` works from
          the stored text instead of the already-exhausted file object.
        - ``force_insert``/``force_update``/``using`` are forwarded to Django.
        """
        if self.mut_file:
            self.mut_file.seek(0)
            self.mutations = self.mut_file.read()
        super(BankMutation, self).save(force_insert=force_insert,
                                       force_update=force_update, using=using)
        self.parse_file()

    def parse_file(self):
        """Create a BankMutationLine for every data row of the stored CSV."""
        mutations = csv.reader(self.mutations.splitlines())
        for m in mutations:
            if len(m) > 1:
                # Dates arrive as YYYYMMDD; convert to ISO (YYYY-MM-DD).
                date = m[2]
                date = date[0:4] + "-" + date[4:6] + "-" + date[6:]
                line = BankMutationLine(issuer_account_number=m[0], currency=m[1], start_date=date, dc=m[3],
                                        amount=m[4], account_number=m[5], account_name=m[6], transaction_type=m[8],
                                        invoice_reference=m[10], description_line1=m[11], description_line2=m[12],
                                        description_line3=m[13], description_line4=m[14],
                                        bank_mutation=self)
                line.save()
        # Link freshly imported debit lines to known payouts.
        match_debit_mutations()

    def __unicode__(self):
        return "Bank Mutations " + str(self.created.strftime('%B %Y'))
@receiver(post_save, weak=False, sender=Project)
def create_payout_for_fully_funded_project(sender, instance, created, **kwargs):
    """Create or reschedule a Payout when a project is in the 'act' phase.

    Payouts are batched twice a month: saves in the first half of the month
    are planned for the 15th, later saves for the 1st of the next month.
    """
    project = instance
    now = timezone.now()
    # Check projects in phase Act that have asked for money.
    if project.phase == ProjectPhases.act and project.projectcampaign.money_asked:
        if now.day <= 15:
            next_date = timezone.datetime(now.year, now.month, 15)
        elif now.month == 12:
            # Fix: rolling over from December previously produced month 13
            # and raised ValueError; wrap to January of the next year.
            next_date = timezone.datetime(now.year + 1, 1, 1)
        else:
            next_date = timezone.datetime(now.year, now.month + 1, 1)
        amount = round(project.projectcampaign.money_donated * settings.PROJECT_PAYOUT_RATE)
        try:
            line = Payout.objects.get(project=project)
            # Only payouts that have not been processed yet may be rescheduled.
            if line.status == Payout.PayoutLineStatuses.new:
                line.planned = next_date
                line.save()
        except Payout.DoesNotExist:
            line = Payout.objects.create(planned=next_date, project=project, status=Payout.PayoutLineStatuses.new,
                                         amount=amount)
            organization = project.projectplan.organization
            line.receiver_account_bic = organization.account_bic
            line.receiver_account_iban = organization.account_iban
            line.receiver_account_number = organization.account_number
            line.receiver_account_name = organization.account_name
            line.receiver_account_city = organization.account_city
            line.receiver_account_country = organization.account_bank_country
            # objects.create() already assigned a primary key, so the final
            # invoice reference can be built in a single follow-up save
            # (previously an intermediate 'PP' placeholder was saved first).
            line.invoice_reference = str(project.id) + '-' + str(line.id)
            line.save()
def create_sepa_xml(payouts):
    """Build a SEPA credit-transfer XML document for the given payouts."""
    # The batch id doubles as message and payment-info identification.
    batch_id = timezone.datetime.strftime(timezone.now(), '%Y%m%d%H%I%S')

    document = SepaDocument(type='CT')
    document.set_debtor(SepaAccount(name=settings.SEPA['name'],
                                    iban=settings.SEPA['iban'],
                                    bic=settings.SEPA['bic']))
    document.set_info(message_identification=batch_id, payment_info_id=batch_id)
    document.set_initiating_party(name=settings.SEPA['name'], id=settings.SEPA['id'])

    # One credit transfer per payout line.
    for payout in payouts.all():
        beneficiary = SepaAccount(name=payout.receiver_account_name,
                                  iban=payout.receiver_account_iban,
                                  bic=payout.receiver_account_bic)
        document.add_credit_transfer(creditor=beneficiary,
                                     amount=payout.amount,
                                     creditor_payment_id=payout.invoice_reference)
    return document.as_xml()
def match_debit_mutations():
    """Link unmatched debit bank-mutation lines to their payouts.

    A line matches a payout by invoice reference. On a match the payout is
    marked completed and the project's payout date is set to the line's
    start date.
    """
    lines = BankMutationLine.objects.filter(dc='D', payout__isnull=True).all()
    for line in lines:
        try:
            payout = Payout.objects.filter(invoice_reference=line.invoice_reference).get()
        except Payout.DoesNotExist:
            continue
        # Fix: record the match on the actual ``payout`` ForeignKey. The
        # previous version assigned ``line.matched`` / ``line.payout_line``,
        # which are not fields on BankMutationLine, so matches were never
        # persisted and the same lines were re-processed on every run.
        line.payout = payout
        line.save()
        payout.status = Payout.PayoutLineStatuses.completed
        payout.save()
        payout.project.payout_date = line.start_date
        payout.project.save()
| |
"""Support for displaying the minimal and the maximal value."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_NAME,
CONF_TYPE,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_state_change
_LOGGER = logging.getLogger(__name__)

# Extra state attributes exposed by the sensor.
ATTR_MIN_VALUE = "min_value"
ATTR_MAX_VALUE = "max_value"
ATTR_COUNT_SENSORS = "count_sensors"
ATTR_MEAN = "mean"
ATTR_LAST = "last"

# Attributes copied into device_state_attributes when they have a value.
ATTR_TO_PROPERTY = [
    ATTR_COUNT_SENSORS,
    ATTR_MAX_VALUE,
    ATTR_MEAN,
    ATTR_MIN_VALUE,
    ATTR_LAST,
]

# Configuration keys.
CONF_ENTITY_IDS = "entity_ids"
CONF_ROUND_DIGITS = "round_digits"

ICON = "mdi:calculator"

# Maps each exposed attribute to the config value that selects that
# sensor type; the type chooses which attribute is reported as the state.
SENSOR_TYPES = {
    ATTR_MIN_VALUE: "min",
    ATTR_MAX_VALUE: "max",
    ATTR_MEAN: "mean",
    ATTR_LAST: "last",
}

# Platform schema: the sensor type defaults to "max".
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_TYPE, default=SENSOR_TYPES[ATTR_MAX_VALUE]): vol.All(
            cv.string, vol.In(SENSOR_TYPES.values())
        ),
        vol.Optional(CONF_NAME): cv.string,
        vol.Required(CONF_ENTITY_IDS): cv.entity_ids,
        vol.Optional(CONF_ROUND_DIGITS, default=2): vol.Coerce(int),
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the min/max/mean sensor."""
    sensor = MinMaxSensor(
        hass,
        config.get(CONF_ENTITY_IDS),
        config.get(CONF_NAME),
        config.get(CONF_TYPE),
        config.get(CONF_ROUND_DIGITS),
    )
    # Add the entity and request an immediate first update.
    async_add_entities([sensor], True)
    return True
def calc_min(sensor_values):
    """Calculate min value, honoring unknown states."""
    known = (value for value in sensor_values if value != STATE_UNKNOWN)
    # None when every state is unknown (or the input is empty).
    return min(known, default=None)
def calc_max(sensor_values):
    """Calculate max value, honoring unknown states."""
    known = (value for value in sensor_values if value != STATE_UNKNOWN)
    # None when every state is unknown (or the input is empty).
    return max(known, default=None)
def calc_mean(sensor_values, round_digits):
    """Calculate mean value, honoring unknown states."""
    known = [value for value in sensor_values if value != STATE_UNKNOWN]
    if not known:
        # No numeric states available.
        return None
    return round(sum(known) / len(known), round_digits)
class MinMaxSensor(Entity):
    """Representation of a min/max sensor.

    Tracks a set of source entities, keeps their latest numeric states,
    and exposes min/max/mean/last as the sensor state depending on the
    configured type.
    """
    def __init__(self, hass, entity_ids, name, sensor_type, round_digits):
        """Initialize the min/max sensor."""
        self._hass = hass
        self._entity_ids = entity_ids
        self._sensor_type = sensor_type
        self._round_digits = round_digits
        if name:
            self._name = name
        else:
            # Derive a default name such as "Max sensor" from the type.
            self._name = "{} sensor".format(
                next(v for k, v in SENSOR_TYPES.items() if self._sensor_type == v)
            ).capitalize()
        self._unit_of_measurement = None
        self._unit_of_measurement_mismatch = False
        # Aggregates recomputed in async_update; exposed as attributes.
        self.min_value = self.max_value = self.mean = self.last = None
        self.count_sensors = len(self._entity_ids)
        # Latest float state (or STATE_UNKNOWN) per tracked entity id.
        self.states = {}
        @callback
        def async_min_max_sensor_state_listener(entity, old_state, new_state):
            """Handle the sensor state changes."""
            # Unknown/unavailable source states are stored as STATE_UNKNOWN
            # so the calc_* helpers can skip them.
            if new_state.state is None or new_state.state in [
                STATE_UNKNOWN,
                STATE_UNAVAILABLE,
            ]:
                self.states[entity] = STATE_UNKNOWN
                hass.async_add_job(self.async_update_ha_state, True)
                return
            # Adopt the first unit seen; flag a mismatch if entities differ.
            if self._unit_of_measurement is None:
                self._unit_of_measurement = new_state.attributes.get(
                    ATTR_UNIT_OF_MEASUREMENT
                )
            if self._unit_of_measurement != new_state.attributes.get(
                ATTR_UNIT_OF_MEASUREMENT
            ):
                _LOGGER.warning(
                    "Units of measurement do not match for entity %s", self.entity_id
                )
                self._unit_of_measurement_mismatch = True
            try:
                self.states[entity] = float(new_state.state)
                self.last = float(new_state.state)
            except ValueError:
                # Non-numeric states are ignored (previous value retained).
                _LOGGER.warning(
                    "Unable to store state. " "Only numerical states are supported"
                )
            hass.async_add_job(self.async_update_ha_state, True)
        async_track_state_change(hass, entity_ids, async_min_max_sensor_state_listener)
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def state(self):
        """Return the state of the sensor."""
        # Report no state at all when tracked entities disagree on units.
        if self._unit_of_measurement_mismatch:
            return None
        # Look up the attribute matching the configured sensor type.
        return getattr(
            self, next(k for k, v in SENSOR_TYPES.items() if self._sensor_type == v)
        )
    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        if self._unit_of_measurement_mismatch:
            return "ERR"
        return self._unit_of_measurement
    @property
    def should_poll(self):
        """No polling needed."""
        return False
    @property
    def device_state_attributes(self):
        """Return the state attributes of the sensor."""
        state_attr = {
            attr: getattr(self, attr)
            for attr in ATTR_TO_PROPERTY
            if getattr(self, attr) is not None
        }
        return state_attr
    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return ICON
    async def async_update(self):
        """Get the latest data and updates the states."""
        sensor_values = [self.states[k] for k in self._entity_ids if k in self.states]
        self.min_value = calc_min(sensor_values)
        self.max_value = calc_max(sensor_values)
        self.mean = calc_mean(sensor_values, self._round_digits)
| |
# -*- coding: utf-8 -*-
import vim
from orgmode._vim import echo, echom, echoe, ORGMODE, apply_count, repeat, insert_at_cursor, indent_orgmode
from orgmode.menu import Submenu, Separator, ActionEntry, add_cmd_mapping_menu
from orgmode.keybinding import Keybinding, Plug, Command
from orgmode.liborgmode.checkboxes import Checkbox
from orgmode.liborgmode.dom_obj import OrderListType
class EditCheckbox(object):
	u"""
	Checkbox plugin.

	Provides commands to insert new checkboxes above/below the current one,
	toggle a checkbox's state, and keep parent checkbox states and heading
	subtask counters ([n/m]) in sync with their children.
	"""
	def __init__(self):
		u""" Initialize plugin """
		object.__init__(self)
		# menu entries this plugin should create
		self.menu = ORGMODE.orgmenu + Submenu(u'Edit Checkbox')
		# key bindings for this plugin
		# key bindings are also registered through the menu so only additional
		# bindings should be put in this variable
		self.keybindings = []
		# commands for this plugin
		self.commands = []
	@classmethod
	def new_checkbox(cls, below=None):
		u"""Insert a new checkbox relative to the current one.

		:below: insert after the current checkbox when truthy, before it
		        otherwise. Leaves the editor in insert mode on the new line.
		"""
		d = ORGMODE.get_document()
		h = d.current_heading()
		if h is None:
			return
		# init checkboxes for current heading
		h.init_checkboxes()
		c = h.current_checkbox()
		nc = Checkbox()
		nc._heading = h
		# default checkbox level
		level = h.level
		start = vim.current.window.cursor[0] - 1
		# if no checkbox is found, insert at current line with indent level=1
		if c is None:
			if h.checkboxes:
				level = h.first_checkbox.level
			h.checkboxes.append(nc)
		else:
			l = c.get_parent_list()
			idx = c.get_index_in_parent_list()
			if l is not None and idx is not None:
				l.insert(idx + (1 if below else 0), nc)
				# workaround for broken associations, Issue #165
				nc._parent = c.parent
				if below:
					if c.next_sibling:
						c.next_sibling._previous_sibling = nc
					nc._next_sibling = c.next_sibling
					c._next_sibling = nc
					nc._previous_sibling = c
				else:
					if c.previous_sibling:
						c.previous_sibling._next_sibling = nc
					nc._next_sibling = c
					nc._previous_sibling = c.previous_sibling
					c._previous_sibling = nc
			t = c.type
			# increase key for ordered lists
			if t[-1] in OrderListType:
				try:
					num = int(t[:-1]) + (1 if below else -1)
					t = '%d%s' % (num, t[-1])
				except ValueError:
					# not numeric: try to step the single leading character
					# (e.g. a. -> b.) instead.
					try:
						char = ord(t[:-1]) + (1 if below else -1)
						t = '%s%s' % (chr(char), t[-1])
					except ValueError:
						pass
			nc.type = t
			if not c.status:
				nc.status = None
			level = c.level
			if below:
				start = c.end_of_last_child
			else:
				start = c.start
		nc.level = level
		vim.current.window.cursor = (start + 1, 0)
		if below:
			vim.command("normal o")
		else:
			vim.command("normal O")
		insert_at_cursor(str(nc))
		vim.command("call feedkeys('a')")
	@classmethod
	def toggle(cls, checkbox=None):
		u"""
		Toggle the checkbox given in the parameter.
		If the checkbox is not given, it will toggle the current checkbox.
		"""
		d = ORGMODE.get_document()
		current_heading = d.current_heading()
		# init checkboxes for current heading
		if current_heading is None:
			return
		current_heading = current_heading.init_checkboxes()
		if checkbox is None:
			# get current_checkbox
			c = current_heading.current_checkbox()
			# no checkbox found
			if c is None:
				cls.update_checkboxes_status()
				return
		else:
			c = checkbox
		if c.status == Checkbox.STATUS_OFF:
			# set checkbox status on if all children are on
			if not c.children or c.are_children_all(Checkbox.STATUS_ON):
				c.toggle()
				d.write_checkbox(c)
		elif c.status == Checkbox.STATUS_ON:
			# only switch off when no child would be left inconsistent
			if not c.children or c.is_child_one(Checkbox.STATUS_OFF):
				c.toggle()
				d.write_checkbox(c)
		elif c.status == Checkbox.STATUS_INT:
			# can't toggle intermediate state directly according to emacs orgmode
			pass
		# update checkboxes status
		cls.update_checkboxes_status()
	@classmethod
	def _update_subtasks(cls):
		u"""Refresh the heading's [n/m] subtask counter and all nested ones."""
		d = ORGMODE.get_document()
		h = d.current_heading()
		# init checkboxes for current heading
		h.init_checkboxes()
		# update heading subtask info
		c = h.first_checkbox
		if c is None:
			return
		total, on = c.all_siblings_status()
		h.update_subtasks(total, on)
		# update all checkboxes under current heading
		cls._update_checkboxes_subtasks(c)
	@classmethod
	def _update_checkboxes_subtasks(cls, checkbox):
		u"""Recursively refresh subtask counters for nested checkbox lists."""
		# update checkboxes
		for c in checkbox.all_siblings():
			if c.children:
				total, on = c.first_child.all_siblings_status()
				c.update_subtasks(total, on)
				cls._update_checkboxes_subtasks(c.first_child)
	@classmethod
	def update_checkboxes_status(cls):
		u"""Recompute all checkbox states for the current heading and the
		heading's subtask counter."""
		d = ORGMODE.get_document()
		h = d.current_heading()
		# init checkboxes for current heading
		h.init_checkboxes()
		cls._update_checkboxes_status(h.first_checkbox)
		cls._update_subtasks()
	@classmethod
	def _update_checkboxes_status(cls, checkbox=None):
		u""" helper function for update checkboxes status
		:checkbox: The first checkbox of this indent level
		:return: The status of the parent checkbox
		"""
		if checkbox is None:
			return
		status_off, status_on, status_int, total = 0, 0, 0, 0
		# update all top level checkboxes' status
		for c in checkbox.all_siblings():
			current_status = c.status
			# if this checkbox is not leaf, its status should determine by all its children
			if c.children:
				current_status = cls._update_checkboxes_status(c.first_child)
			# don't update status if the checkbox has no status
			if c.status is None:
				current_status = None
			# the checkbox needs to have status
			else:
				total += 1
			# count number of status in this checkbox level
			if current_status == Checkbox.STATUS_OFF:
				status_off += 1
			elif current_status == Checkbox.STATUS_ON:
				status_on += 1
			elif current_status == Checkbox.STATUS_INT:
				status_int += 1
			# write status if any update
			if current_status is not None and c.status != current_status:
				c.status = current_status
				d = ORGMODE.get_document()
				d.write_checkbox(c)
		parent_status = Checkbox.STATUS_INT
		# all silbing checkboxes are off status
		if status_off == total:
			parent_status = Checkbox.STATUS_OFF
		# all silbing checkboxes are on status
		elif status_on == total:
			parent_status = Checkbox.STATUS_ON
		# one silbing checkbox is on or int status
		elif status_on != 0 or status_int != 0:
			parent_status = Checkbox.STATUS_INT
		# other cases
		else:
			parent_status = None
		return parent_status
	def register(self):
		u"""
		Registration of the plugin.
		Key bindings and other initialization should be done here.
		"""
		add_cmd_mapping_menu(
			self,
			name=u'OrgCheckBoxNewAbove',
			function=u':py ORGMODE.plugins[u"EditCheckbox"].new_checkbox()<CR>',
			key_mapping=u'<localleader>cN',
			menu_desrc=u'New CheckBox Above'
		)
		add_cmd_mapping_menu(
			self,
			name=u'OrgCheckBoxNewBelow',
			function=u':py ORGMODE.plugins[u"EditCheckbox"].new_checkbox(below=True)<CR>',
			key_mapping=u'<localleader>cn',
			menu_desrc=u'New CheckBox Below'
		)
		add_cmd_mapping_menu(
			self,
			name=u'OrgCheckBoxToggle',
			function=u':silent! py ORGMODE.plugins[u"EditCheckbox"].toggle()<CR>',
			key_mapping=u'<localleader>cc',
			menu_desrc=u'Toggle Checkbox'
		)
		add_cmd_mapping_menu(
			self,
			name=u'OrgCheckBoxUpdate',
			function=u':silent! py ORGMODE.plugins[u"EditCheckbox"].update_checkboxes_status()<CR>',
			key_mapping=u'<localleader>c#',
			menu_desrc=u'Update Subtasks'
		)
# vim: set noexpandtab:
| |
from __future__ import absolute_import, division
import logging
from lxml import etree
from changes.config import db, statsreporter
from changes.constants import Result
from changes.db.utils import try_create
from changes.models import TestResult, TestResultManager, FailureReason
from changes.utils.agg import aggregate_result
from .base import ArtifactHandler
class XunitHandler(ArtifactHandler):
    """Parses jUnit/xunit-style XML artifacts into TestResult objects.

    Supports both the common "xunit" layout (<testcase> elements) and the
    bitten layout (<unittest-results> root with <test> elements).
    """
    logger = logging.getLogger('xunit')
    def process(self, fp):
        """Parse the artifact stream and persist results for this job step."""
        test_list = self.get_tests(fp)
        manager = TestResultManager(self.step)
        manager.save(test_list)
        return test_list
    @statsreporter.timer('xunithandler_get_tests')
    def get_tests(self, fp):
        """Return the list of TestResult parsed from *fp*.

        Malformed XML is not raised: it is recorded as a 'malformed_artifact'
        FailureReason and an empty list is returned.
        """
        try:
            # libxml has a limit on the size of a text field by default, but we encode stdout/stderr.
            #
            # Its not good to have such huge text fields in the first place but we still want to
            # avoid hard failing here if we do.
            parser = etree.XMLParser(huge_tree=True)
            root = etree.fromstring(fp.read(), parser=parser)
        except Exception:
            # Record the JobStep ID so we have any hope of tracking these down.
            self.logger.exception('Failed to parse XML; (step={})'.format(self.step.id.hex))
            try_create(FailureReason, {
                'step_id': self.step.id,
                'job_id': self.step.job_id,
                'build_id': self.step.job.build_id,
                'project_id': self.step.project_id,
                'reason': 'malformed_artifact'
            })
            db.session.commit()
            return []
        # Dispatch on the root tag to pick the right dialect parser.
        if root.tag == 'unittest-results':
            return self.get_bitten_tests(root)
        return self.get_xunit_tests(root)
    def get_bitten_tests(self, root):
        """Parse a bitten-style <unittest-results> document."""
        step = self.step
        results = []
        # XXX(dcramer): bitten xml syntax, no clue what this
        for node in root.iter('test'):
            # classname, name, time
            attrs = dict(node.items())
            # AFAIK the spec says only one tag can be present
            # http://windyroad.com.au/dl/Open%20Source/JUnit.xsd
            if attrs['status'] == 'success':
                result = Result.passed
            elif attrs['status'] == 'skipped':
                result = Result.skipped
            elif attrs['status'] in ('error', 'failure'):
                result = Result.failed
            else:
                result = None
            # The failure message, when present, lives in a <traceback> child.
            try:
                message = list(node.iter('traceback'))[0].text
            except IndexError:
                message = ''
            # no matching status tags were found
            if result is None:
                result = Result.passed
            results.append(TestResult(
                step=step,
                name=attrs['name'],
                package=attrs.get('fixture') or None,
                duration=float(attrs['duration']) * 1000,
                result=result,
                message=message,
            ))
        return results
    def get_xunit_tests(self, root):
        """Parse a standard xunit document; duplicate tests are merged."""
        step = self.step
        results = []
        for node in root.iter('testcase'):
            # classname, name, time
            attrs = dict(node.items())
            # AFAIK the spec says only one tag can be present
            # http://windyroad.com.au/dl/Open%20Source/JUnit.xsd
            try:
                r_node = list(node.iterchildren())[0]
            except IndexError:
                # No child element: the test simply passed.
                result = Result.passed
                message = ''
            else:
                # TODO(cramer): whitelist tags that are not statuses
                if r_node.tag == 'failure':
                    result = Result.failed
                elif r_node.tag == 'skipped':
                    result = Result.skipped
                elif r_node.tag == 'error':
                    result = Result.failed
                else:
                    result = None
                message = r_node.text
            # If there's a previous failure in addition to stdout or stderr,
            # prioritize showing the previous failure because that's what's
            # useful for debugging flakiness.
            message = attrs.get("last_failure_output") or message
            # no matching status tags were found
            if result is None:
                result = Result.passed
            # Quarantined tests are mapped onto the quarantined_* results.
            if attrs.get('quarantined'):
                if result == Result.passed:
                    result = Result.quarantined_passed
                elif result == Result.failed:
                    result = Result.quarantined_failed
                elif result == Result.skipped:
                    result = Result.quarantined_skipped
            # Durations arrive in seconds; stored in milliseconds.
            if attrs.get('time'):
                duration = float(attrs['time']) * 1000
            else:
                duration = None
            results.append(TestResult(
                step=step,
                name=attrs['name'],
                package=attrs.get('classname') or None,
                duration=duration,
                result=result,
                message=message,
                reruns=int(attrs.get('rerun')) if attrs.get('rerun') else None,
                artifacts=self._get_testartifacts(node)
            ))
        results = _deduplicate_testresults(results)
        return results
    def _get_testartifacts(self, node):
        """Return the attribute dicts of <artifact> children, or None when
        the testcase has no <test-artifacts> element."""
        test_artifacts_node = node.find('test-artifacts')
        if test_artifacts_node is None:
            return None
        results = []
        # NOTE(review): this iterates the whole <testcase> subtree rather
        # than test_artifacts_node; presumably <artifact> elements only
        # occur inside <test-artifacts> — verify against producers.
        for artifact_node in node.iter('artifact'):
            attrs = dict(artifact_node.items())
            results.append(attrs)
        return results
def _deduplicate_testresults(results):
    """Combine TestResult objects until every package+name is unique.

    jUnit-style reports may legitimately contain several <testcase>
    elements for one test (py.test, for instance, emits one for a failure
    and another for a tear-down error). Changes requires a single result
    per test, so duplicates are folded together: durations, reruns and
    artifacts are summed, messages concatenated, results aggregated.
    """
    seen = {}
    unique = []
    for candidate in results:
        key = (candidate.package, candidate.name)
        if key in seen:
            # Fold this duplicate into the result we already kept.
            merged = seen[key]
            merged.duration = _careful_add(merged.duration, candidate.duration)
            merged.result = aggregate_result((merged.result, candidate.result))
            merged.message += '\n\n' + candidate.message
            merged.reruns = _careful_add(merged.reruns, candidate.reruns)
            merged.artifacts = _careful_add(merged.artifacts, candidate.artifacts)
        else:
            seen[key] = candidate
            unique.append(candidate)
    return unique
def _careful_add(a, b):
"""Return the sum `a + b`, else whichever is not `None`, else `None`."""
if a is None:
return b
if b is None:
return a
return a + b
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import cherrypy
import datetime
import os
import six
from .model_base import Model, AccessControlledModel
from girder import auditLogger, events
from girder.constants import AccessType, CoreEventHandler, SettingKey
from girder.exceptions import FilePathException, ValidationException
from girder.models.setting import Setting
from girder.utility import acl_mixin
from girder.utility import path as path_util
class File(acl_mixin.AccessControlMixin, Model):
    """
    This model represents a File, which is stored in an assetstore.
    """

    def initialize(self):
        from girder.utility import assetstore_utilities

        self.name = 'file'
        self.ensureIndices(
            ['itemId', 'assetstoreId', 'exts'] +
            assetstore_utilities.fileIndexFields())
        self.ensureTextIndex({'name': 1})
        self.resourceColl = 'item'
        self.resourceParent = 'itemId'
        self.exposeFields(level=AccessType.READ, fields=(
            '_id', 'mimeType', 'itemId', 'exts', 'name', 'created', 'creatorId',
            'size', 'updated', 'linkUrl'))
        self.exposeFields(level=AccessType.SITE_ADMIN, fields=('assetstoreId',))
        # Keep item/folder/root sizes in sync whenever a new file is created.
        events.bind('model.file.save.created',
                    CoreEventHandler.FILE_PROPAGATE_SIZE,
                    self._propagateSizeToItem)

    def remove(self, file, updateItemSize=True, **kwargs):
        """
        Use the appropriate assetstore adapter for whatever assetstore the
        file is stored in, and call deleteFile on it, then delete the file
        record from the database.

        :param file: The file document to remove.
        :param updateItemSize: Whether to update the item size. Only set this
            to False if you plan to delete the item and do not care about
            updating its size.
        """
        from .item import Item

        if file.get('assetstoreId'):
            self.getAssetstoreAdapter(file).deleteFile(file)
        if file['itemId']:
            item = Item().load(file['itemId'], force=True)
            # files that are linkUrls might not have a size field
            if 'size' in file:
                self.propagateSizeChange(item, -file['size'], updateItemSize)
        Model.remove(self, file)

    def download(self, file, offset=0, headers=True, endByte=None,
                 contentDisposition=None, extraParameters=None):
        """
        Use the appropriate assetstore adapter for whatever assetstore the
        file is stored in, and call downloadFile on it. If the file is a link
        file rather than a file in an assetstore, we redirect to it.

        :param file: The file to download.
        :param offset: The start byte within the file.
        :type offset: int
        :param headers: Whether to set headers (i.e. is this an HTTP request
            for a single file, or something else).
        :type headers: bool
        :param endByte: Final byte to download. If ``None``, downloads to the
            end of the file.
        :type endByte: int or None
        :param contentDisposition: Content-Disposition response header
            disposition-type value.
        :type contentDisposition: str or None
        :param extraParameters: Opaque value forwarded to the assetstore
            adapter's downloadFile and recorded in the audit log.
        :type extraParameters: str or None
        """
        events.trigger('model.file.download.request', info={
            'file': file,
            'startByte': offset,
            'endByte': endByte})
        auditLogger.info('file.download', extra={
            'details': {
                'fileId': file['_id'],
                'startByte': offset,
                'endByte': endByte,
                'extraParameters': extraParameters
            }
        })

        if file.get('assetstoreId'):
            try:
                fileDownload = self.getAssetstoreAdapter(file).downloadFile(
                    file, offset=offset, headers=headers, endByte=endByte,
                    contentDisposition=contentDisposition,
                    extraParameters=extraParameters)

                def downloadGenerator():
                    for data in fileDownload():
                        yield data
                    # Only fire the completion event if the whole file was sent.
                    if endByte is None or endByte >= file['size']:
                        events.trigger('model.file.download.complete', info={
                            'file': file,
                            'startByte': offset,
                            'endByte': endByte,
                            'redirect': False})
                return downloadGenerator
            except cherrypy.HTTPRedirect:
                events.trigger('model.file.download.complete', info={
                    'file': file,
                    'startByte': offset,
                    'endByte': endByte,
                    'redirect': True})
                raise
        elif file.get('linkUrl'):
            if headers:
                events.trigger('model.file.download.complete', info={
                    'file': file,
                    'startByte': offset,
                    'endByte': endByte,
                    'redirect': True})
                raise cherrypy.HTTPRedirect(file['linkUrl'])
            else:
                # Non-HTTP context: stream the URL string itself as the
                # "contents" of the link file.
                endByte = endByte or len(file['linkUrl'])

                def stream():
                    yield file['linkUrl'][offset:endByte]
                    if endByte >= len(file['linkUrl']):
                        events.trigger('model.file.download.complete', info={
                            'file': file,
                            'startByte': offset,
                            'endByte': endByte,
                            'redirect': False})
                return stream
        else:
            raise Exception('File has no known download mechanism.')

    def validate(self, doc):
        """
        Validate a file document prior to saving.

        Link files must have a stripped http(s) URL; every file must have a
        non-empty name. Also derives the lowercased extension list ``exts``
        from the name.

        :param doc: The file document.
        :returns: The validated (and possibly modified) document.
        :raises ValidationException: If the document is invalid.
        """
        if doc.get('assetstoreId') is None:
            if 'linkUrl' not in doc:
                raise ValidationException(
                    'File must have either an assetstore ID or a link URL.',
                    'linkUrl')
            doc['linkUrl'] = doc['linkUrl'].strip()

            if not doc['linkUrl'].startswith(('http:', 'https:')):
                raise ValidationException(
                    'Linked file URL must start with http: or https:.',
                    'linkUrl')
        if doc.get('assetstoreType'):
            # If assetstore model is overridden, make sure it's a valid model
            self._getAssetstoreModel(doc)
        if 'name' not in doc or not doc['name']:
            raise ValidationException('File name must not be empty.', 'name')

        doc['exts'] = [ext.lower() for ext in doc['name'].split('.')[1:]]

        return doc

    def _getAssetstoreModel(self, file):
        """
        Return the model class used to load this file's assetstore. Honors an
        optional ``assetstoreType`` override (a core model name, or a
        (modelName, pluginName) tuple for plugin models).

        :param file: The file document.
        :raises ValidationException: If ``assetstoreType`` is invalid.
        """
        from .assetstore import Assetstore

        if file.get('assetstoreType'):
            try:
                if isinstance(file['assetstoreType'], six.string_types):
                    return self.model(file['assetstoreType'])
                else:
                    return self.model(*file['assetstoreType'])
            except Exception:
                raise ValidationException(
                    'Invalid assetstore type: %s.' % (file['assetstoreType'],))
        else:
            return Assetstore()

    def createLinkFile(self, name, parent, parentType, url, creator, size=None,
                       mimeType=None, reuseExisting=False):
        """
        Create a file that is a link to a URL, rather than something we maintain
        in an assetstore.

        :param name: The local name for the file.
        :type name: str
        :param parent: The parent object for this file.
        :type parent: girder.models.folder or girder.models.item
        :param parentType: The parent type (folder or item)
        :type parentType: str
        :param url: The URL that this file points to
        :param creator: The user creating the file.
        :type creator: dict
        :param size: The size of the file in bytes. (optional)
        :type size: int
        :param mimeType: The mimeType of the file. (optional)
        :type mimeType: str
        :param reuseExisting: If a file with the same name already exists in
            this location, return it rather than creating a new file.
        :type reuseExisting: bool
        :raises ValidationException: If ``parentType`` is not folder or item.
        """
        from .item import Item

        if parentType == 'folder':
            # Create a new item with the name of the file.
            item = Item().createItem(
                name=name, creator=creator, folder=parent, reuseExisting=reuseExisting)
        elif parentType == 'item':
            item = parent
        else:
            # Fix: previously an invalid parentType fell through and caused a
            # NameError on the undefined "item" below; fail with a clear error.
            raise ValidationException(
                'parentType must be either "folder" or "item".', 'parentType')

        existing = None
        if reuseExisting:
            existing = self.findOne({
                'itemId': item['_id'],
                'name': name
            })
        if existing:
            file = existing
        else:
            file = {
                'created': datetime.datetime.utcnow(),
                'itemId': item['_id'],
                'assetstoreId': None,
                'name': name
            }
        file.update({
            'creatorId': creator['_id'],
            'mimeType': mimeType,
            'linkUrl': url
        })
        if size is not None:
            file['size'] = int(size)

        try:
            if existing:
                file = self.updateFile(file)
            else:
                file = self.save(file)
            return file
        except ValidationException:
            # Roll back the item we created for the link file.
            if parentType == 'folder':
                Item().remove(item)
            raise

    def propagateSizeChange(self, item, sizeIncrement, updateItemSize=True):
        """
        Propagates a file size change (or file creation) to the necessary
        parents in the hierarchy. Internally, this records subtree size in
        the item, the parent folder, and the root node under which the item
        lives. Should be called anytime a new file is added, a file is
        deleted, or a file size changes.

        :param item: The parent item of the file.
        :type item: dict
        :param sizeIncrement: The change in size to propagate.
        :type sizeIncrement: int
        :param updateItemSize: Whether the item size should be updated. Set to
            False if you plan to delete the item immediately and don't care to
            update its size.
        """
        from .folder import Folder
        from .item import Item

        if updateItemSize:
            # Propagate size up to item
            Item().increment(query={
                '_id': item['_id']
            }, field='size', amount=sizeIncrement, multi=False)

        # Propagate size to direct parent folder
        Folder().increment(query={
            '_id': item['folderId']
        }, field='size', amount=sizeIncrement, multi=False)

        # Propagate size up to root data node
        self.model(item['baseParentType']).increment(query={
            '_id': item['baseParentId']
        }, field='size', amount=sizeIncrement, multi=False)

    def createFile(self, creator, item, name, size, assetstore, mimeType=None,
                   saveFile=True, reuseExisting=False, assetstoreType=None):
        """
        Create a new file record in the database.

        :param item: The parent item.
        :param creator: The user creating the file.
        :param assetstore: The assetstore this file is stored in.
        :param name: The filename.
        :type name: str
        :param size: The size of the file in bytes.
        :type size: int
        :param mimeType: The mimeType of the file.
        :type mimeType: str
        :param saveFile: if False, don't save the file, just return it.
        :type saveFile: bool
        :param reuseExisting: If a file with the same name already exists in
            this location, return it rather than creating a new file.
        :type reuseExisting: bool
        :param assetstoreType: If a model other than assetstore will be used to
            initialize the assetstore adapter for this file, use this parameter
            to specify it. If it's a core model, pass its string name. If it's
            a plugin model, use a 2-tuple of the form (modelName, pluginName).
        :type assetstoreType: str or tuple
        """
        if reuseExisting:
            existing = self.findOne({
                'itemId': item['_id'],
                'name': name
            })
            if existing:
                return existing

        file = {
            'created': datetime.datetime.utcnow(),
            'creatorId': creator['_id'],
            'assetstoreId': assetstore['_id'],
            'name': name,
            'mimeType': mimeType,
            'size': size,
            'itemId': item['_id'] if item else None
        }

        if assetstoreType:
            file['assetstoreType'] = assetstoreType

        if saveFile:
            return self.save(file)
        return file

    def _propagateSizeToItem(self, event):
        """
        This callback updates an item's size to include that of a newly-created
        file.

        This generally should not be called or overridden directly. This should
        not be unregistered, as that would cause item, folder, and collection
        sizes to be inaccurate.
        """
        # This task is not performed in "createFile", in case
        # "saveFile==False". The item size should be updated only when it's
        # certain that the file will actually be saved. It is also possible for
        # "model.file.save" to set "defaultPrevented", which would prevent the
        # item from being saved initially.
        from .item import Item

        fileDoc = event.info
        itemId = fileDoc.get('itemId')
        if itemId and fileDoc.get('size'):
            item = Item().load(itemId, force=True)
            self.propagateSizeChange(item, fileDoc['size'])

    def updateFile(self, file):
        """
        Call this when changing properties of an existing file, such as name
        or MIME type. This causes the updated stamp to change, and also alerts
        the underlying assetstore adapter that file information has changed.
        """
        file['updated'] = datetime.datetime.utcnow()
        file = self.save(file)

        if file.get('assetstoreId'):
            self.getAssetstoreAdapter(file).fileUpdated(file)

        return file

    def getAssetstoreAdapter(self, file):
        """
        Return the assetstore adapter for the given file.
        """
        from girder.utility import assetstore_utilities

        assetstore = self._getAssetstoreModel(file).load(file['assetstoreId'])
        return assetstore_utilities.getAssetstoreAdapter(assetstore)

    def copyFile(self, srcFile, creator, item=None):
        """
        Copy a file so that we don't need to duplicate stored data.

        :param srcFile: The file to copy.
        :type srcFile: dict
        :param creator: The user copying the file.
        :param item: a new item to assign this file to (optional)
        :returns: a dict with the new file.
        """
        # Copy the source file's dictionary. The individual assetstore
        # implementations will need to fix references if they cannot be
        # directly duplicated.
        file = srcFile.copy()
        # Immediately delete the original id so that we get a new one.
        del file['_id']
        file['copied'] = datetime.datetime.utcnow()
        file['copierId'] = creator['_id']
        if item:
            file['itemId'] = item['_id']
        if file.get('assetstoreId'):
            self.getAssetstoreAdapter(file).copyFile(srcFile, file)
        elif file.get('linkUrl'):
            file['linkUrl'] = srcFile['linkUrl']

        return self.save(file)

    def isOrphan(self, file):
        """
        Returns True if this file is orphaned (its item or attached entity is
        missing).

        :param file: The file to check.
        :type file: dict
        """
        if file.get('attachedToId'):
            attachedToType = file.get('attachedToType')
            if isinstance(attachedToType, six.string_types):
                modelType = self.model(attachedToType)
            elif isinstance(attachedToType, list) and len(attachedToType) == 2:
                modelType = self.model(*attachedToType)
            else:
                # Invalid 'attachedToType'
                return True
            if isinstance(modelType, (acl_mixin.AccessControlMixin,
                                      AccessControlledModel)):
                attachedDoc = modelType.load(
                    file.get('attachedToId'), force=True)
            else:
                attachedDoc = modelType.load(
                    file.get('attachedToId'))
        else:
            from .item import Item
            attachedDoc = Item().load(file.get('itemId'), force=True)
        return not attachedDoc

    def updateSize(self, file):
        """
        Returns the size of this file. Does not currently check the underlying
        assetstore to verify the size.

        :param file: The file.
        :type file: dict
        :returns: a 2-tuple of (size, 0). NOTE(review): the meaning of the
            constant second element isn't visible here — presumably a count of
            unverifiable bytes; confirm against callers.
        """
        # TODO: check underlying assetstore for size?
        return file.get('size', 0), 0

    def open(self, file):
        """
        Use this to expose a Girder file as a python file-like object. At the
        moment, this is a read-only interface, the equivalent of opening a
        system file with ``'rb'`` mode. This can also be used as a context
        manager, e.g.:

        >>> with File().open(file) as fh:
        >>>    while True:
        >>>        chunk = fh.read(CHUNK_LEN)
        >>>        if not chunk:
        >>>            break

        Using it this way will automatically close the file handle for you when
        the ``with`` block is left.

        :param file: A Girder file document.
        :type file: dict
        :return: A file-like object containing the bytes of the file.
        :rtype: girder.utility.abstract_assetstore_adapter.FileHandle
        """
        return self.getAssetstoreAdapter(file).open(file)

    def getGirderMountFilePath(self, file, validate=True):
        """
        If possible, get the path of the file on a local girder mount.

        :param file: The file document.
        :param validate: if True, check if the path exists and raise an
            exception if it does not.
        :returns: a girder mount path to the file or None if no such path is
            available.
        """
        mount = Setting().get(SettingKey.GIRDER_MOUNT_INFORMATION, None)
        if mount:
            path = mount['path'].rstrip('/') + path_util.getResourcePath('file', file, force=True)
            if not validate or os.path.exists(path):
                return path
        if validate:
            raise FilePathException('This file isn\'t accessible from a Girder mount.')

    def getLocalFilePath(self, file):
        """
        If an assetstore adapter supports it, return a path to the file on the
        local file system.

        :param file: The file document.
        :returns: a local path to the file or None if no such path is known.
        """
        adapter = self.getAssetstoreAdapter(file)
        try:
            return adapter.getLocalFilePath(file)
        except FilePathException as exc:
            try:
                return self.getGirderMountFilePath(file, True)
            except Exception:
                # If getting a Girder mount path throws, raise the original
                # exception
                pass
            raise exc
| |
#!/usr/bin/env python
#
# # Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Runs the tests.
There are a few initialization issues to deal with.
The first is flags, which must be initialized before any imports. The test
configuration has the same problem (it was based on flags back when the tests
resided outside of the Nova code).
The command line is picked apart so that Nose won't see commands it isn't
compatible with, such as "--flagfile" or "--group".
This script imports all other tests to make them known to Proboscis before
passing control to proboscis.TestProgram which itself calls nose, which then
call unittest.TestProgram and exits.
If "repl" is a command line argument, then the original stdout and stderr is
saved and sys.exit is neutralized so that unittest.TestProgram will not exit
and instead sys.stdout and stderr are restored so that interactive mode can
be used.
"""
from __future__ import absolute_import
import atexit
import gettext
import logging
import os
import time
import unittest
import sys
import proboscis
from nose import config
from nose import core
from tests.colorizer import NovaTestRunner
# Optionally attach to a remote PyDev debugger before anything else runs.
# 10.0.2.2 is the default host address seen from inside a VirtualBox guest,
# so this supports debugging from an IDE running on the host machine.
if os.environ.get("PYDEV_DEBUG", "False") == 'True':
    from pydev import pydevd
    pydevd.settrace('10.0.2.2', port=7864, stdoutToServer=True,
                    stderrToServer=True)
def add_support_for_localization():
    """Adds support for localization in the logging.

    If ../nova/__init__.py exists, add ../ to Python search path, so that
    it will override what happens to be installed in
    /usr/(local/)lib/python...

    Also installs gettext's ``_`` into builtins for the 'nova' domain.
    """
    path = os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)
    possible_topdir = os.path.normpath(path)
    if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
        sys.path.insert(0, possible_topdir)

    if sys.version_info[0] >= 3:
        # Fix: the "unicode" keyword was removed from gettext.install() in
        # Python 3 (translations are always str there); passing it raises
        # TypeError. Keep the legacy call on Python 2 so _() returns unicode.
        gettext.install('nova')
    else:
        gettext.install('nova', unicode=1)
# Module-level handle to the active test runner; set by run_main() and read
# by _clean_up() at exit so final results can be reported.
MAIN_RUNNER = None
def initialize_rdl_config(config_file):
    """Load the Trove ("RDL") config file and initialize logging and the DB.

    Exits the process with an error message if configuring the database or
    locating the api-paste config raises a RuntimeError.
    """
    from trove.common import cfg
    from oslo_log import log
    from trove.db import get_db_api
    conf = cfg.CONF
    cfg.parse_args(['int_tests'], default_config_files=[config_file])
    log.setup(conf, None)
    try:
        get_db_api().configure_db(conf)
        # NOTE(review): conf_file is never used afterwards — presumably kept
        # for its side effect of verifying the paste config exists; confirm.
        conf_file = conf.find_file(conf.api_paste_config)
    except RuntimeError as error:
        import traceback
        print(traceback.format_exc())
        sys.exit("ERROR: %s" % error)
def _clean_up():
    """Shuts down any services this program has started and shows results."""
    from tests.util import report

    report.update()
    if MAIN_RUNNER is not None:
        MAIN_RUNNER.on_exit()

    from tests.util.services import get_running_services
    for svc in get_running_services():
        # Announce which command line is being stopped before stopping it.
        pieces = ["Stopping service "]
        pieces.extend(part + " " for part in svc.cmd)
        pieces.append("...\n\r")
        sys.stderr.write("".join(pieces))
        svc.stop()
def import_tests():
    """Imports every integration test module so proboscis discovers them.

    Importing a test module registers its groups and tests with proboscis as
    a side effect; the imported names themselves are intentionally unused.
    """
    # TODO(tim.simpson): Import these again once white box test functionality
    # is restored.
    # from tests.dns import check_domain
    # from tests.dns import concurrency
    # from tests.dns import conversion

    # The DNS stuff is problematic. Not loading the other tests allow us to
    # run its functional tests only.
    ADD_DOMAINS = os.environ.get("ADD_DOMAINS", "False") == 'True'
    if not ADD_DOMAINS:
        from tests.api import delete_all
        from tests.api import instances_pagination
        from tests.api import instances_quotas
        from tests.api import instances_states
        from tests.dns import dns
        from tests import initialize
        from tests.smoke import instance
        from tests.volumes import driver

    # Groups that exist as core int-tests are registered from the
    # trove.tests.int_tests module
    from trove.tests import int_tests

    # Groups defined in trove/integration, or any other externally
    # defined groups can be registered here
    heavy_black_box_groups = [
        "dbaas.api.instances.pagination",
        "dbaas.api.instances.delete",
        "dbaas.api.instances.status",
        "dbaas.api.instances.down",
        "dbaas.api.mgmt.hosts.update",
        "fake.dbaas.api.mgmt.instances",
        "fake.dbaas.api.mgmt.accounts.broken",
        "fake.dbaas.api.mgmt.allaccounts"
    ]
    proboscis.register(groups=["heavy_blackbox"],
                       depends_on_groups=heavy_black_box_groups)
def run_main(test_importer):
    """Parse the CLI, load the test configuration, and run the tests.

    :param test_importer: zero-argument callable that imports every test
        module so proboscis discovers them (e.g. ``import_tests``).
    """
    # Fix: _clean_up() reads the module-level MAIN_RUNNER at exit. Without
    # this declaration the assignment below only created a (dead) local and
    # the runner's on_exit() hook never fired.
    global MAIN_RUNNER

    add_support_for_localization()

    # Strip non-nose arguments out before passing this to nosetests
    repl = False
    nose_args = []
    conf_file = "~/test.conf"
    show_elapsed = True
    groups = []
    print("RUNNING TEST ARGS : " + str(sys.argv))
    extra_test_conf_lines = []
    rdl_config_file = None
    nova_flag_file = None
    index = 0
    while index < len(sys.argv):
        arg = sys.argv[index]
        if arg[:2] == "-i" or arg == '--repl':
            repl = True
        elif arg[:7] == "--conf=":
            conf_file = os.path.expanduser(arg[7:])
            print("Setting TEST_CONF to " + conf_file)
            os.environ["TEST_CONF"] = conf_file
        elif arg[:8] == "--group=":
            groups.append(arg[8:])
        elif arg == "--test-config":
            if index >= len(sys.argv) - 1:
                # Fix: the message used to name a nonexistent "--test-conf"
                # flag.
                print('Expected an argument to follow "--test-config".')
                sys.exit()
            conf_line = sys.argv[index + 1]
            extra_test_conf_lines.append(conf_line)
            # Fix: skip over the value we just consumed; previously it was
            # re-parsed on the next iteration and leaked into nose_args.
            index += 1
        elif arg[:11] == "--flagfile=":
            pass  # Deliberately swallowed so nose never sees it.
        elif arg[:14] == "--config-file=":
            rdl_config_file = arg[14:]
        elif arg[:13] == "--nova-flags=":
            nova_flag_file = arg[13:]
        elif arg.startswith('--hide-elapsed'):
            show_elapsed = False
        else:
            nose_args.append(arg)
        index += 1

    # Many of the test decorators depend on configuration values, so before
    # start importing modules we have to load the test config followed by the
    # flag files.
    from trove.tests.config import CONFIG

    # Find config file.
    if "TEST_CONF" not in os.environ:
        raise RuntimeError("Please define an environment variable named " +
                           "TEST_CONF with the location to a conf file.")
    file_path = os.path.expanduser(os.environ["TEST_CONF"])
    if not os.path.exists(file_path):
        raise RuntimeError("Could not find TEST_CONF at " + file_path + ".")
    # Load config file and then any lines we read from the arguments.
    CONFIG.load_from_file(file_path)
    for line in extra_test_conf_lines:
        CONFIG.load_from_line(line)
    if CONFIG.white_box:  # If white-box testing, set up the flags.
        # Handle loading up RDL's config file madness.
        initialize_rdl_config(rdl_config_file)

    # Set up the report, and print out how we're running the tests.
    from tests.util import report
    from datetime import datetime
    report.log("Trove Integration Tests, %s" % datetime.now())
    report.log("Invoked via command: " + str(sys.argv))
    report.log("Groups = " + str(groups))
    report.log("Test conf file = %s" % os.environ["TEST_CONF"])
    if CONFIG.white_box:
        report.log("")
        report.log("Test config file = %s" % rdl_config_file)
    report.log("")
    report.log("sys.path:")
    for path in sys.path:
        report.log("\t%s" % path)

    # Now that all configurations are loaded its time to import everything
    test_importer()

    atexit.register(_clean_up)

    c = config.Config(stream=sys.stdout,
                      env=os.environ,
                      verbosity=3,
                      plugins=core.DefaultPluginManager())
    runner = NovaTestRunner(stream=c.stream,
                            verbosity=c.verbosity,
                            config=c,
                            show_elapsed=show_elapsed,
                            known_bugs=CONFIG.known_bugs)
    MAIN_RUNNER = runner

    if repl:
        # Turn off the following "feature" of the unittest module in case
        # we want to start a REPL.
        sys.exit = lambda x: None

    proboscis.TestProgram(argv=nose_args, groups=groups, config=c,
                          testRunner=MAIN_RUNNER).run_and_exit()
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__
# Entry point: discover all tests via import_tests, then hand control to
# proboscis (which wraps nose/unittest).
if __name__ == "__main__":
    run_main(import_tests)
| |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metropolis-Hastings Monte Carlo.
NOTE: these functions operate on batches of MCMC configurations and should not
be vmapped.
"""
from ferminet import constants
import jax
from jax import lax
from jax import numpy as jnp
def _harmonic_mean(x, atoms):
    """Calculates the harmonic mean of each electron distance to the nuclei.

    Args:
      x: electron positions. Shape (batch, nelectrons, 1, ndim). Note the third
        dimension is already expanded, which allows for avoiding additional
        reshapes in the MH algorithm.
      atoms: atom positions. Shape (natoms, ndim)

    Returns:
      Array of shape (batch, nelectrons, 1, 1), where the (i, j, 0, 0) element
      is the harmonic mean of the distance of the j-th electron of the i-th
      MCMC configuration to all atoms.
    """
    # Displacement from every electron to every nucleus.
    displacements = x - jnp.expand_dims(atoms, axis=0)
    distances = jnp.linalg.norm(displacements, axis=-1, keepdims=True)
    # Harmonic mean = reciprocal of the mean reciprocal distance over nuclei.
    mean_inverse = jnp.mean(1.0 / distances, axis=-2, keepdims=True)
    return 1.0 / mean_inverse
def _log_prob_gaussian(x, mu, sigma):
    """Calculates the log probability of Gaussian with diagonal covariance.

    Args:
      x: Positions. Shape (batch, nelectron, 1, ndim) - as used in mh_update.
      mu: means of Gaussian distribution. Same shape as or broadcastable to x.
      sigma: standard deviation of the distribution. Same shape as or
        broadcastable to x.

    Returns:
      Log probability of Gaussian distribution with shape as required for
      mh_update - (batch, nelectron, 1, 1).
    """
    # Quadratic term of the log density, summed over all non-batch axes.
    quadratic = -0.5 * jnp.square(x - mu) / jnp.square(sigma)
    log_kernel = jnp.sum(quadratic, axis=[1, 2, 3])
    # Log of the (sigma-dependent part of the) normalization constant.
    log_norm = x.shape[-1] * jnp.sum(jnp.log(sigma), axis=[1, 2, 3])
    return log_kernel - log_norm
def mh_update(params,
              f,
              x1,
              key,
              lp_1,
              num_accepts,
              stddev=0.02,
              atoms=None,
              i=0):
    """Performs one Metropolis-Hastings step using an all-electron move.

    Args:
      params: Wavefuncttion parameters.
      f: Callable with signature f(params, x) returning the log of the
        wavefunction (i.e. the sqaure root of the log probability of x).
      x1: Initial MCMC configurations. Shape (batch, nelectrons*ndim).
      key: RNG state.
      lp_1: log probability of f evaluated at x1 given parameters params.
      num_accepts: Number of MH move proposals accepted.
      stddev: width of Gaussian move proposal.
      atoms: If not None, atom positions of shape (natoms, 3), in which case
        proposals are drawn from N(0, (h_i stddev)^2) with h_i the harmonic
        mean of distances between the i-th electron and the atoms; otherwise
        proposals are drawn from N(0, stddev^2).
      i: Ignored.

    Returns:
      (x, key, lp, num_accepts): updated configurations, RNG state, log
      probability at x, and running count of accepted moves.
    """
    del i  # electron index is irrelevant for an all-electron move
    key, proposal_key = jax.random.split(key)
    if atoms is None:
        # Symmetric isotropic Gaussian proposal: the Hastings correction
        # cancels and the acceptance ratio is just the probability ratio.
        x2 = x1 + stddev * jax.random.normal(proposal_key, shape=x1.shape)
        lp_2 = 2. * f(params, x2)
        ratio = lp_2 - lp_1
    else:
        # Proposal width scales with the harmonic-mean distance to the
        # nuclei, so forward/reverse proposal probabilities must be included.
        batch = x1.shape[0]
        x1 = jnp.reshape(x1, [batch, -1, 1, 3])
        width1 = stddev * _harmonic_mean(x1, atoms)
        x2 = x1 + width1 * jax.random.normal(proposal_key, shape=x1.shape)
        lp_2 = 2. * f(params, x2)
        width2 = stddev * _harmonic_mean(x2, atoms)
        lq_1 = _log_prob_gaussian(x1, x2, width1)  # forward probability
        lq_2 = _log_prob_gaussian(x2, x1, width2)  # reverse probability
        ratio = lp_2 + lq_2 - lp_1 - lq_1
        x1 = jnp.reshape(x1, [batch, -1])
        x2 = jnp.reshape(x2, [batch, -1])
    key, accept_key = jax.random.split(key)
    log_u = jnp.log(jax.random.uniform(accept_key, shape=lp_1.shape))
    accept = ratio > log_u
    new_x = jnp.where(accept[..., None], x2, x1)
    new_lp = jnp.where(accept, lp_2, lp_1)
    num_accepts += jnp.sum(accept)
    return new_x, key, new_lp, num_accepts
def mh_one_electron_update(params,
                           f,
                           x1,
                           key,
                           lp_1,
                           num_accepts,
                           stddev=0.02,
                           atoms=None,
                           i=0):
    """Performs one Metropolis-Hastings step for a single electron.

    Args:
      params: Wavefuncttion parameters.
      f: Callable with signature f(params, x) returning the log of the
        wavefunction (i.e. the sqaure root of the log probability of x).
      x1: Initial MCMC configurations. Shape (batch, nelectrons*ndim).
      key: RNG state.
      lp_1: log probability of f evaluated at x1 given parameters params.
      num_accepts: Number of MH move proposals accepted.
      stddev: width of Gaussian move proposal.
      atoms: Ignored. Asymmetric move proposals are not implemented for
        single-electron moves.
      i: index of electron to move.

    Returns:
      (x, key, lp, num_accepts): updated configurations, RNG state, log
      probability at x, and running count of accepted moves.

    Raises:
      NotImplementedError: if atoms is supplied.
    """
    key, proposal_key = jax.random.split(key)
    batch = x1.shape[0]
    x1 = jnp.reshape(x1, [batch, -1, 1, 3])
    nelec = x1.shape[1]
    active = i % nelec  # cycle through the electrons as i increases
    if atoms is None:
        # Symmetric proposal: perturb only the active electron's coordinates.
        noise = stddev * jax.random.normal(
            proposal_key, shape=x1[:, active].shape)
        x2 = x1.at[:, active].add(noise)
        lp_2 = 2. * f(params, x2)
        ratio = lp_2 - lp_1
    else:  # asymmetric proposal, stddev propto harmonic mean of nuclear distances
        raise NotImplementedError('Still need to work out reverse probabilities '
                                  'for asymmetric moves.')
    x1 = jnp.reshape(x1, [batch, -1])
    x2 = jnp.reshape(x2, [batch, -1])
    key, accept_key = jax.random.split(key)
    log_u = jnp.log(jax.random.uniform(accept_key, shape=lp_1.shape))
    accept = ratio > log_u
    new_x = jnp.where(accept[..., None], x2, x1)
    new_lp = jnp.where(accept, lp_2, lp_1)
    num_accepts += jnp.sum(accept)
    return new_x, key, new_lp, num_accepts
def make_mcmc_step(batch_network,
                   batch_per_device,
                   steps=10,
                   atoms=None,
                   one_electron_moves=False):
    """Creates the MCMC step function.

    Args:
      batch_network: function, signature (params, x), which evaluates the log
        of the wavefunction (square root of the log probability distribution)
        at x given params. Inputs and outputs are batched.
      batch_per_device: Batch size per device.
      steps: Number of MCMC moves to attempt in a single call to the MCMC step
        function.
      atoms: atom positions. If given, an asymmetric move proposal is used
        based on the harmonic mean of electron-atom distances for each
        electron. Otherwise the (conventional) normal distribution is used.
      one_electron_moves: If true, attempt to move one electron at a time.
        Otherwise, attempt one all-electron move per MCMC step.

    Returns:
      Callable which performs the set of MCMC steps.
    """
    if one_electron_moves:
        inner_fun = mh_one_electron_update
    else:
        inner_fun = mh_update

    @jax.jit
    def mcmc_step(params, data, key, width):
        """Performs a set of MCMC steps.

        Args:
          params: parameters to pass to the network.
          data: (batched) MCMC configurations to pass to the network.
          key: RNG state.
          width: standard deviation to use in the move proposal.

        Returns:
          (data, pmove), where data is the updated MCMC configurations and
          pmove the average probability a move was accepted.
        """
        def step_fn(step_index, state):
            return inner_fun(params, batch_network, *state, stddev=width,
                             atoms=atoms, i=step_index)

        nelec = data.shape[-1] // 3
        # One-electron mode makes nelec inner moves per logical step so that
        # every electron gets one attempt.
        nsteps = nelec * steps if one_electron_moves else steps
        logprob = 2. * batch_network(params, data)
        data, key, _, num_accepts = lax.fori_loop(
            0, nsteps, step_fn, (data, key, logprob, 0.))
        pmove = jnp.sum(num_accepts) / (nsteps * batch_per_device)
        pmove = constants.pmean(pmove)
        return data, pmove

    return mcmc_step
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the normalization layer classes and their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import moving_averages
from tensorflow.python.util.tf_export import tf_export
@tf_export('layers.BatchNormalization')
class BatchNormalization(base.Layer):
  """Batch Normalization layer from http://arxiv.org/abs/1502.03167.
  "Batch Normalization: Accelerating Deep Network Training by Reducing
  Internal Covariate Shift"
  Sergey Ioffe, Christian Szegedy
  Arguments:
    axis: An `int` or list of `int`, the axis or axes that should be
      normalized, typically the features axis/axes. For instance, after a
      `Conv2D` layer with `data_format="channels_first"`, set `axis=1`. If a
      list of axes is provided, each axis in `axis` will be normalized
      simultaneously. Default is `-1` which takes uses last axis. Note: when
      using multi-axis batch norm, the `beta`, `gamma`, `moving_mean`, and
      `moving_variance` variables are the same rank as the input Tensor, with
      dimension size 1 in all reduced (non-axis) dimensions).
    momentum: Momentum for the moving average.
    epsilon: Small float added to variance to avoid dividing by zero.
    center: If True, add offset of `beta` to normalized tensor. If False, `beta`
      is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is
      not used. When the next layer is linear (also e.g. `nn.relu`), this can be
      disabled since the scaling can be done by the next layer.
    beta_initializer: Initializer for the beta weight.
    gamma_initializer: Initializer for the gamma weight.
    moving_mean_initializer: Initializer for the moving mean.
    moving_variance_initializer: Initializer for the moving variance.
    beta_regularizer: Optional regularizer for the beta weight.
    gamma_regularizer: Optional regularizer for the gamma weight.
    beta_constraint: An optional projection function to be applied to the `beta`
      weight after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    gamma_constraint: An optional projection function to be applied to the
      `gamma` weight after being updated by an `Optimizer`.
    renorm: Whether to use Batch Renormalization
      (https://arxiv.org/abs/1702.03275). This adds extra variables during
      training. The inference is the same for either value of this parameter.
    renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
      scalar `Tensors` used to clip the renorm correction. The correction
      `(r, d)` is used as `corrected_value = normalized_value * r + d`, with
      `r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
      dmax are set to inf, 0, inf, respectively.
    renorm_momentum: Momentum used to update the moving means and standard
      deviations with renorm. Unlike `momentum`, this affects training
      and should be neither too small (which would add noise) nor too large
      (which would give stale estimates). Note that `momentum` is still applied
      to get the means and variances for inference.
    fused: if `None` or `True`, use a faster, fused implementation if possible.
      If `False`, use the system recommended implementation.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,
      which means batch normalization is performed across the whole batch. When
      `virtual_batch_size` is not `None`, instead perform "Ghost Batch
      Normalization", which creates virtual sub-batches which are each
      normalized separately (with shared gamma, beta, and moving statistics).
      Must divide the actual batch size during execution.
    adjustment: A function taking the `Tensor` containing the (dynamic) shape of
      the input tensor and returning a pair (scale, bias) to apply to the
      normalized values (before gamma and beta), only during training. For
      example, if axis==-1,
        `adjustment = lambda shape: (
          tf.random_uniform(shape[-1:], 0.93, 1.07),
          tf.random_uniform(shape[-1:], -0.1, 0.1))`
      will scale the normalized value by up to 7% up or down, then shift the
      result by up to 0.1 (with independent scaling and bias for each feature
      but shared across all examples), and finally apply gamma and/or beta. If
      `None`, no adjustment is applied. Cannot be specified if
      virtual_batch_size is specified.
    name: A string, the name of the layer.
  """
  def __init__(self,
               axis=-1,
               momentum=0.99,
               epsilon=1e-3,
               center=True,
               scale=True,
               beta_initializer=init_ops.zeros_initializer(),
               gamma_initializer=init_ops.ones_initializer(),
               moving_mean_initializer=init_ops.zeros_initializer(),
               moving_variance_initializer=init_ops.ones_initializer(),
               beta_regularizer=None,
               gamma_regularizer=None,
               beta_constraint=None,
               gamma_constraint=None,
               renorm=False,
               renorm_clipping=None,
               renorm_momentum=0.99,
               fused=None,
               trainable=True,
               virtual_batch_size=None,
               adjustment=None,
               name=None,
               **kwargs):
    """Initializes the layer. See the class docstring for argument details."""
    super(BatchNormalization, self).__init__(
        name=name, trainable=trainable, **kwargs)
    # Copy a list-valued axis so later in-place normalization (in `build`)
    # does not mutate the caller's list.
    if isinstance(axis, list):
      self.axis = axis[:]
    else:
      self.axis = axis
    self.momentum = momentum
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = beta_initializer
    self.gamma_initializer = gamma_initializer
    self.moving_mean_initializer = moving_mean_initializer
    self.moving_variance_initializer = moving_variance_initializer
    self.beta_regularizer = beta_regularizer
    self.gamma_regularizer = gamma_regularizer
    self.beta_constraint = beta_constraint
    self.gamma_constraint = gamma_constraint
    self.renorm = renorm
    self.virtual_batch_size = virtual_batch_size
    self.adjustment = adjustment
    # Default to attempting the fused implementation; `build` later disables
    # it if the configuration (rank, axis, renorm, ...) is unsupported.
    if fused is None:
      fused = True
    self.fused = fused
    # Test hook: when set to False, `_fused_batch_norm` divides Bessel's
    # correction back out of the fused variance.
    self._bessels_correction_test_only = True
    if renorm:
      renorm_clipping = renorm_clipping or {}
      keys = ['rmax', 'rmin', 'dmax']
      if set(renorm_clipping) - set(keys):
        raise ValueError('renorm_clipping %s contains keys not in %s' %
                         (renorm_clipping, keys))
      self.renorm_clipping = renorm_clipping
      self.renorm_momentum = renorm_momentum
  def build(self, input_shape):
    """Validates `axis`, decides fused vs. unfused, and creates variables.

    Creates `gamma`/`beta` (if enabled), the `moving_mean`/`moving_variance`
    statistics, and — when `renorm` is on — the renorm moving statistics.

    Raises:
      ValueError: if the input rank or `axis` configuration is invalid, or if
        `virtual_batch_size`/`adjustment` are used inconsistently.
      TypeError: if `axis` is neither an int nor a list.
    """
    input_shape = tensor_shape.TensorShape(input_shape)
    if not input_shape.ndims:
      raise ValueError('Input has undefined rank:', input_shape)
    ndims = len(input_shape)
    # Convert axis to list and resolve negatives
    if isinstance(self.axis, int):
      self.axis = [self.axis]
    if not isinstance(self.axis, list):
      raise TypeError('axis must be int or list, type given: %s'
                      % type(self.axis))
    for idx, x in enumerate(self.axis):
      if x < 0:
        self.axis[idx] = ndims + x
    # Validate axes
    for x in self.axis:
      if x < 0 or x >= ndims:
        raise ValueError('Invalid axis: %d' % x)
    if len(self.axis) != len(set(self.axis)):
      raise ValueError('Duplicate axis: %s' % self.axis)
    if self.virtual_batch_size is not None:
      if self.virtual_batch_size <= 0:
        raise ValueError('virtual_batch_size must be a positive integer that '
                         'divides the true batch size of the input Tensor')
      # If using virtual batches, the first dimension must be the batch
      # dimension and cannot be the batch norm axis
      if 0 in self.axis:
        raise ValueError('When using virtual_batch_size, the batch dimension '
                         'must be 0 and thus axis cannot include 0')
      if self.adjustment is not None:
        raise ValueError('When using virtual_batch_size, adjustment cannot '
                         'be specified')
    if self.fused:
      # Currently fused batch norm doesn't support renorm. It also only supports
      # an input tensor of rank 4 and a channel dimension on axis 1 or 3.
      # TODO(yaozhang): if input is not 4D, reshape it to 4D and reshape the
      # output back to its original shape accordingly.
      self.fused = (not self.renorm and
                    ndims == 4 and
                    self.axis in [[1], [3]] and
                    self.virtual_batch_size is None and
                    self.adjustment is None)
      # TODO(chrisying): fused batch norm is currently not supported for
      # multi-axis batch norm and by extension virtual batches. In some cases,
      # it might be possible to use fused batch norm but would require reshaping
      # the Tensor to 4D with the axis in 1 or 3 (preferred 1) which is
      # particularly tricky. A compromise might be to just support the most
      # common use case (turning 5D w/ virtual batch to NCHW)
    if self.fused:
      if self.axis == [1]:
        self._data_format = 'NCHW'
      elif self.axis == [3]:
        self._data_format = 'NHWC'
      else:
        raise ValueError('Unsupported axis, fused batch norm only supports '
                         'axis == [1] or axis == [3]')
    # Raise parameters of fp16 batch norm to fp32
    if self.dtype == dtypes.float16 or self.dtype == dtypes.bfloat16:
      param_dtype = dtypes.float32
    else:
      param_dtype = self.dtype or dtypes.float32
    axis_to_dim = {x: input_shape[x].value for x in self.axis}
    for x in axis_to_dim:
      if axis_to_dim[x] is None:
        raise ValueError('Input has undefined `axis` dimension. Input shape: ',
                         input_shape)
    self.input_spec = base.InputSpec(ndim=ndims, axes=axis_to_dim)
    if len(axis_to_dim) == 1 and self.virtual_batch_size is None:
      # Single axis batch norm (most common/default use-case)
      param_shape = (list(axis_to_dim.values())[0],)
    else:
      # Parameter shape is the original shape but with 1 in all non-axis dims
      param_shape = [axis_to_dim[i] if i in axis_to_dim
                     else 1 for i in range(ndims)]
      if self.virtual_batch_size is not None:
        # When using virtual batches, add an extra dim at index 1
        param_shape.insert(1, 1)
        for idx, x in enumerate(self.axis):
          self.axis[idx] = x + 1  # Account for added dimension
    if self.scale:
      self.gamma = self.add_variable(
          name='gamma',
          shape=param_shape,
          dtype=param_dtype,
          initializer=self.gamma_initializer,
          regularizer=self.gamma_regularizer,
          constraint=self.gamma_constraint,
          trainable=True)
    else:
      self.gamma = None
      if self.fused:
        # Fused kernel still needs a gamma tensor; use a constant 1.
        self._gamma_const = array_ops.constant(
            1.0, dtype=param_dtype, shape=param_shape)
    if self.center:
      self.beta = self.add_variable(
          name='beta',
          shape=param_shape,
          dtype=param_dtype,
          initializer=self.beta_initializer,
          regularizer=self.beta_regularizer,
          constraint=self.beta_constraint,
          trainable=True)
    else:
      self.beta = None
      if self.fused:
        # Fused kernel still needs a beta tensor; use a constant 0.
        self._beta_const = array_ops.constant(
            0.0, dtype=param_dtype, shape=param_shape)
    # Disable variable partitioning when creating the moving mean and variance
    try:
      if self._scope:
        partitioner = self._scope.partitioner
        self._scope.set_partitioner(None)
      else:
        partitioner = None
      self.moving_mean = self.add_variable(
          name='moving_mean',
          shape=param_shape,
          dtype=param_dtype,
          initializer=self.moving_mean_initializer,
          trainable=False)
      self.moving_variance = self.add_variable(
          name='moving_variance',
          shape=param_shape,
          dtype=param_dtype,
          initializer=self.moving_variance_initializer,
          trainable=False)
      self._one_minus_decay = 1.0 - self.momentum
      if self.renorm:
        # Create variables to maintain the moving mean and standard deviation.
        # These are used in training and thus are different from the moving
        # averages above. The renorm variables are colocated with moving_mean
        # and moving_variance.
        # NOTE: below, the outer `with device` block causes the current device
        # stack to be cleared. The nested ones use a `lambda` to set the desired
        # device and ignore any devices that may be set by the custom getter.
        def _renorm_variable(name, shape):
          # Zero-initialized, non-trainable moving statistic for renorm.
          var = self.add_variable(
              name=name,
              shape=shape,
              dtype=param_dtype,
              initializer=init_ops.zeros_initializer(),
              trainable=False)
          return var
        with ops.device(None):
          device = ((lambda _: self.moving_mean.device)
                    if context.in_graph_mode() else self.moving_mean.device)
          with ops.device(device):
            self.renorm_mean = _renorm_variable('renorm_mean', param_shape)
            self.renorm_mean_weight = _renorm_variable('renorm_mean_weight', ())
            # We initialize renorm_stddev to 0, and maintain the (0-initialized)
            # renorm_stddev_weight. This allows us to (1) mix the average
            # stddev with the minibatch stddev early in training, and (2) compute
            # the unbiased average stddev by dividing renorm_stddev by the weight.
          device = ((lambda _: self.moving_variance.device)
                    if context.in_graph_mode() else self.moving_variance.device)
          with ops.device(device):
            self.renorm_stddev = _renorm_variable('renorm_stddev', param_shape)
            self.renorm_stddev_weight = _renorm_variable(
                'renorm_stddev_weight', ())
    finally:
      # Restore the scope's partitioner even if variable creation raised.
      if partitioner:
        self._scope.set_partitioner(partitioner)
    self.built = True
  def _assign_moving_average(self, variable, value, one_minus_decay):
    """Updates a moving average: variable -= (variable - value) * (1 - decay).

    Colocates the update with `variable` and returns the assign op (or the
    updated value in eager mode).
    """
    with ops.name_scope(None, 'AssignMovingAvg',
                        [variable, value, one_minus_decay]) as scope:
      with ops.colocate_with(variable):
        update_delta = math_ops.multiply(
            math_ops.subtract(variable.read_value(), value),
            one_minus_decay)
        if isinstance(variable, resource_variable_ops.ResourceVariable):
          # state_ops.assign_sub does an extra read_variable_op after the
          # assign. We avoid that here.
          return gen_resource_variable_ops.assign_sub_variable_op(
              variable.handle, update_delta, name=scope)
        else:
          return state_ops.assign_sub(variable, update_delta, name=scope)
  def _fused_batch_norm(self, inputs, training):
    """Returns the output of fused batch norm."""
    # When center/scale are disabled, fall back to the constant tensors
    # created in `build` so the fused kernel always receives beta/gamma.
    beta = self.beta if self.center else self._beta_const
    gamma = self.gamma if self.scale else self._gamma_const
    def _fused_batch_norm_training():
      return nn.fused_batch_norm(
          inputs,
          gamma,
          beta,
          epsilon=self.epsilon,
          data_format=self._data_format)
    def _fused_batch_norm_inference():
      return nn.fused_batch_norm(
          inputs,
          gamma,
          beta,
          mean=self.moving_mean,
          variance=self.moving_variance,
          epsilon=self.epsilon,
          is_training=False,
          data_format=self._data_format)
    output, mean, variance = utils.smart_cond(
        training, _fused_batch_norm_training, _fused_batch_norm_inference)
    if not self._bessels_correction_test_only:
      # Remove Bessel's correction to be consistent with non-fused batch norm.
      # Note that the variance computed by fused batch norm is
      # with Bessel's correction.
      sample_size = math_ops.cast(
          array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
      factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
      variance *= factor
    training_value = utils.constant_value(training)
    if training_value is None:
      # `training` is a tensor: use decay 0 at inference time so the moving
      # averages are left unchanged.
      one_minus_decay = utils.smart_cond(training,
                                         lambda: self._one_minus_decay,
                                         lambda: 0.)
    else:
      one_minus_decay = ops.convert_to_tensor(self._one_minus_decay)
    if training_value or training_value is None:
      mean_update = self._assign_moving_average(self.moving_mean, mean,
                                                one_minus_decay)
      variance_update = self._assign_moving_average(self.moving_variance,
                                                    variance, one_minus_decay)
      if context.in_graph_mode():
        # Note that in Eager mode, the updates are already executed when running
        # assign_moving_averages. So we do not need to put them into
        # collections.
        self.add_update(mean_update, inputs=inputs)
        self.add_update(variance_update, inputs=inputs)
    return output
  def _renorm_correction_and_moments(self, mean, variance, training):
    """Returns the correction and update values for renorm."""
    stddev = math_ops.sqrt(variance + self.epsilon)
    # Compute the average mean and standard deviation, as if they were
    # initialized with this batch's moments.
    mixed_renorm_mean = (self.renorm_mean +
                         (1. - self.renorm_mean_weight) * mean)
    mixed_renorm_stddev = (self.renorm_stddev +
                           (1. - self.renorm_stddev_weight) * stddev)
    # Compute the corrections for batch renorm.
    r = stddev / mixed_renorm_stddev
    d = (mean - mixed_renorm_mean) / mixed_renorm_stddev
    # Ensure the corrections use pre-update moving averages.
    with ops.control_dependencies([r, d]):
      mean = array_ops.identity(mean)
      stddev = array_ops.identity(stddev)
    rmin, rmax, dmax = [self.renorm_clipping.get(key)
                        for key in ['rmin', 'rmax', 'dmax']]
    if rmin is not None:
      r = math_ops.maximum(r, rmin)
    if rmax is not None:
      r = math_ops.minimum(r, rmax)
    if dmax is not None:
      d = math_ops.maximum(d, -dmax)
      d = math_ops.minimum(d, dmax)
    # When not training, use r=1, d=0.
    r = utils.smart_cond(training, lambda: r, lambda: array_ops.ones_like(r))
    d = utils.smart_cond(training, lambda: d, lambda: array_ops.zeros_like(d))
    def _update_renorm_variable(var, weight, value):
      """Updates a moving average and weight, returns the unbiased value."""
      value = array_ops.identity(value)
      def _do_update():
        # Update the variables without zero debiasing. The debiasing will be
        # accomplished by dividing the exponential moving average by the weight.
        # For example, after a single update, the moving average would be
        # (1-decay) * value. and the weight will be 1-decay, with their ratio
        # giving the value.
        # Make sure the weight is not updated until before r and d computation.
        with ops.control_dependencies([value]):
          weight_value = array_ops.constant(1., dtype=weight.dtype)
        new_var = moving_averages.assign_moving_average(
            var, value, self.renorm_momentum, zero_debias=False)
        new_weight = moving_averages.assign_moving_average(
            weight, weight_value, self.renorm_momentum, zero_debias=False)
        return new_var / new_weight
      def _fake_update():
        return array_ops.identity(var)
      return utils.smart_cond(training, _do_update, _fake_update)
    with ops.colocate_with(self.moving_mean):
      new_mean = _update_renorm_variable(self.renorm_mean,
                                         self.renorm_mean_weight,
                                         mean)
    with ops.colocate_with(self.moving_variance):
      new_stddev = _update_renorm_variable(self.renorm_stddev,
                                           self.renorm_stddev_weight,
                                           stddev)
      # Make sqrt(moving_variance + epsilon) = new_stddev.
      new_variance = math_ops.square(new_stddev) - self.epsilon
    return (r, d, new_mean, new_variance)
  def call(self, inputs, training=False):
    """Applies batch normalization to `inputs`.

    Dispatches to the fused kernel when `self.fused` is set; otherwise
    computes (or looks up) the moments, applies renorm/adjustment
    corrections when configured, updates the moving statistics during
    training, and normalizes with `nn.batch_normalization`.
    """
    in_eager_mode = context.in_eager_mode()
    if self.virtual_batch_size is not None:
      # Virtual batches (aka ghost batches) can be simulated by reshaping the
      # Tensor and reusing the existing batch norm implementation
      original_shape = [-1] + inputs.shape.as_list()[1:]
      expanded_shape = [self.virtual_batch_size, -1] + original_shape[1:]
      # Will cause errors if virtual_batch_size does not divide the batch size
      inputs = array_ops.reshape(inputs, expanded_shape)
      def undo_virtual_batching(outputs):
        outputs = array_ops.reshape(outputs, original_shape)
        return outputs
    if self.fused:
      outputs = self._fused_batch_norm(inputs, training=training)
      if self.virtual_batch_size is not None:
        # Currently never reaches here since fused_batch_norm does not support
        # virtual batching
        return undo_virtual_batching(outputs)
      return outputs
    # Compute the axes along which to reduce the mean / variance
    input_shape = inputs.get_shape()
    ndims = len(input_shape)
    reduction_axes = [i for i in range(ndims) if i not in self.axis]
    if self.virtual_batch_size is not None:
      del reduction_axes[1]  # Do not reduce along virtual batch dim
    # Broadcasting only necessary for single-axis batch norm where the axis is
    # not the last dimension
    broadcast_shape = [1] * ndims
    broadcast_shape[self.axis[0]] = input_shape[self.axis[0]].value
    def _broadcast(v):
      # Reshape a parameter tensor so it broadcasts against `inputs` when the
      # normalized axis is not the trailing dimension.
      if (v is not None and
          len(v.get_shape()) != ndims and
          reduction_axes != list(range(ndims - 1))):
        return array_ops.reshape(v, broadcast_shape)
      return v
    scale, offset = _broadcast(self.gamma), _broadcast(self.beta)
    def _compose_transforms(scale, offset, then_scale, then_offset):
      # Fold an additional affine transform into the running (scale, offset).
      if then_scale is not None:
        scale *= then_scale
        offset *= then_scale
      if then_offset is not None:
        offset += then_offset
      return (scale, offset)
    # Determine a boolean value for `training`: could be True, False, or None.
    training_value = utils.constant_value(training)
    if training_value is not False:
      if self.adjustment:
        adj_scale, adj_bias = self.adjustment(array_ops.shape(inputs))
        # Adjust only during training.
        adj_scale = utils.smart_cond(training,
                                     lambda: adj_scale,
                                     lambda: array_ops.ones_like(adj_scale))
        adj_bias = utils.smart_cond(training,
                                    lambda: adj_bias,
                                    lambda: array_ops.zeros_like(adj_bias))
        scale, offset = _compose_transforms(adj_scale, adj_bias, scale, offset)
      # Some of the computations here are not necessary when training==False
      # but not a constant. However, this makes the code simpler.
      keep_dims = self.virtual_batch_size is not None or len(self.axis) > 1
      mean, variance = nn.moments(inputs, reduction_axes, keep_dims=keep_dims)
      moving_mean = self.moving_mean
      moving_variance = self.moving_variance
      mean = utils.smart_cond(training,
                              lambda: mean,
                              lambda: moving_mean)
      variance = utils.smart_cond(training,
                                  lambda: variance,
                                  lambda: moving_variance)
      if self.renorm:
        r, d, new_mean, new_variance = self._renorm_correction_and_moments(
            mean, variance, training)
        # When training, the normalized values (say, x) will be transformed as
        # x * gamma + beta without renorm, and (x * r + d) * gamma + beta
        # = x * (r * gamma) + (d * gamma + beta) with renorm.
        r = _broadcast(array_ops.stop_gradient(r, name='renorm_r'))
        d = _broadcast(array_ops.stop_gradient(d, name='renorm_d'))
        scale, offset = _compose_transforms(r, d, scale, offset)
      else:
        new_mean, new_variance = mean, variance
      if self.virtual_batch_size is not None:
        # This isn't strictly correct since in ghost batch norm, you are
        # supposed to sequentially update the moving_mean and moving_variance
        # with each sub-batch. However, since the moving statistics are only
        # used during evaluation, it is more efficient to just update in one
        # step and should not make a significant difference in the result.
        new_mean = math_ops.reduce_mean(new_mean,
                                        axis=1, keep_dims=True)
        new_variance = math_ops.reduce_mean(new_variance,
                                            axis=1, keep_dims=True)
      def _do_update(var, value):
        # Skip moving-average updates for frozen layers in eager mode.
        if in_eager_mode and not self.trainable:
          return
        return moving_averages.assign_moving_average(
            var, value, self.momentum, zero_debias=False)
      mean_update = utils.smart_cond(
          training,
          lambda: _do_update(self.moving_mean, new_mean),
          lambda: self.moving_mean)
      variance_update = utils.smart_cond(
          training,
          lambda: _do_update(self.moving_variance, new_variance),
          lambda: self.moving_variance)
      if context.in_graph_mode():
        self.add_update(mean_update, inputs=inputs)
        self.add_update(variance_update, inputs=inputs)
    else:
      # `training` is statically False: always use the moving statistics.
      mean, variance = self.moving_mean, self.moving_variance
    outputs = nn.batch_normalization(inputs,
                                     _broadcast(mean),
                                     _broadcast(variance),
                                     offset,
                                     scale,
                                     self.epsilon)
    # If some components of the shape got lost due to adjustments, fix that.
    outputs.set_shape(input_shape)
    if self.virtual_batch_size is not None:
      return undo_virtual_batching(outputs)
    return outputs
  def compute_output_shape(self, input_shape):
    """Batch normalization does not change the shape of its input."""
    return input_shape
@tf_export('layers.batch_normalization')
def batch_normalization(inputs,
                        axis=-1,
                        momentum=0.99,
                        epsilon=1e-3,
                        center=True,
                        scale=True,
                        beta_initializer=init_ops.zeros_initializer(),
                        gamma_initializer=init_ops.ones_initializer(),
                        moving_mean_initializer=init_ops.zeros_initializer(),
                        moving_variance_initializer=init_ops.ones_initializer(),
                        beta_regularizer=None,
                        gamma_regularizer=None,
                        beta_constraint=None,
                        gamma_constraint=None,
                        training=False,
                        trainable=True,
                        name=None,
                        reuse=None,
                        renorm=False,
                        renorm_clipping=None,
                        renorm_momentum=0.99,
                        fused=None,
                        virtual_batch_size=None,
                        adjustment=None):
  """Functional interface for the batch normalization layer.
  Reference: http://arxiv.org/abs/1502.03167
  "Batch Normalization: Accelerating Deep Network Training by Reducing
  Internal Covariate Shift"
  Sergey Ioffe, Christian Szegedy
  Note: when training, the moving_mean and moving_variance need to be updated.
  By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they
  need to be added as a dependency to the `train_op`. For example:
  ```python
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
      train_op = optimizer.minimize(loss)
  ```
  This is a thin wrapper that constructs a `BatchNormalization` layer and
  applies it to `inputs`; see that class for the full semantics of every
  argument below.
  Arguments:
    inputs: Tensor input.
    axis: An `int`, the axis that should be normalized (typically the features
      axis). For instance, after a `Convolution2D` layer with
      `data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
    momentum: Momentum for the moving average.
    epsilon: Small float added to variance to avoid dividing by zero.
    center: If True, add offset of `beta` to normalized tensor. If False,
      `beta` is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is not used. When
      the next layer is linear (also e.g. `nn.relu`), this can be disabled
      since the scaling can be done by the next layer.
    beta_initializer: Initializer for the beta weight.
    gamma_initializer: Initializer for the gamma weight.
    moving_mean_initializer: Initializer for the moving mean.
    moving_variance_initializer: Initializer for the moving variance.
    beta_regularizer: Optional regularizer for the beta weight.
    gamma_regularizer: Optional regularizer for the gamma weight.
    beta_constraint: Optional projection function applied to the `beta` weight
      after being updated by an `Optimizer` (e.g. to implement norm or value
      constraints). Takes the unprojected variable, returns the projected
      variable of the same shape. Not safe to use with asynchronous
      distributed training.
    gamma_constraint: Optional projection function applied to the `gamma`
      weight after being updated by an `Optimizer`.
    training: Either a Python boolean, or a TensorFlow boolean scalar tensor
      (e.g. a placeholder). Whether to return the output in training mode
      (normalized with statistics of the current batch) or in inference mode
      (normalized with moving statistics). **NOTE**: make sure to set this
      parameter correctly, or else your training/inference will not work
      properly.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    name: String, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.
    renorm: Whether to use Batch Renormalization
      (https://arxiv.org/abs/1702.03275). Adds extra variables during
      training; inference is unaffected by this flag.
    renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
      scalar `Tensors` used to clip the renorm correction. The correction
      `(r, d)` is used as `corrected_value = normalized_value * r + d`, with
      `r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax,
      rmin, dmax default to inf, 0, inf, respectively.
    renorm_momentum: Momentum used to update the moving means and standard
      deviations with renorm. Unlike `momentum`, this affects training and
      should be neither too small (noisy) nor too large (stale). `momentum`
      is still used to get the means and variances for inference.
    fused: if `None` or `True`, use a faster, fused implementation if
      possible. If `False`, use the system recommended implementation.
    virtual_batch_size: An `int`. By default `None`, meaning batch
      normalization runs across the whole batch. Otherwise perform "Ghost
      Batch Normalization": virtual sub-batches normalized separately (with
      shared gamma, beta, and moving statistics). Must divide the actual
      batch size during execution.
    adjustment: A function taking the `Tensor` containing the (dynamic) shape
      of the input tensor and returning a pair (scale, bias) to apply to the
      normalized values (before gamma and beta), only during training. For
      example, if axis==-1,
        `adjustment = lambda shape: (
          tf.random_uniform(shape[-1:], 0.93, 1.07),
          tf.random_uniform(shape[-1:], -0.1, 0.1))`
      scales the normalized value by up to 7% up or down and shifts it by up
      to 0.1 (independent scaling and bias per feature, shared across all
      examples), before gamma and/or beta are applied. If `None`, no
      adjustment is applied. Cannot be combined with `virtual_batch_size`.
  Returns:
    Output tensor.
  Raises:
    ValueError: if eager execution is enabled.
  """
  # Collect the layer configuration; `_reuse`/`_scope` are consumed by the
  # base `Layer` machinery for variable-scope reuse.
  layer_kwargs = dict(
      axis=axis,
      momentum=momentum,
      epsilon=epsilon,
      center=center,
      scale=scale,
      beta_initializer=beta_initializer,
      gamma_initializer=gamma_initializer,
      moving_mean_initializer=moving_mean_initializer,
      moving_variance_initializer=moving_variance_initializer,
      beta_regularizer=beta_regularizer,
      gamma_regularizer=gamma_regularizer,
      beta_constraint=beta_constraint,
      gamma_constraint=gamma_constraint,
      renorm=renorm,
      renorm_clipping=renorm_clipping,
      renorm_momentum=renorm_momentum,
      fused=fused,
      trainable=trainable,
      virtual_batch_size=virtual_batch_size,
      adjustment=adjustment,
      name=name,
      dtype=inputs.dtype.base_dtype,
      _reuse=reuse,
      _scope=name)
  bn_layer = BatchNormalization(**layer_kwargs)
  return bn_layer.apply(inputs, training=training)
# Aliases
# Backwards-compatible shorthand names for the layer class and its
# functional interface.
BatchNorm = BatchNormalization
batch_norm = batch_normalization